Revert "Revert "Merge remote-tracking branch 'goog/upstream-main..."

Revert submission 24759249-revert-24736872-pigup-2309-GUZVFSRFJO

Reason for revert: fixing the breakages

Reverted changes: /q/submissionid:24759249-revert-24736872-pigup-2309-GUZVFSRFJO

Change-Id: Ib7465930f5c91f15416b968a5809d758211de5dd
diff --git a/.bazelrc b/.bazelrc
index 9fd023b..017ecc6 100644
--- a/.bazelrc
+++ b/.bazelrc
@@ -42,11 +42,16 @@
 # TODO(b/269204725): Move the following flags to the toolchain configuration.
 # By default build with C++17.
 build --cxxopt='-std=c++17'
+build --linkopt='-std=c++17'
 build --cxxopt="-fno-rtti"
 build --cxxopt="-Wnon-virtual-dtor"
 # Allow uses of the register keyword, which may appear in C headers.
 build --cxxopt="-Wno-register"
 
+# This feature can't be enabled until __unordtf2 and __letf2 are implemented by
+# compiler-rt. See https://reviews.llvm.org/D53608.
+# build --features=fully_static_link
+
 build --@mbedtls//:mbedtls_config=//third_party/mbedtls:default_config
 
 # Define the --config=asan-libfuzzer configuration.
diff --git a/.gn b/.gn
index 2b2ab66..837bdd1 100644
--- a/.gn
+++ b/.gn
@@ -34,10 +34,19 @@
   pw_build_PYTHON_PIP_INSTALL_FIND_LINKS =
       [ "${pw_env_setup_CIPD_MSRV_PYTHON}/pip_cache" ]
 
-  # Exclude third-party headers from static analysis.
-  pw_toolchain_STATIC_ANALYSIS_SKIP_SOURCES_RES = [ "third_party/.*" ]
+  pw_toolchain_STATIC_ANALYSIS_SKIP_SOURCES_RES = [
+    # Exclude downloaded third-party CHRE headers from static analysis.
+    "environment/packages/chre/.*",
 
-  # Code generated by third-party tool.
-  pw_toolchain_STATIC_ANALYSIS_SKIP_INCLUDE_PATHS =
-      [ "pw_tls_client/generate_test_data" ]
+    # Exclude third-party headers from static analysis.
+    "third_party/.*",
+  ]
+
+  pw_toolchain_STATIC_ANALYSIS_SKIP_INCLUDE_PATHS = [
+    # Exclude third-party CHRE code from static analysis.
+    "../environment/packages/chre/.*",
+
+    # Code generated by third-party tool.
+    "pw_tls_client/generate_test_data",
+  ]
 }
diff --git a/BUILD.gn b/BUILD.gn
index 90ab303..4ee4dfc 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -24,6 +24,7 @@
 import("$dir_pw_perf_test/perf_test.gni")
 import("$dir_pw_rpc/config.gni")
 import("$dir_pw_rust/rust.gni")
+import("$dir_pw_third_party/ambiq/ambiq.gni")
 import("$dir_pw_third_party/mcuxpresso/mcuxpresso.gni")
 import("$dir_pw_toolchain/c_optimization.gni")
 import("$dir_pw_toolchain/generate_toolchain.gni")
@@ -208,10 +209,6 @@
   toolchain_prefix = "$_internal_toolchains:pw_strict_host_clang_"
 }
 
-group("host_clang_fuzz") {
-  deps = [ ":pigweed_default($_internal_toolchains:pw_strict_host_clang_fuzz)" ]
-}
-
 # GCC is only supported for Windows. Pigweed doesn't yet provide a Windows
 # clang toolchain, and Pigweed does not provide gcc toolchains for macOS and
 # Linux.
@@ -232,6 +229,12 @@
   }
 }
 
+if (dir_pw_third_party_ambiq_SDK != "") {
+  _build_pigweed_default_at_all_optimization_levels("apollo4") {
+    toolchain_prefix = "$dir_pigweed/targets/apollo4:apollo4_"
+  }
+}
+
 _build_pigweed_default_at_all_optimization_levels("stm32f429i") {
   toolchain_prefix = "$dir_pigweed/targets/stm32f429i_disc1:stm32f429i_disc1_"
 }
@@ -335,26 +338,29 @@
   }
 }
 
-# Build-only target for fuzzers.
 group("fuzzers") {
   deps = []
 
-  # TODO(b/274437709): The client_fuzzer encounters build errors on macos. Limit
-  # it to Linux hosts for now.
-  if (host_os == "linux") {
-    _default_tc = _default_toolchain_prefix + pw_DEFAULT_C_OPTIMIZATION_LEVEL
-    deps += [ "$dir_pw_rpc/fuzz:client_fuzzer($_default_tc)" ]
-  }
-
   if (host_os != "win") {
     # Coverage-guided fuzzing is only supported on Linux and MacOS using clang.
-    deps += [
-      "$dir_pw_bluetooth_hci:fuzzers($dir_pigweed/targets/host:host_clang_fuzz)",
-      "$dir_pw_fuzzer:fuzzers($dir_pigweed/targets/host:host_clang_fuzz)",
-      "$dir_pw_protobuf:fuzzers($dir_pigweed/targets/host:host_clang_fuzz)",
-      "$dir_pw_random:fuzzers($dir_pigweed/targets/host:host_clang_fuzz)",
-      "$dir_pw_tokenizer:fuzzers($dir_pigweed/targets/host:host_clang_fuzz)",
-    ]
+    # Fuzztest-based fuzzers will run in unit test mode. libFuzzer-based fuzzers
+    # will only build.
+    _clang_fuzz_tc = _default_toolchain_prefix + "fuzz"
+    deps += [ ":pw_module_tests.run($_clang_fuzz_tc)" ]
+  }
+
+  # Also build (but do not run) bespoke fuzzers.
+  if (!pw_toolchain_OSS_FUZZ_ENABLED) {
+    _default_tc = _default_toolchain_prefix + pw_DEFAULT_C_OPTIMIZATION_LEVEL
+    deps += [ ":pw_custom_fuzzers($_default_tc)" ]
+  }
+}
+
+# Build-only target for OSS-Fuzz. No-op unless OSS-Fuzz is enabled.
+group("oss_fuzz") {
+  if (pw_toolchain_OSS_FUZZ_ENABLED) {
+    _clang_fuzz_tc = _default_toolchain_prefix + "fuzz"
+    deps = [ ":pw_module_tests($_clang_fuzz_tc)" ]
   }
 }
 
@@ -364,13 +370,11 @@
   }
 }
 
-# TODO(b/234876100): msan will not work until the C++ standard library included
-# in the sysroot has a variant built with msan.
+# TODO(b/234876100): msan will not work without false positives until the C++
+# standard library included in the sysroot has a variant built with msan.
 group("msan") {
   # TODO(b/259695498): msan doesn't work on macOS yet.
-  if (host_os != "win" && host_os != "mac" && host_os != "linux") {
-    deps = [ ":pw_module_tests.run($dir_pigweed/targets/host:host_clang_msan)" ]
-  }
+  deps = [ ":pw_module_tests.run($dir_pigweed/targets/host:host_clang_msan)" ]
 }
 
 group("tsan") {
@@ -580,6 +584,17 @@
     output_metadata = true
   }
 
+  # Fuzzers not based on a fuzzing engine. Engine-based fuzzers should be
+  # included in `pw_module_tests`.
+  pw_test_group("pw_custom_fuzzers") {
+    # TODO(b/274437709): The RPC client_fuzzer encounters build errors on macos.
+    # Limit it to Linux hosts for now.
+    if (host_os == "linux") {
+      tests = [ "$dir_pw_rpc/fuzz:client_fuzzer" ]
+    }
+    output_metadata = true
+  }
+
   # Modules that support C++14.
   # TODO(hepler): pw_kvs is supposed to compile as C++14, but does not.
   group("cpp14_modules") {
diff --git a/PIGWEED_MODULES b/PIGWEED_MODULES
index 7f0e2ed..dd20816 100644
--- a/PIGWEED_MODULES
+++ b/PIGWEED_MODULES
@@ -24,6 +24,7 @@
 pw_build_mcuxpresso
 pw_bytes
 pw_checksum
+pw_chre
 pw_chrono
 pw_chrono_embos
 pw_chrono_freertos
@@ -108,6 +109,7 @@
 pw_sync_threadx
 pw_sync_zephyr
 pw_sys_io
+pw_sys_io_ambiq_sdk
 pw_sys_io_arduino
 pw_sys_io_baremetal_lm3s6965evb
 pw_sys_io_baremetal_stm32f429
diff --git a/WORKSPACE b/WORKSPACE
index 5c5ec42..e872c7f 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -16,8 +16,8 @@
     name = "pigweed",
 )
 
-load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
 load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository", "new_git_repository")
+load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
 load(
     "//pw_env_setup/bazel/cipd_setup:cipd_rules.bzl",
     "cipd_client_repository",
@@ -58,12 +58,54 @@
 
 cipd_client_repository()
 
+# Set up legacy pw_transfer test binaries.
+# Required by: pigweed.
+# Used in modules: //pw_transfer.
 cipd_repository(
     name = "pw_transfer_test_binaries",
     path = "pigweed/pw_transfer_test_binaries/${os=linux}-${arch=amd64}",
     tag = "version:pw_transfer_test_binaries_528098d588f307881af83f769207b8e6e1b57520-linux-amd64-cipd.cipd",
 )
 
+# Fetch llvm toolchain.
+# Required by: pigweed.
+# Used in modules: //pw_toolchain.
+cipd_repository(
+    name = "llvm_toolchain",
+    path = "fuchsia/third_party/clang/${os}-${arch}",
+    tag = "git_revision:ebd0b8a0472b865b7eb6e1a32af97ae31d829033",
+)
+
+# Fetch linux sysroot for host builds.
+# Required by: pigweed.
+# Used in modules: //pw_toolchain.
+cipd_repository(
+    name = "linux_sysroot",
+    path = "fuchsia/third_party/sysroot/linux",
+    tag = "git_revision:d342388843734b6c5c50fb7e18cd3a76476b93aa",
+)
+
+register_toolchains(
+    "//pw_toolchain/host_clang:host_cc_toolchain_linux",
+    "//pw_toolchain/host_clang:host_cc_toolchain_macos",
+)
+
+# Fetch gcc-arm-none-eabi toolchain.
+# Required by: pigweed.
+# Used in modules: //pw_toolchain.
+cipd_repository(
+    name = "gcc_arm_none_eabi_toolchain",
+    path = "fuchsia/third_party/armgcc/${os}-${arch}",
+    tag = "version:2@12.2.mpacbti-rel1.1",
+)
+
+register_toolchains(
+    "//pw_toolchain/arm_gcc:arm_gcc_cc_toolchain_cortex-m0",
+    "//pw_toolchain/arm_gcc:arm_gcc_cc_toolchain_cortex-m3",
+    "//pw_toolchain/arm_gcc:arm_gcc_cc_toolchain_cortex-m4",
+    "//pw_toolchain/arm_gcc:arm_gcc_cc_toolchain_cortex-m4+nofp",
+)
+
 # Set up Starlark library.
 # Required by: io_bazel_rules_go, com_google_protobuf, rules_python
 # Used in modules: None.
@@ -81,12 +123,6 @@
 
 bazel_skylib_workspace()
 
-cipd_repository(
-    name = "llvm_toolchain",
-    path = "fuchsia/third_party/clang/${os}-${arch}",
-    tag = "git_revision:ebd0b8a0472b865b7eb6e1a32af97ae31d829033",
-)
-
 # Set up Python support.
 # Required by: rules_fuzzing, com_github_nanopb_nanopb.
 # Used in modules: None.
@@ -136,26 +172,7 @@
     ],
 )
 
-# Set up host hermetic host toolchain.
-# Required by: All cc targets.
-# Used in modules: All cc targets.
-git_repository(
-    name = "rules_cc_toolchain",
-    commit = "9f209fda87414285bc66accd3612575b29760fba",
-    remote = "https://github.com/bazelembedded/rules_cc_toolchain",
-    shallow_since = "1675385535 -0800",
-)
-
-load("@rules_cc_toolchain//:rules_cc_toolchain_deps.bzl", "rules_cc_toolchain_deps")
-
-rules_cc_toolchain_deps()
-
-load("@rules_cc_toolchain//cc_toolchain:cc_toolchain.bzl", "register_cc_toolchains")
-
-register_cc_toolchains()
-
 # Sets up Bazels documentation generator.
-# Required by: rules_cc_toolchain.
 # Required by modules: All
 git_repository(
     name = "io_bazel_stardoc",
@@ -251,9 +268,6 @@
         # Fix rustdoc test w/ proc macros
         # https://github.com/bazelbuild/rules_rust/pull/1952
         "//pw_rust/bazel_patches:0001-rustdoc_test-Apply-prefix-stripping-to-proc_macro-de.patch",
-        # Allow `rust_repository_set` to specify `opt_level`
-        # https://github.com/bazelbuild/rules_rust/pull/2036
-        "//pw_rust/bazel_patches:0002-Add-opt_level-argument-to-rust_repository_set.patch",
         # Adds prototype functionality for documenting multiple crates in one
         # HTML output directory.  While the approach in this patch may have
         # issues scaling to giant mono-repos, it is apporpriate for embedded
@@ -261,101 +275,37 @@
         # the `rules_rust` community decides on a way to propperly support this,
         # we will migrate to that solution.
         # https://github.com/konkers/rules_rust/tree/wip/rustdoc
-        "//pw_rust/bazel_patches:0003-PROTOTYPE-Add-ability-to-document-multiple-crates-at.patch",
+        "//pw_rust/bazel_patches:0002-PROTOTYPE-Add-ability-to-document-multiple-crates-at.patch",
     ],
-    sha256 = "190b5aeba104210f8ed9b1ff595d1f459297fe32db70f0a04f5c537a13ee0602",
-    urls = ["https://github.com/bazelbuild/rules_rust/releases/download/0.24.1/rules_rust-v0.24.1.tar.gz"],
+    sha256 = "9d04e658878d23f4b00163a72da3db03ddb451273eb347df7d7c50838d698f49",
+    urls = ["https://github.com/bazelbuild/rules_rust/releases/download/0.26.0/rules_rust-v0.26.0.tar.gz"],
 )
 
-load("@rules_rust//rust:repositories.bzl", "rules_rust_dependencies", "rust_analyzer_toolchain_repository", "rust_repository_set")
+load("@rules_rust//rust:repositories.bzl", "rules_rust_dependencies")
 
 rules_rust_dependencies()
 
-RUST_EMBEDDED_TARGET_TRIPLES = {
-    "thumbv8m.main-none-eabihf": [
-        "@platforms//cpu:armv8-m",
-        "@bazel_embedded//constraints/fpu:fpv5-d16",
-    ],
-    "thumbv7m-none-eabi": [
-        "@platforms//cpu:armv7-m",
-        "@bazel_embedded//constraints/fpu:none",
-    ],
-    "thumbv6m-none-eabi": [
-        "@platforms//cpu:armv6-m",
-        "@bazel_embedded//constraints/fpu:none",
-    ],
-}
-
-RUST_OPT_LEVELS = {
-    "thumbv8m.main-none-eabihf": {
-        "dbg": "0",
-        "fastbuild": "0",
-        "opt": "z",
-    },
-    "thumbv7m-none-eabi": {
-        "dbg": "0",
-        "fastbuild": "0",
-        "opt": "z",
-    },
-    "thumbv6m-none-eabi": {
-        "dbg": "0",
-        "fastbuild": "0",
-        "opt": "z",
-    },
-}
-
-# Here we register a specific set of toolchains.
-#
-# Note: This statement creates name mangled remotes of the form:
-# `@{name}__{triplet}_tools`
-# (example: `@rust_linux_x86_64__thumbv7m-none-eabi_tools/`)
-rust_repository_set(
-    name = "rust_linux_x86_64",
-    edition = "2021",
-    exec_triple = "x86_64-unknown-linux-gnu",
-    extra_target_triples = RUST_EMBEDDED_TARGET_TRIPLES,
-    opt_level = RUST_OPT_LEVELS,
-    versions = ["1.67.0"],
+load(
+    "//pw_toolchain/rust:defs.bzl",
+    "pw_rust_register_toolchain_and_target_repos",
+    "pw_rust_register_toolchains",
 )
 
-rust_repository_set(
-    name = "rust_macos_x86_64",
-    edition = "2021",
-    exec_triple = "x86_64-apple-darwin",
-    extra_target_triples = RUST_EMBEDDED_TARGET_TRIPLES,
-    opt_level = RUST_OPT_LEVELS,
-    versions = ["1.67.0"],
+pw_rust_register_toolchain_and_target_repos(
+    cipd_tag = "rust_revision:faee636ebfff793ea9dcff17960a611b580e3cd5",
 )
 
 # Allows creation of a `rust-project.json` file to allow rust analyzer to work.
 load("@rules_rust//tools/rust_analyzer:deps.bzl", "rust_analyzer_dependencies")
 
-# Since we do not use rust_register_toolchains, we need to define a
-# rust_analyzer_toolchain.
-register_toolchains(rust_analyzer_toolchain_repository(
-    name = "linux_rust_analyzer_toolchain",
-    exec_compatible_with = ["@platforms//os:linux"],
-    # This should match the currently registered linux toolchain.
-    version = "1.67.0",
-))
-
-register_toolchains(rust_analyzer_toolchain_repository(
-    name = "macos_rust_analyzer_toolchain",
-    exec_compatible_with = ["@platforms//os:macos"],
-    # This should match the currently registered macos toolchain.
-    version = "1.67.0",
-))
-
-register_toolchains(
-    "//pw_toolchain/host_clang:host_cc_toolchain_macos",
-)
-
 rust_analyzer_dependencies()
 
+pw_rust_register_toolchains()
+
 # Vendored third party rust crates.
 git_repository(
     name = "rust_crates",
-    commit = "e4dcd91091f0537e6b5482677f2007b32a94703e",
+    commit = "6d975531f7672cc6aa54bdd7517e1beeffa578da",
     remote = "https://pigweed.googlesource.com/third_party/rust_crates",
     shallow_since = "1675359057 +0000",
 )
diff --git a/docs/BUILD.gn b/docs/BUILD.gn
index 117dd1c..030dd9c 100644
--- a/docs/BUILD.gn
+++ b/docs/BUILD.gn
@@ -79,6 +79,8 @@
 group("target_docs") {
   deps = [
     "$dir_pigweed/targets/android:target_docs",
+    "$dir_pigweed/targets/apollo4:target_docs",
+    "$dir_pigweed/targets/apollo4_pw_system:target_docs",
     "$dir_pigweed/targets/arduino:target_docs",
     "$dir_pigweed/targets/docs:target_docs",
     "$dir_pigweed/targets/emcraft_sf2_som:docs",
@@ -112,15 +114,16 @@
   ]
 }
 
-_doxygen_input_files = [
-  # All sources with doxygen comment blocks.
+# All sources with doxygen comment blocks.
+_doxygen_input_files = [  # keep-sorted: start
   "$dir_pw_allocator/public/pw_allocator/block.h",
   "$dir_pw_allocator/public/pw_allocator/freelist.h",
+  "$dir_pw_analog/public/pw_analog/analog_input.h",
   "$dir_pw_async/public/pw_async/context.h",
   "$dir_pw_async/public/pw_async/dispatcher.h",
-  "$dir_pw_async/public/pw_async/heap_dispatcher.h",
   "$dir_pw_async/public/pw_async/fake_dispatcher_fixture.h",
   "$dir_pw_async/public/pw_async/function_dispatcher.h",
+  "$dir_pw_async/public/pw_async/heap_dispatcher.h",
   "$dir_pw_async/public/pw_async/task.h",
   "$dir_pw_async/public/pw_async/task_function.h",
   "$dir_pw_async_basic/public/pw_async_basic/dispatcher.h",
@@ -131,31 +134,38 @@
   "$dir_pw_bluetooth/public/pw_bluetooth/low_energy/central.h",
   "$dir_pw_bluetooth/public/pw_bluetooth/low_energy/connection.h",
   "$dir_pw_bluetooth/public/pw_bluetooth/low_energy/peripheral.h",
+  "$dir_pw_chre/public/pw_chre/chre.h",
+  "$dir_pw_chre/public/pw_chre/host_link.h",
+  "$dir_pw_chrono/public/pw_chrono/system_clock.h",
+  "$dir_pw_chrono/public/pw_chrono/system_timer.h",
   "$dir_pw_containers/public/pw_containers/filtered_view.h",
   "$dir_pw_containers/public/pw_containers/inline_deque.h",
   "$dir_pw_containers/public/pw_containers/inline_queue.h",
-  "$dir_pw_chrono/public/pw_chrono/system_clock.h",
-  "$dir_pw_chrono/public/pw_chrono/system_timer.h",
+  "$dir_pw_crypto/public/pw_crypto/ecdsa.h",
+  "$dir_pw_crypto/public/pw_crypto/sha256.h",
   "$dir_pw_digital_io/public/pw_digital_io/digital_io.h",
   "$dir_pw_function/public/pw_function/function.h",
   "$dir_pw_function/public/pw_function/pointer.h",
   "$dir_pw_function/public/pw_function/scope_guard.h",
   "$dir_pw_hdlc/public/pw_hdlc/decoder.h",
   "$dir_pw_hdlc/public/pw_hdlc/encoder.h",
-  "$dir_pw_interrupt/public/pw_interrupt/context.h",
   "$dir_pw_i2c/public/pw_i2c/initiator.h",
   "$dir_pw_i2c_linux/public/pw_i2c_linux/initiator.h",
+  "$dir_pw_interrupt/public/pw_interrupt/context.h",
+  "$dir_pw_log_string/public/pw_log_string/handler.h",
+  "$dir_pw_log_tokenized/public/pw_log_tokenized/base64.h",
   "$dir_pw_log_tokenized/public/pw_log_tokenized/handler.h",
   "$dir_pw_log_tokenized/public/pw_log_tokenized/metadata.h",
   "$dir_pw_protobuf/public/pw_protobuf/find.h",
   "$dir_pw_random/public/pw_random/random.h",
+  "$dir_pw_random/public/pw_random/xor_shift.h",
   "$dir_pw_rpc/public/pw_rpc/internal/config.h",
   "$dir_pw_rpc/public/pw_rpc/synchronous_call.h",
-  "$dir_pw_string/public/pw_string/format.h",
-  "$dir_pw_string/public/pw_string/string.h",
   "$dir_pw_status/public/pw_status/status.h",
   "$dir_pw_stream/public/pw_stream/stream.h",
   "$dir_pw_stream_uart_linux/public/pw_stream_uart_linux/stream.h",
+  "$dir_pw_string/public/pw_string/format.h",
+  "$dir_pw_string/public/pw_string/string.h",
   "$dir_pw_string/public/pw_string/string_builder.h",
   "$dir_pw_string/public/pw_string/util.h",
   "$dir_pw_sync/public/pw_sync/binary_semaphore.h",
@@ -171,12 +181,15 @@
   "$dir_pw_sync/public/pw_sync/virtual_basic_lockable.h",
   "$dir_pw_sys_io/public/pw_sys_io/sys_io.h",
   "$dir_pw_thread/public/pw_thread/test_thread_context.h",
+  "$dir_pw_tokenizer/public/pw_tokenizer/config.h",
   "$dir_pw_tokenizer/public/pw_tokenizer/encode_args.h",
+  "$dir_pw_tokenizer/public/pw_tokenizer/token_database.h",
   "$dir_pw_tokenizer/public/pw_tokenizer/tokenize.h",
   "$dir_pw_toolchain/public/pw_toolchain/no_destructor.h",
-  "$dir_pw_varint/public/pw_varint/varint.h",
   "$dir_pw_varint/public/pw_varint/stream.h",
-]
+  "$dir_pw_varint/public/pw_varint/varint.h",
+  "$dir_pw_work_queue/public/pw_work_queue/work_queue.h",
+]  # keep-sorted: end
 
 pw_python_action("generate_doxygen") {
   _output_dir = "docs/doxygen"
diff --git a/docs/Doxyfile b/docs/Doxyfile
index 8132a75..ef05ea3 100644
--- a/docs/Doxyfile
+++ b/docs/Doxyfile
@@ -291,6 +291,7 @@
                          "cpp_class{1}=@crossref{cpp,class,\1}" \
                          "cpp_func{1}=@crossref{cpp,func,\1}" \
                          "cpp_type{1}=@crossref{cpp,type,\1}" \
+                         "cpp_enum{1}=@crossref{cpp,type,\1}" \
                          "pw_status{1}=@crossref{c,enumerator,\1}"
 
 # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
@@ -2383,6 +2384,7 @@
                          PW_LOCKABLE= \
                          PW_PRINTF_FORMAT(...)= \
                          PW_CONSTEXPR_CPP20= \
+                         PW_INLINE_VARIABLE=inline \
                          PW_EXCLUSIVE_LOCK_FUNCTION(...)= \
                          PW_EXCLUSIVE_TRYLOCK_FUNCTION(...)= \
                          PW_UNLOCK_FUNCTION(...)= \
@@ -2391,7 +2393,9 @@
                          PW_EXTERN_C_START= \
                          PW_LOCKS_EXCLUDED(...)= \
                          PW_EXCLUSIVE_LOCKS_REQUIRED(...)= \
-                         PW_GUARDED_BY(...)=
+                         PW_GUARDED_BY(...)= \
+                         PW_NO_SANITIZE(...)= \
+                         PW_EXCLUDE_FROM_DOXYGEN=1
 
 # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
 # tag can be used to specify a list of macro names that should be expanded. The
diff --git a/docs/_static/css/pigweed.css b/docs/_static/css/pigweed.css
index c55d85f..aec3288 100644
--- a/docs/_static/css/pigweed.css
+++ b/docs/_static/css/pigweed.css
@@ -27,6 +27,7 @@
 
 .sidebar-brand-text {
   font-size: 2.5rem;
+  text-align: center;
 }
 
 /********** General document coloring ***********/
@@ -78,6 +79,12 @@
     text-indent: 0em;
 }
 
+/* Remove blank space before/after function signature open/close parens. */
+.sig > dl {
+    margin-block-start: 0;
+    margin-block-end: 0;
+}
+
 /* Make inline code standout more */
 code.literal {
   border: 1px solid var(--color-inline-code-border);
@@ -177,6 +184,10 @@
   padding: 0;
 }
 
+.pw-text-center-align {
+    text-align: center
+}
+
 ul.pw-module-section-nav-group {
   display: flex;
   margin: 0 1em;
diff --git a/docs/_static/js/pigweed.js b/docs/_static/js/pigweed.js
index 624a27e..27bdfb1 100644
--- a/docs/_static/js/pigweed.js
+++ b/docs/_static/js/pigweed.js
@@ -12,7 +12,10 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
-function scrollSiteNavToCurrentPage() {
+window.pigweed = {};
+
+// Scroll the site nav so that the current page is visible.
+window.pigweed.scrollSiteNav = () => {
   const siteNav = document.querySelector('.sidebar-scroll');
   // The node within the site nav that represents the page that the user is
   // currently looking at.
@@ -34,12 +37,15 @@
     targetNode.getBoundingClientRect().top -
     siteNav.getBoundingClientRect().top;
   siteNav.scrollTop = scrollDistance;
-}
+};
 
-window.addEventListener('load', () => {
-  // Run the scrolling function with a 1-second delay so that it doesn't
-  // interfere with Sphinx's scrolling function. E.g. when you visit
-  // https://pigweed.dev/pw_tokenizer/design.html#bit-tokenization we need
-  // to give Sphinx a chance to scroll to the #bit-tokenization section.
-  setTimeout(scrollSiteNavToCurrentPage, 1000);
+window.addEventListener('DOMContentLoaded', () => {
+  // (b/297384789) Start Mermaid diagram rendering as early as possible to
+  // prevent a race condition between Furo's scrolling logic and the Mermaid
+  // diagram rendering logic.
+  if (window.mermaid) {
+    // https://mermaid.js.org/config/usage.html#using-mermaid-run
+    window.mermaid.run();
+  }
+  window.pigweed.scrollSiteNav();
 });
diff --git a/docs/build_system.rst b/docs/build_system.rst
index 549f003..d355de4 100644
--- a/docs/build_system.rst
+++ b/docs/build_system.rst
@@ -202,15 +202,15 @@
 a project could configure the protobuf libraries that it uses. This is done by
 defining a ``default_args`` scope containing the overrides.
 
-.. code::
+.. code-block::
 
-  # The location of the BUILDCONFIG file.
-  buildconfig = "//BUILDCONFIG.gn"
+   # The location of the BUILDCONFIG file.
+   buildconfig = "//BUILDCONFIG.gn"
 
-  # Build arguments set across all Pigweed targets.
-  default_args = {
-    dir_pw_third_party_nanopb = "//third_party/nanopb-0.4.2"
-  }
+   # Build arguments set across all Pigweed targets.
+   default_args = {
+     dir_pw_third_party_nanopb = "//third_party/nanopb-0.4.2"
+   }
 
 Configuration: BUILDCONFIG.gn
 -----------------------------
@@ -273,22 +273,22 @@
 to achieve this is to wrap all dependencies within a condition checking the
 toolchain.
 
-.. code::
+.. code-block::
 
-  group("my_application_images") {
-    deps = []  # Empty in the default toolchain.
+   group("my_application_images") {
+     deps = []  # Empty in the default toolchain.
 
-    if (current_toolchain != default_toolchain) {
-      # This is only evaluated by Pigweed target toolchains, which configure
-      # all of the required options to build Pigweed code.
-      deps += [ "//images:evt" ]
-    }
-  }
+     if (current_toolchain != default_toolchain) {
+       # This is only evaluated by Pigweed target toolchains, which configure
+       # all of the required options to build Pigweed code.
+       deps += [ "//images:evt" ]
+     }
+   }
 
-  # The images group is instantiated for each of the project's Pigweed targets.
-  group("my_pigweed_target") {
-    deps = [ ":my_application_images(//toolchains:my_pigweed_target)" ]
-  }
+   # The images group is instantiated for each of the project's Pigweed targets.
+   group("my_pigweed_target") {
+     deps = [ ":my_application_images(//toolchains:my_pigweed_target)" ]
+   }
 
 .. warning::
   Pigweed's default toolchain is never used, so it is set to an empty toolchain
@@ -361,10 +361,10 @@
 * ``ubsan_heuristic`` -- `UndefinedBehaviorSanitizer`_ with the following
   additional checks enabled:
 
-   * ``integer``: Checks for undefined or suspicious integer behavior.
-   * ``float-divide-by-zero``: Checks for floating point division by zero.
-   * ``implicit-conversion``: Checks for suspicious behavior of implicit conversions.
-   * ``nullability``: Checks for null as function arg, lvalue and return type.
+  * ``integer``: Checks for undefined or suspicious integer behavior.
+  * ``float-divide-by-zero``: Checks for floating point division by zero.
+  * ``implicit-conversion``: Checks for suspicious behavior of implicit conversions.
+  * ``nullability``: Checks for null as function arg, lvalue and return type.
 
   These additional checks are heuristic and may not correspond to undefined
   behavior.
@@ -433,13 +433,13 @@
 To depend on Pigweed modules from GN code, import Pigweed's overrides file and
 reference these module variables.
 
-.. code::
+.. code-block::
 
-  # This must be imported before .gni files from any other Pigweed modules. To
-  # prevent gn format from reordering this import, it must be separated by a
-  # blank line from other imports.
+   # This must be imported before .gni files from any other Pigweed modules. To
+   # prevent gn format from reordering this import, it must be separated by a
+   # blank line from other imports.
 
-  import("//build_overrides/pigweed.gni")
+   import("//build_overrides/pigweed.gni")
 
 GN target type wrappers
 -----------------------
@@ -494,34 +494,34 @@
 
 1. Define your executable GN target using the ``pw_executable`` template.
 
-   .. code::
+   .. code-block::
 
-     # //foo/BUILD.gn
-     pw_executable("foo") {
-       sources = [ "main.cc" ]
-       deps = [ ":libfoo" ]
-     }
+      # //foo/BUILD.gn
+      pw_executable("foo") {
+        sources = [ "main.cc" ]
+        deps = [ ":libfoo" ]
+      }
 
 2. In the root ``BUILD.gn`` file, add the executable's GN target to the ``apps``
    group.
 
-   .. code::
+   .. code-block::
 
-     # //BUILD.gn
-     group("apps") {
-       deps = [
-         # ...
-         "//foo",  # Shorthand for //foo:foo
-       ]
-     }
+      # //BUILD.gn
+      group("apps") {
+        deps = [
+          # ...
+          "//foo",  # Shorthand for //foo:foo
+        ]
+      }
 
 3. Run the ninja build to compile your executable. The apps group is built by
    default, so there's no need to provide a target. The executable will be
    compiled for every supported Pigweed target.
 
-   .. code::
+   .. code-block::
 
-     ninja -C out
+      ninja -C out
 
    Alternatively, build your executable by itself by specifying its path to
    Ninja. When building a GN target manually, the Pigweed target for which it
@@ -529,9 +529,9 @@
 
    For example, to build for the Pigweed target ``host_gcc_debug``:
 
-   .. code::
+   .. code-block::
 
-     ninja -C out host_gcc_debug/obj/foo/bin/foo
+      ninja -C out host_gcc_debug/obj/foo/bin/foo
 
    .. note::
 
@@ -541,9 +541,9 @@
 4. Retrieve your compiled binary from the out directory. It is located at the
    path
 
-   .. code::
+   .. code-block::
 
-     out/<pw_target>/obj/<gn_path>/{bin,test}/<executable>
+      out/<pw_target>/obj/<gn_path>/{bin,test}/<executable>
 
    where ``pw_target`` is the Pigweed target for which the binary was built,
    ``gn_path`` is the GN path to the BUILD.gn file defining the executable,
@@ -554,9 +554,9 @@
    For example, the ``foo`` executable defined above and compiled for the
    Pigweed target stm32f429i_disc1_debug is found at:
 
-   .. code::
+   .. code-block::
 
-     out/stm32f429i_disc1_debug/obj/foo/bin/foo
+      out/stm32f429i_disc1_debug/obj/foo/bin/foo
 
 CMake
 -----
@@ -594,7 +594,7 @@
 are of particular importance when targeting embedded platforms. The most
 commonly used commands used in bazel are;
 
-.. code:: sh
+.. code-block:: sh
 
   bazel build //your:target
   bazel test //your:target
@@ -607,7 +607,7 @@
 When it comes to building/testing your Bazel target for a specific Pigweed
 target (e.g. stm32f429i-discovery) a slight variation is required.
 
-.. code:: sh
+.. code-block:: sh
 
   bazel build //your:target \
     --platforms=@pigweed//pw_build/platforms:stm32f429i-disc1
@@ -635,7 +635,7 @@
    your target).
 4. Run;
 
-   .. code:: sh
+   .. code-block:: sh
 
     bazel test //your:test --platforms=//your/platform --run_under=//your_handler
 
@@ -650,7 +650,7 @@
    `--test_tag_filters
    <https://bazel.build/docs/user-manual#test-tag-filters>`_. For example,
 
-   .. code:: sh
+   .. code-block:: sh
 
      bazel test --test_tag_filters=-integration //...
 
@@ -662,23 +662,23 @@
 
 1. Add the following lines to your '.bazelrc'.
 
-  .. code:: sh
+   .. code-block:: sh
 
-    coverage --experimental_generate_llvm_lcov
-    coverage --combined_report=lcov
+      coverage --experimental_generate_llvm_lcov
+      coverage --combined_report=lcov
 
 2. Generate a combined lcov coverage report. This will produce a combined lcov
    coverage report at the path 'bazel-out/_coverage/_coverage_report.dat'. e.g.
 
-  .. code:: sh
+   .. code-block:: sh
 
-    bazel coverage //pw_log/...
+      bazel coverage //pw_log/...
 
 3. View the results using the command line utility 'lcov'.
 
-  .. code:: sh
+   .. code-block:: sh
 
-    lcov --list bazel-out/_coverage/_coverage_report.dat
+      lcov --list bazel-out/_coverage/_coverage_report.dat
 
 Configuration
 -------------
@@ -695,7 +695,7 @@
 platform that is currently being targeted. For more information on this please
 see the `Bazel selects reference`_. e.g.
 
-.. code:: py
+.. code-block:: py
 
   pw_cc_library(
     name = "some_platform_dependant_library",
@@ -711,7 +711,7 @@
 compatible with. Consider an example where you want to specify that a target is
 compatible with only a host os;
 
-.. code:: py
+.. code-block:: py
 
   pw_cc_library(
     name = "some_host_only_lib",
@@ -730,7 +730,7 @@
 building with a wild card for a non-host platform this target will be skipped
 and the build will continue. e.g.
 
-.. code:: sh
+.. code-block:: sh
 
   bazel build //... --platforms=@pigweed//pw_build/platforms:cortex_m0
 
@@ -756,7 +756,7 @@
 A simple example of when it is useful to use a label_flag is when you want to
 swap out a single dependency from the command line. e.g.
 
-.. code:: py
+.. code-block:: py
 
   pw_cc_library(
     name = "some_default_io",
@@ -781,7 +781,7 @@
 From here the label_flag by default redirects to the target ":some_default_io",
 however it is possible to override this from the command line. e.g.
 
-.. code:: sh
+.. code-block:: sh
 
   bazel build //:some_target_that_needs_io --//:io=//:some_other_io
 
@@ -821,16 +821,16 @@
 2. Add a pigweed_config rule to your WORKSPACE, using Pigweed's default
    configuration.
 
-  .. code:: py
+   .. code-block:: py
 
-    # WORKSPACE ...
-    load("//pw_build:target_config.bzl", "pigweed_config")
+      # WORKSPACE ...
+      load("//pw_build:target_config.bzl", "pigweed_config")
 
-    # Configure Pigweeds backend.
-    pigweed_config(
-        name = "pigweed_config",
-        build_file = "@pigweed//targets:default_config.BUILD",
-    )
+      # Configure Pigweeds backend.
+      pigweed_config(
+          name = "pigweed_config",
+          build_file = "@pigweed//targets:default_config.BUILD",
+      )
 
 .. note::
   We are aware, that the experience of setting up your WORKSPACE file to work
@@ -848,20 +848,20 @@
 Continuing on with our scenario, consider that you maybe want to try using the
 '//pw_chrono' module. So you create a target in your repository like so;
 
-.. code::
+.. code-block::
 
-  # BUILD
-  pw_cc_library(
-    name = "time_is_relative",
-    srcs = ["relative_time_on_earth.cc"],
-    deps = ["@pigweed//pw_chrono"],
-  )
+   # BUILD
+   pw_cc_library(
+     name = "time_is_relative",
+     srcs = ["relative_time_on_earth.cc"],
+     deps = ["@pigweed//pw_chrono"],
+   )
 
 Now this should work out of the box for any host operating system. e.g. Running;
 
-.. code::
+.. code-block::
 
-  bazel build //:time_is_relative
+   bazel build //:time_is_relative
 
 will produce a working library. But as you're probably here because Pigweed offers
 a set of embedded libraries you might be interested in running your code on some
@@ -870,7 +870,7 @@
 of our default '//pw_chrono' backend for FreeRTOS. You could build the following
 with;
 
-.. code:: sh
+.. code-block:: sh
 
   bazel build //:time_is_relative \
   --platforms=@pigweed//pw_build/platforms:freertos
@@ -880,29 +880,29 @@
 tree (that is important for configuration) in a project such as this would
 look like.
 
-.. code::
+.. code-block::
 
-  @pigweed//pw_chrono:pw_chrono_facade <-----------.
-   ^                                               |
-   |                            @pigweed//pw_chrono_freertos:system_clock
-   |                            (Actual backend)
-   |                                               ^
-   |                                               |
-   |                            @pigweed//pw_chrono:system_clock_backend_multiplexer
-   |                            Select backend based on OS:
-   |                            [FreeRTOS (X), Embos ( ), STL ( ), Threadx ( )]
-   |                                               ^
-   |                                               |
-  @pigweed//pw_chrono  -------> @pigweed_config//:pw_chrono_system_clock_backend
-   ^                            (Injectable)
-   |
-  //:time_is_relative
+   @pigweed//pw_chrono:pw_chrono_facade <-----------.
+    ^                                               |
+    |                            @pigweed//pw_chrono_freertos:system_clock
+    |                            (Actual backend)
+    |                                               ^
+    |                                               |
+    |                            @pigweed//pw_chrono:system_clock_backend_multiplexer
+    |                            Select backend based on OS:
+    |                            [FreeRTOS (X), Embos ( ), STL ( ), Threadx ( )]
+    |                                               ^
+    |                                               |
+   @pigweed//pw_chrono  -------> @pigweed_config//:pw_chrono_system_clock_backend
+    ^                            (Injectable)
+    |
+   //:time_is_relative
 
 So when evaluating this setup Bazel checks the dependencies for '//pw_chrono'
 and finds that it depends on "@pigweed_config//:pw_chrono_system_clock_backend" which looks
 like this;
 
-.. code:: py
+.. code-block:: py
 
   # pw_chrono config.
   label_flag(
@@ -917,7 +917,7 @@
 have four different backends we have to use the select semantics to choose the
 right one. In this case it looks like;
 
-.. code:: py
+.. code-block:: py
 
   pw_cc_library(
     name = "system_clock_backend_multiplexer",
@@ -943,25 +943,25 @@
 with the facade an easy and temporary way to override the dependency tree is
 to override the label flag in '@pigweed_config'. For example;
 
-.. code:: sh
+.. code-block:: sh
 
   bazel build //:time_is_relative \
     --@pigweed_config//pw_chrono_system_clock_backend=//pw_chrono_my_hardware_rtc:system_clock
 
 This temporarily modifies the build graph to look something like this;
 
-.. code::
+.. code-block::
 
-  @pigweed//pw_chrono:pw_chrono_facade <-----.
-   ^                                         |
-   |                      @your_workspace//pw_chrono_my_hardware_rtc:system_clock
-   |                      (Actual backend)
-   |                                         ^
-   |                                         |
-  @pigweed//pw_chrono  -> @pigweed_config//:pw_chrono_system_clock_backend
-   ^                      (Injectable)
-   |
-  //:time_is_relative
+   @pigweed//pw_chrono:pw_chrono_facade <-----.
+    ^                                         |
+    |                      @your_workspace//pw_chrono_my_hardware_rtc:system_clock
+    |                      (Actual backend)
+    |                                         ^
+    |                                         |
+   @pigweed//pw_chrono  -> @pigweed_config//:pw_chrono_system_clock_backend
+    ^                      (Injectable)
+    |
+   //:time_is_relative
 
 Now while this is a nice temporary change, you might find yourself in need
 of a more permanent configuration. Particularly if you want to override multiple
@@ -1042,26 +1042,27 @@
 Building your target now will result in a slightly different build graph. For
 example, running;
 
-.. code:: sh
+.. code-block:: sh
 
   bazel build //:time_is_relative --platforms=//platforms:primary_computer
 
 Will result in a build graph that looks like;
 
-.. code::
+.. code-block::
 
-  @pigweed//pw_chrono:pw_chrono_facade <---.
-   ^                                        |
-   |                     @your_workspace//pw_chrono_my_hardware_rtc:system_clock
-   |                     (Actual backend)
-   |                                        ^
-   |                                        |
-   |                     @your_workspace//pw_chrono:system_clock_backend_multiplexer
-   |                     Select backend based on OS:
-   |                     [Primary (X), Backup ( ), Host only default ( )]
-   |                                        ^
-   |                                        |
-  @pigweed//pw_chrono -> @pigweed_config//:pw_chrono_system_clock_backend
-   ^                     (Injectable)
-   |
-  //:time_is_relative
+   @pigweed//pw_chrono:pw_chrono_facade <---.
+    ^                                        |
+    |                     @your_workspace//pw_chrono_my_hardware_rtc:system_clock
+    |                     (Actual backend)
+    |                                        ^
+    |                                        |
+    |                     @your_workspace//pw_chrono:system_clock_backend_multiplexer
+    |                     Select backend based on OS:
+    |                     [Primary (X), Backup ( ), Host only default ( )]
+    |                                        ^
+    |                                        |
+   @pigweed//pw_chrono -> @pigweed_config//:pw_chrono_system_clock_backend
+    ^                     (Injectable)
+    |
+   //:time_is_relative
+
diff --git a/docs/changelog.rst b/docs/changelog.rst
index d95b12b..3d82772 100644
--- a/docs/changelog.rst
+++ b/docs/changelog.rst
@@ -9,11 +9,614 @@
 .. _docs-changelog-latest:
 
 ----------------------------
-Jul 27, 2023 to Aug 11, 2023
+Aug 25, 2023 to Sep 08, 2023
 ----------------------------
 
 .. changelog_highlights_start
 
+Highlights (Aug 25, 2023 to Sep 08, 2023):
+
+* SEED :ref:`seed-0107` has been approved! Pigweed will adopt a new sockets API as
+  its primary networking abstraction. The sockets API will be backed by a new,
+  lightweight embedded-focused network protocol stack inspired by TCP/IP.
+* SEED :ref:`seed-0108` has also been approved! Coming soon, the new ``pw_emu``
+  module will make it easier to work with emulators.
+
+Please join us at the next `Pigweed Live <https://discord.gg/M9NSeTA>`_ on
+**Mon, Sep 11 1PM PST** to discuss All Things Pigweed. Go to the
+``#pigweed-live`` channel to get a link to the video meeting.
+
+.. changelog_highlights_end
+
+Active SEEDs
+============
+Help shape the future of Pigweed! Please leave feedback on the following active RFCs (SEEDs):
+
+* `SEED-0103: pw_protobuf Object Model <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/133971>`__
+* `SEED-0104: display support <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/150793>`__
+* `SEED-0105: Add nested tokens and tokenized args to pw_tokenizer and pw_log <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/154190>`__
+* `SEED-0106: Project Template <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/155430>`__
+* `SEED-0109: Communication Buffers <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168357>`__
+
+Modules
+=======
+
+pw_assert
+---------
+We fixed circular dependencies in Bazel.
+
+* `Remove placeholder target <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168844>`__
+* `Fix Bazel circular deps <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/160794>`__
+  (issue `#234877642 <https://issues.pigweed.dev/issues/234877642>`__)
+* `Introduce pw_assert_backend_impl <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168774>`__
+  (issue `#234877642 <https://issues.pigweed.dev/issues/234877642>`__)
+
+pw_bluetooth
+------------
+We added :ref:`Emboss <module-pw_third_party_emboss>` definitions.
+
+* `Add SimplePairingCompleteEvent Emboss definition <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/169916>`__
+* `Add UserPasskeyRequestEvent Emboss definition <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/169912>`__
+* `Add UserConfirmationRequestEvent Emboss definition <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/169871>`__
+* `Use hci.LinkKey in LinkKeyNotificationEvent.link_key <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168858>`__
+* `Add IoCapabilityResponseEvent Emboss definition <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168354>`__
+* `Add IoCapabilityRequestEvent Emboss definition <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168353>`__
+* `Add EncryptionKeyRefreshCompleteEvent Emboss definition <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168331>`__
+* `Add ExtendedInquiryResultEvent Emboss definition <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168330>`__
+
+pw_build
+--------
+* `Force watch and default recipe names <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/169911>`__
+
+pw_build_mcuxpresso
+-------------------
+* `Output formatted bazel target <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/169740>`__
+
+pw_cpu_exception
+----------------
+We added Bazel support.
+
+* `bazel build support <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/169733>`__
+  (issue `#242183021 <https://issues.pigweed.dev/issues/242183021>`__)
+
+pw_crypto
+---------
+The complete ``pw_crypto`` API reference is now documented on :ref:`module-pw_crypto`.
+
+* `Add API reference <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/169572>`__
+  (issue `#299147635 <https://issues.pigweed.dev/issues/299147635>`__)
+
+pw_env_setup
+------------
+Banners should now print correctly on Windows.
+
+* `Add i2c protos to python deps <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/169231>`__
+* `Fix banner printing on Windows <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/169172>`__
+  (issue `#289008307 <https://issues.pigweed.dev/issues/289008307>`__)
+
+pw_file
+-------
+* `Add pw_file python package <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168831>`__
+
+pw_function
+-----------
+The :cpp:func:`pw::bind_member()` template is now exposed in the public API.
+``bind_member()`` is useful for binding the ``this`` argument of a callable.
+We added a section to the docs explaining :ref:`why pw::Function is not a
+literal <module-pw_function-non-literal>`.
+
+* `Explain non-literal design rationale <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168777>`__
+* `Expose \`bind_member\` <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/169123>`__
+
+pw_fuzzer
+---------
+We refactored ``pw_fuzzer`` logic to be more robust and expanded the
+:ref:`module-pw_fuzzer-guides-reproducing_oss_fuzz_bugs` doc.
+
+* `Refactor OSS-Fuzz support <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/167348>`__
+  (issue `#56955 <https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=56955>`__)
+
+pw_i2c
+------
+* `Use new k{FieldName}MaxSize constants to get buffer size <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168913>`__
+
+pw_kvs
+------
+We are discouraging the use of the shorter macros because they collide with
+Abseil's logging API.
+
+* `Remove usage of pw_log/shorter.h API <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/169920>`__
+  (issue `#299520256 <https://issues.pigweed.dev/issues/299520256>`__)
+
+pw_libc
+-------
+``snprintf()`` support was added.
+
+* `Import LLVM libc's snprintf <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/137735>`__
+
+pw_log_string
+-------------
+We added more detail to :ref:`module-pw_log_string`.
+
+* `Fix the default impl to handle zero length va args <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/169975>`__
+* `Provide more detail in the getting started docs <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168934>`__
+  (issue `#298124226 <https://issues.pigweed.dev/issues/298124226>`__)
+
+pw_log_zephyr
+-------------
+It's now possible to define ``pw_log_tokenized_HandleLog()`` outside of Pigweed
+so that Zephyr projects have more flexibility around how they capture tokenized
+logs.
+
+* `Split tokenize handler into its own config <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168612>`__
+
+pw_package
+----------
+* `Handle failed cipd acl checks <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168530>`__
+
+pw_persistent_ram
+-----------------
+* `Add persistent_buffer flat_file_system_entry <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168832>`__
+
+pw_presubmit
+------------
+We added a reStructuredText formatter.
+
+* `Make builds_from_previous_iteration ints <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/169721>`__
+  (issue `#299336222 <https://issues.pigweed.dev/issues/299336222>`__)
+* `Move colorize_diff to tools <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168839>`__
+* `RST formatting <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168541>`__
+
+pw_protobuf
+-----------
+``max_size`` and ``max_count`` are now exposed in generated headers.
+The new ``proto_message_field_props()`` helper function makes it easier to
+iterate through a message's fields and properties.
+
+* `Expose max_size, max_count in generated header file <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168973>`__
+  (issue `#297364973 <https://issues.pigweed.dev/issues/297364973>`__)
+* `Introduce proto_message_field_props() <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168972>`__
+* `Change PROTO_FIELD_PROPERTIES to a dict of classes <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168971>`__
+* `Rename 'node' to 'message' in forward_declare() <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168970>`__
+* `Simplify unnecessary Tuple return type <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168910>`__
+
+pw_random
+---------
+We're now auto-generating the ``XorShiftStarRng64`` API reference via Doxygen.
+
+* `Doxygenify xor_shift.h <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/164510>`__
+
+pw_rpc
+------
+The new ``request_completion()`` method in Python enables you to send a
+completion packet for server streaming calls.
+
+* `Add request_completion to ServerStreamingCall python API <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168439>`__
+
+pw_spi
+------
+* `Fix Responder.SetCompletionHandler() signature <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/169130>`__
+
+pw_symbolizer
+-------------
+The ``LlvmSymbolizer`` Python class has a new ``close()`` method to
+deterministically close the background process.
+
+* `LlvmSymbolizer tool improvement <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168863>`__
+
+pw_sync
+-------
+We added :ref:`module-pw_sync-genericbasiclockable`.
+
+* `Add GenericBasicLockable <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/165930>`__
+
+pw_system
+---------
+``pw_system`` now supports different channels for primary and logging RPC.
+
+* `Multi-channel configuration <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/167158>`__
+  (issue `#297076185 <https://issues.pigweed.dev/issues/297076185>`__)
+
+pw_thread_freertos
+------------------
+* `Add missing dep to library <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/169239>`__
+
+pw_tokenizer
+------------
+We added :c:macro:`PW_TOKENIZE_FORMAT_STRING_ANY_ARG_COUNT` and
+:c:macro:`PW_TOKENIZER_REPLACE_FORMAT_STRING`. We refactored the docs
+so that you don't have to jump around the docs as much when learning about
+key topics like tokenization and token databases. Database loads now happen
+in a separate thread to avoid blocking the main thread. Change detection for
+directory databases now works more as expected. The config API is now exposed
+in the API reference.
+
+* `Remove some unused deps <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/169573>`__
+* `Simplify implementing a custom tokenization macro <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/169121>`__
+* `Refactor the docs to be task-focused <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/169124>`__
+* `Reload database in dedicated thread <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168866>`__
+* `Combine duplicated docs sections <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168865>`__
+* `Support change detection for directory dbs <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168630>`__
+* `Move config value check to .cc file <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168615>`__
+
+pw_unit_test
+------------
+We added ``testing::Test::HasFailure()``, ``FRIEND_TEST``, and ``<<`` messages
+to improve gTest compatibility.
+
+* `Add testing::Test::HasFailure() <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168810>`__
+* `Add FRIEND_TEST <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/169270>`__
+* `Allow <<-style messages in test expectations <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168860>`__
+
+pw_varint
+---------
+``pw_varint`` now has a :ref:`C-only API <module-pw_varint-api-c>`.
+
+* `Add C-only implementation; cleanup <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/169122>`__
+
+pw_web
+------
+Logs can now be downloaded as plaintext.
+
+* `Fix TypeScript errors in Device files <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/169930>`__
+* `Json Log Source example <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/169176>`__
+* `Enable downloading logs as plain text <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168130>`__
+* `Fix UI/state bugs <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/167911>`__
+* `NPM version bump to 0.0.11 <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168591>`__
+* `Add basic bundling tests for log viewer bundle <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168539>`__
+
+Build
+=====
+
+Bazel
+-----
+* `Fix alwayslink support in MacOS host_clang <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168614>`__
+  (issue `#297413805 <https://issues.pigweed.dev/issues/297413805>`__)
+* `Fix lint issues after roll <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/169611>`__
+
+Docs
+====
+* `Fix broken links <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/169579>`__
+  (issue `#299181944 <https://issues.pigweed.dev/issues/299181944>`__)
+* `Recommend enabling long file paths on Windows <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/169578>`__
+* `Update Windows command for git hook <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168592>`__
+* `Fix main content scrolling <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168555>`__
+  (issue `#297384789 <https://issues.pigweed.dev/issues/297384789>`__)
+* `Update changelog <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168540>`__
+  (issue `#292247409 <https://issues.pigweed.dev/issues/292247409>`__)
+* `Use code-block:: instead of code:: everywhere <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168617>`__
+* `Add function signature line breaks <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168554>`__
+* `Cleanup indentation <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168537>`__
+
+SEEDs
+=====
+* `SEED-0108: Emulators Frontend <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/158190>`__
+
+Third party
+===========
+* `Add public configs for FuzzTest deps <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/169711>`__
+* `Reconfigure deps & add cflags to config <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/152691>`__
+
+Miscellaneous
+=============
+* `Fix formatting with new clang version <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/169078>`__
+
+mimxrt595_evk_freertos
+----------------------
+* `Use config_assert helper <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/160378>`__
+
+----------------------------
+Aug 11, 2023 to Aug 25, 2023
+----------------------------
+
+Highlights (Aug 11, 2023 to Aug 25, 2023):
+
+* ``pw_tokenizer`` now has Rust support.
+* The ``pw_web`` log viewer now has advanced filtering and a jump-to-bottom
+  button.
+* The ``run_tests()`` method of ``pw_unit_test`` now returns a new
+  ``TestRecord`` dataclass which provides more detailed information
+  about the test run.
+* A new Ambiq Apollo4 target that uses the Ambiq Suite SDK and FreeRTOS
+  has been added.
+
+Please join us at the next Pigweed Live on **Monday, Aug 28 1PM PST** to
+discuss these changes and anything else on your mind. Join our
+`Discord <https://discord.gg/M9NSeTA>`_ and head over to the ``#pigweed-live``
+channel to get a link to the video meeting.
+
+Active SEEDs
+============
+Help shape the future of Pigweed! Please leave feedback on the following active RFCs (SEEDs):
+
+* `SEED-0103: pw_protobuf Object Model <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/133971>`__
+* `SEED-0104: display support <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/150793>`__
+* `SEED-0105: Add nested tokens and tokenized args to pw_tokenizer and pw_log <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/154190>`__
+* `SEED-0106: Project Template <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/155430>`__
+* `SEED-0108: Emulators Frontend <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/158190>`__
+
+Modules
+=======
+
+pw_bloat
+--------
+* `Fix typo in method name <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/166832>`__
+
+pw_bluetooth
+------------
+The :ref:`module-pw_third_party_emboss` files were refactored.
+
+* `Add SynchronousConnectionCompleteEvent Emboss definition <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/167862>`__
+* `Add all Emboss headers/deps to emboss_test & fix errors <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168355>`__
+* `Add InquiryResultWithRssiEvent Emboss definition <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/167859>`__
+* `Add DataBufferOverflowEvent Emboss definition <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/167858>`__
+* `Add LinkKeyNotificationEvent Emboss definition <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/167855>`__
+* `Add LinkKeyRequestEvent emboss definition <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/167349>`__
+* `Remove unused hci emboss files <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/167090>`__
+* `Add RoleChangeEvent emboss definition <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/167230>`__
+* `Add missing test dependency <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/167130>`__
+* `Add new hci subset files <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/166730>`__
+
+pw_build
+--------
+The ``pw_build`` docs were split up so that each build system has its own page
+now. The new ``output_logs`` flag lets you disable log output for ``pw_python_venv``.
+
+* `Handle read-only files when deleting venvs <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/167863>`__
+* `Split build system docs into separate pages <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/165071>`__
+* `Use pw_toolchain_clang_tools <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/167671>`__
+* `Add missing pw_linker_script flag <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/167632>`__
+  (issue `#296928739 <https://issues.pigweed.dev/issues/296928739>`__)
+* `Fix output_logs_ unused warning <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/166991>`__
+  (issue `#295524695 <https://issues.pigweed.dev/issues/295524695>`__)
+* `Don't include compile cmds when preprocessing ldscripts <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/166490>`__
+* `Add pw_python_venv.output_logs <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/165330>`__
+  (issue `#295524695 <https://issues.pigweed.dev/issues/295524695>`__)
+* `Increase size of test linker script memory region <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/164823>`__
+* `Add integration test metadata <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/154553>`__
+
+pw_cli
+------
+* `Default change pw_protobuf default <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/126806>`__
+  (issue `#266298474 <https://issues.pigweed.dev/issues/266298474>`__)
+
+pw_console
+----------
+* `Update web viewer to use pigweedjs <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/162995>`__
+
+pw_containers
+-------------
+* `Silence MSAN false positive in pw::Vector <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/167111>`__
+
+pw_docgen
+---------
+Docs builds should be faster now because Sphinx has been configured to use
+all available cores.
+
+* `Remove top nav bar <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168446>`__
+* `Parallelize Sphinx <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/164738>`__
+
+pw_env_setup
+------------
+Sphinx was updated from v5.3.0 to v7.1.2. We switched back to the upstream Furo
+theme and updated to v2023.8.19. The content of ``pigweed_environment.gni`` now
+gets logged. There was an update to ensure that ``arm-none-eabi-gdb`` errors
+propagate correctly. There is now a way to override Bazel build files for CIPD
+repos.
+
+* `Upgrade sphinx and dependencies for docs <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168431>`__
+* `Upgrade sphinx-design <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168339>`__
+* `Copy pigweed_environment.gni to logs <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/167850>`__
+* `arm-gdb: propagate errors <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/165411>`__
+* `arm-gdb: exclude %VIRTUAL_ENV%\Scripts from search paths <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/164370>`__
+* `Add ability to override bazel BUILD file for CIPD repos <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/165530>`__
+
+pw_function
+-----------
+* `Rename template parameter <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168334>`__
+
+pw_fuzzer
+---------
+* `Add test metadata <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/154555>`__
+
+pw_hdlc
+-------
+A new ``close()`` method was added to ``HdlcRpcClient`` to signal to the thread
+to stop.
+
+* `Use explicit logger name <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/166591>`__
+* `Mitigate errors on Python background thread <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/162712>`__
+  (issue `#293595266 <https://issues.pigweed.dev/issues/293595266>`__)
+
+pw_ide
+------
+A new ``--install-editable`` flag was added to install Pigweed Python modules
+in editable mode so that code changes are instantly realized.
+
+* `Add cmd to install Py packages as editable <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/163572>`__
+* `Make VSC extension run on older versions <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/167054>`__
+
+pw_perf_test
+------------
+* `Add test metadata <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/154554>`__
+
+pw_presubmit
+------------
+``pw_presubmit`` now has an ESLint check for linting and a Prettier check for
+formatting JavaScript and TypeScript files.
+
+* `Add msan to OTHER_CHECKS <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168325>`__
+  (issue `#234876100 <https://issues.pigweed.dev/issues/234876100>`__)
+* `Upstream constraint file output fix <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/166270>`__
+* `JavaScript and TypeScript lint check <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/165410>`__
+* `Apply TypeScript formatting <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/164825>`__
+* `Use prettier for JS and TS files <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/165390>`__
+
+pw_rpc
+------
+A ``request_completion()`` method was added to the ``ServerStreamingCall``
+Python API. A bug was fixed related to encoding failures when dynamic buffers
+are enabled.
+
+* `Add request_completion to ServerStreamingCall python API <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168439>`__
+* `Various small enhancements <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/167162>`__
+* `Remove deprecated method from Service <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/165510>`__
+* `Prevent encoding failure when dynamic buffer enabled <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/166833>`__
+  (issue `#269633514 <https://issues.pigweed.dev/issues/269633514>`__)
+
+pw_rpc_transport
+----------------
+* `Add simple_framing Soong rule <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/165350>`__
+
+pw_rust
+-------
+* `Update rules_rust to 0.26.0 <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/166831>`__
+
+pw_stm32cube_build
+------------------
+* `Windows path fixes <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/167865>`__
+
+pw_stream
+---------
+Error codes were updated to be more accurate and descriptive.
+
+* `Use more appropriate error codes for Cursor <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/164592>`__
+
+pw_stream_uart_linux
+--------------------
+Common baud rates such as ``9600``, ``19200``, and so on are now supported.
+
+* `Add support for baud rates other than 115200 <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/165070>`__
+
+pw_sync
+-------
+Tests were added to make sure that ``pw::sync::Borrowable`` works with lock
+annotations.
+
+* `Test Borrowable with Mutex, TimedMutex, and InterruptSpinLock <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/153575>`__
+  (issue `#261078330 <https://issues.pigweed.dev/issues/261078330>`__)
+
+pw_system
+---------
+The ``pw_system.device.Device`` Python class can now be used as a
+`context manager <https://realpython.com/python-with-statement/>`_.
+
+* `Make pw_system.device.Device a context manager <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/163410>`__
+
+pw_tokenizer
+------------
+``pw_tokenizer`` now has Rust support. The ``pw_tokenizer`` C++ config API
+is now documented at :ref:`module-pw_tokenizer-api-configuration` and
+the C++ token database API is now documented at
+:ref:`module-pw_tokenizer-api-token-databases`. When creating a token
+database, parent directories are now automatically created if they don't
+already exist. ``PrefixedMessageDecoder`` has been renamed to
+``NestedMessageDecoder``.
+
+* `Move config value check to .cc file <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168615>`__
+* `Create parent directory as needed <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168510>`__
+* `Rework pw_tokenizer.detokenize.PrefixedMessageDecoder <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/167150>`__
+* `Minor binary database improvements <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/167053>`__
+* `Update binary DB docs and convert to Doxygen <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/163570>`__
+* `Deprecate tokenizer buffer size config <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/163257>`__
+* `Fix instance of -Wconstant-logical-operand <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/166731>`__
+* `Add Rust support <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/145389>`__
+
+pw_toolchain
+------------
+A new Linux host toolchain built using ``pw_toolchain_bazel`` has been
+started. CIPD-provided Rust toolchains are now being used.
+
+* `Link against system libraries using libs not ldflags <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/151050>`__
+* `Use %package% for cxx_builtin_include_directories <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168340>`__
+* `Extend documentation for tool prefixes <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/167633>`__
+* `Add Linux host toolchain <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/164824>`__
+  (issue `#269204725 <https://issues.pigweed.dev/issues/269204725>`__)
+* `Use CIPD provided Rust toolchains <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/166852>`__
+* `Switch macOS to use builtin_sysroot <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/165414>`__
+* `Add cmake helpers for getting clang compile+link flags <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/163811>`__
+
+pw_unit_test
+------------
+``run_tests()`` now returns the new ``TestRecord`` dataclass which provides
+more detailed information about the test run. ``SetUpTestSuite()`` and
+``TearDownTestSuite()`` were added to improve GoogleTest compatibility.
+
+* `Add TestRecord of Test Results <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/166273>`__
+* `Reset static value before running tests <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/166590>`__
+  (issue `#296157327 <https://issues.pigweed.dev/issues/296157327>`__)
+* `Add per-fixture setup/teardown <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/165210>`__
+
+pw_web
+------
+Log viewers are now drawn every 100 milliseconds at most to prevent crashes
+when many logs arrive simultaneously. The log viewer now has a jump-to-bottom
+button. Advanced filtering has been added.
+
+* `NPM version bump to 0.0.11 <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168591>`__
+* `Add basic bundling tests for log viewer bundle <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168539>`__
+* `Limit LogViewer redraws to 100ms <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/167852>`__
+* `Add jump to bottom button, fix UI bugs and fix state bugs <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/164272>`__
+* `Implement advanced filtering <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/162070>`__
+* `Remove object-path dependency from Device API <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/165013>`__
+* `Log viewer toolbar button toggle style <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/165412>`__
+* `Log-viewer line wrap toggle <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/164010>`__
+
+Targets
+=======
+
+targets
+-------
+A new Ambiq Apollo4 target that uses the Ambiq Suite SDK and FreeRTOS
+has been added.
+
+* `Ambiq Apollo4 support <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/129490>`__
+
+Language support
+================
+
+Python
+------
+* `Upgrade mypy to 1.5.0 <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/166272>`__
+* `Upgrade pylint to 2.17.5 <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/166271>`__
+
+Docs
+====
+Doxygen-generated function signatures now present each argument on a separate
+line. Tabbed content looks visually different than before.
+
+* `Use code-block:: instead of code:: everywhere <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168617>`__
+* `Add function signature line breaks <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168554>`__
+* `Cleanup indentation <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168537>`__
+* `Remove unused myst-parser <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168392>`__
+* `Use sphinx-design for tabbed content <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168341>`__
+* `Update changelog <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/164810>`__
+
+SEEDs
+=====
+:ref:`SEED-0107 (Pigweed Communications) <seed-0107>` was accepted and
+SEED-0109 (Communication Buffers) was started.
+
+* `Update protobuf SEED title in index <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/166470>`__
+* `Update status to Accepted <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/167770>`__
+* `Pigweed communications <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/157090>`__
+* `Claim SEED number <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168358>`__
+
+Miscellaneous
+=============
+
+Build
+-----
+* `Make it possible to run MSAN in GN <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/167112>`__
+
+soong
+-----
+* `Remove host/vendor properties from defaults <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/165270>`__
+
+----------------------------
+Jul 27, 2023 to Aug 11, 2023
+----------------------------
+
 Highlights (Jul 27, 2023 to Aug 11, 2023):
 
 * We're prototyping a Pigweed extension for VS Code. Learn more at
@@ -32,8 +635,6 @@
 `Discord <https://discord.gg/M9NSeTA>`_ and head over to the ``#pigweed-live``
 channel to get a link to the video meeting.
 
-.. changelog_highlights_end
-
 Active SEEDs
 ============
 Help shape the future of Pigweed! Please leave feedback on the following active RFCs (SEEDs):
diff --git a/docs/conf.py b/docs/conf.py
index ed42c6b..839ca67 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -17,7 +17,7 @@
 import sphinx
 
 # The suffix of source filenames.
-source_suffix = ['.rst', '.md']
+source_suffix = ['.rst']
 
 # The master toctree document.  # inclusive-language: ignore
 master_doc = 'index'
@@ -49,24 +49,8 @@
     'sphinxarg.ext',  # Automatic documentation of Python argparse
     'sphinxcontrib.mermaid',
     'sphinx_design',
-    'myst_parser',
     'breathe',
     'sphinx_copybutton',  # Copy-to-clipboard button on code blocks
-    'sphinx_tabs.tabs',
-]
-
-myst_enable_extensions = [
-    # "amsmath",
-    "colon_fence",
-    # "deflist",
-    "dollarmath",
-    # "html_admonition",
-    # "html_image",
-    # "linkify",
-    # "replacements",
-    # "smartquotes",
-    # "substitution",
-    # "tasklist",
 ]
 
 # When a user clicks the copy-to-clipboard button the `$ ` prompt should not be
@@ -233,7 +217,8 @@
 
 mermaid_init_js = '''
 mermaid.initialize({
-  startOnLoad: true,
+  // Mermaid is manually started in //docs/_static/js/pigweed.js.
+  startOnLoad: false,
   // sequenceDiagram Note text alignment
   noteAlign: "left",
   // Set mermaid theme to the current furo theme
@@ -298,6 +283,11 @@
 # Disable Python type hints
 # autodoc_typehints = 'none'
 
+# Break class and function signature arguments into one arg per line if the
+# total length exceeds 130 characters. 130 seems about right for keeping one or
+# two parameters on a single line.
+maximum_signature_line_length = 130
+
 
 def do_not_skip_init(app, what, name, obj, would_skip, options):
     if name == "__init__":
diff --git a/docs/contributing.rst b/docs/contributing.rst
index dea4391..e9e6232 100644
--- a/docs/contributing.rst
+++ b/docs/contributing.rst
@@ -94,24 +94,20 @@
 commit message. You should set this up to be done automatically using the
 instructions below.
 
-**Linux/macOS**
-
-The command below assumes that your current working directory is the root
+The commands below assume that your current working directory is the root
 of your Pigweed repository.
 
-.. code:: bash
+**Linux/macOS**
 
-  $ f=`git rev-parse --git-dir`/hooks/commit-msg ; mkdir -p $(dirname $f) ; curl -Lo $f https://gerrit-review.googlesource.com/tools/hooks/commit-msg ; chmod +x $f
+.. code-block:: bash
+
+   f=`git rev-parse --git-dir`/hooks/commit-msg ; mkdir -p $(dirname $f) ; curl -Lo $f https://gerrit-review.googlesource.com/tools/hooks/commit-msg ; chmod +x $f
 
 **Windows**
 
-Download `the Gerrit commit hook
-<https://gerrit-review.googlesource.com/tools/hooks/commit-msg>`_ and then copy
-it to the ``.git\hooks`` directory in the Pigweed repository.
+.. code-block:: batch
 
-.. code::
-
-  copy %HOMEPATH%\Downloads\commit-msg %HOMEPATH%\pigweed\.git\hooks\commit-msg
+   git rev-parse --git-dir > gitrepopath.txt & set /p "g="< gitrepopath.txt & del gitrepopath.txt & call set "f=%g%/hooks" & call mkdir "%f%" & call curl -Lo "%f%/commit-msg" https://gerrit-review.googlesource.com/tools/hooks/commit-msg
 
 Commit Message
 --------------
@@ -254,7 +250,7 @@
 
 Apache header for C and C++ files:
 
-.. code:: none
+.. code-block:: none
 
   // Copyright 2021 The Pigweed Authors
   //
@@ -272,7 +268,7 @@
 
 Apache header for Python and GN files:
 
-.. code:: none
+.. code-block:: none
 
   # Copyright 2020 The Pigweed Authors
   #
@@ -346,14 +342,14 @@
 
 Linux/macOS
 ^^^^^^^^^^^
-.. code:: bash
+.. code-block:: bash
 
   $ pw presubmit --install
 
 This will be effectively the same as running the following command before every
 ``git push``:
 
-.. code:: bash
+.. code-block:: bash
 
   $ pw presubmit
 
@@ -365,7 +361,7 @@
 If you ever need to bypass the presubmit hook (due to it being broken, for
 example) you may push using this command:
 
-.. code:: bash
+.. code-block:: bash
 
   $ git push origin HEAD:refs/for/main --no-verify
 
@@ -374,7 +370,7 @@
 When creating new feature branches, make sure to specify the upstream branch to
 track, e.g.
 
-.. code:: bash
+.. code-block:: bash
 
   $ git checkout -b myfeature origin/main
 
diff --git a/docs/editors.rst b/docs/editors.rst
index e0186f6..94ba020 100644
--- a/docs/editors.rst
+++ b/docs/editors.rst
@@ -171,14 +171,14 @@
 
 Once you have a compilation database, run this command to process it:
 
-.. code:: bash
+.. code-block:: bash
 
    pw ide cpp --process <path to compilation database>
 
 Or better yet, just let ``pw_ide`` find any compilation databases you have
 in your build and process them:
 
-.. code:: bash
+.. code-block:: bash
 
    pw ide cpp --process
 
@@ -202,18 +202,18 @@
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 Discover which targets are available for code analysis:
 
-.. code::
+.. code-block::
 
    $ pw ide cpp --list
 
    C/C++ targets available for language server analysis:
-	         pw_strict_host_gcc_debug
-	         pw_strict_host_clang_debug
-	         stm32f429i_disc1_debug
+         pw_strict_host_gcc_debug
+         pw_strict_host_clang_debug
+         stm32f429i_disc1_debug
 
 Select the target you want to use for code analysis:
 
-.. code::
+.. code-block::
 
    $ pw ide cpp --set pw_strict_host_gcc_debug
 
@@ -221,7 +221,7 @@
 
 Check which target is currently used for code analysis:
 
-.. code::
+.. code-block::
 
    $ pw ide cpp
 
diff --git a/docs/embedded_cpp_guide.rst b/docs/embedded_cpp_guide.rst
index d5925b0..afe901a 100644
--- a/docs/embedded_cpp_guide.rst
+++ b/docs/embedded_cpp_guide.rst
@@ -117,14 +117,14 @@
 Pigweed compiles with a strict set of warnings. The warnings include the
 following:
 
-  * ``-Wall`` and ``-Wextra`` -- Standard sets of compilation warnings, which
-    are recommended for all projects.
-  * ``-Wimplicit-fallthrough`` -- Requires explicit ``[[fallthrough]]``
-    annotations for fallthrough between switch cases. Prevents unintentional
-    fallthroughs if a ``break`` or ``return`` is forgotten.
-  * ``-Wundef`` -- Requires macros to be defined before using them. This
-    disables the standard, problematic behavior that replaces undefined (or
-    misspelled) macros with ``0``.
+* ``-Wall`` and ``-Wextra`` -- Standard sets of compilation warnings, which
+  are recommended for all projects.
+* ``-Wimplicit-fallthrough`` -- Requires explicit ``[[fallthrough]]``
+  annotations for fallthrough between switch cases. Prevents unintentional
+  fallthroughs if a ``break`` or ``return`` is forgotten.
+* ``-Wundef`` -- Requires macros to be defined before using them. This
+  disables the standard, problematic behavior that replaces undefined (or
+  misspelled) macros with ``0``.
 
 Unused variable and function warnings
 -------------------------------------
@@ -133,46 +133,46 @@
 unused items. In some circumstances, these cannot be removed, so the warning
 must be silenced. This is done in one of the following ways:
 
-  1. When possible, delete unused variables, functions, or class definitions.
-  2. If an unused entity must remain in the code, avoid giving it a name. A
-     common situation that triggers unused parameter warnings is implementing a
-     virtual function or callback. In C++, function parameters may be unnamed.
-     If desired, the variable name can remain in the code as a comment.
+1. When possible, delete unused variables, functions, or class definitions.
+2. If an unused entity must remain in the code, avoid giving it a name. A
+   common situation that triggers unused parameter warnings is implementing a
+   virtual function or callback. In C++, function parameters may be unnamed.
+   If desired, the variable name can remain in the code as a comment.
 
-     .. code-block:: cpp
+   .. code-block:: cpp
 
-       class BaseCalculator {
-        public:
-         virtual int DoMath(int number_1, int number_2, int number_3) = 0;
-       };
+      class BaseCalculator {
+       public:
+        virtual int DoMath(int number_1, int number_2, int number_3) = 0;
+      };
 
-       class Calculator : public BaseCalculator {
-         int DoMath(int number_1, int /* number_2 */, int) override {
-           return number_1 * 100;
-         }
-       };
+      class Calculator : public BaseCalculator {
+        int DoMath(int number_1, int /* number_2 */, int) override {
+          return number_1 * 100;
+        }
+      };
 
-  3. In C++, annotate unused entities with `[[maybe_unused]]
-     <https://en.cppreference.com/w/cpp/language/attributes/maybe_unused>`_ to
-     silence warnings.
+3. In C++, annotate unused entities with `[[maybe_unused]]
+   <https://en.cppreference.com/w/cpp/language/attributes/maybe_unused>`_ to
+   silence warnings.
 
-     .. code-block:: cpp
+   .. code-block:: cpp
 
-       // This variable is unused in certain circumstances.
-       [[maybe_unused]] int expected_size = size * 4;
-       #if OPTION_1
-       DoThing1(expected_size);
-       #elif OPTION_2
-       DoThing2(expected_size);
-       #endif
+      // This variable is unused in certain circumstances.
+      [[maybe_unused]] int expected_size = size * 4;
+      #if OPTION_1
+      DoThing1(expected_size);
+      #elif OPTION_2
+      DoThing2(expected_size);
+      #endif
 
-  4. As a final option, cast unused variables to ``void`` to silence these
-     warnings. Use ``static_cast<void>(unused_var)`` in C++ or
-     ``(void)unused_var`` in C.
+4. As a final option, cast unused variables to ``void`` to silence these
+   warnings. Use ``static_cast<void>(unused_var)`` in C++ or
+   ``(void)unused_var`` in C.
 
-     In C, silencing warnings on unused functions may require compiler-specific
-     attributes (``__attribute__((unused))``). Avoid this by removing the
-     functions or compiling with C++ and using ``[[maybe_unused]]``.
+   In C, silencing warnings on unused functions may require compiler-specific
+   attributes (``__attribute__((unused))``). Avoid this by removing the
+   functions or compiling with C++ and using ``[[maybe_unused]]``.
 
 Dealing with ``nodiscard`` return values
 ----------------------------------------
@@ -183,8 +183,8 @@
 
 .. code-block:: cpp
 
-  // <tuple> defines std::ignore.
-  #include <tuple>
+   // <tuple> defines std::ignore.
+   #include <tuple>
 
-  DoThingWithStatus().IgnoreError();
-  std::ignore = DoThingWithReturnValue();
\ No newline at end of file
+   DoThingWithStatus().IgnoreError();
+   std::ignore = DoThingWithReturnValue();
diff --git a/docs/getting_started.rst b/docs/getting_started.rst
index 4a828b2..f57aecb 100644
--- a/docs/getting_started.rst
+++ b/docs/getting_started.rst
@@ -36,7 +36,7 @@
 #. Clone Pigweed and bootstrap the environment (compiler setup & more). **Be
    patient, this step downloads ~1GB of LLVM, GCC, and other tooling**.
 
-   .. code:: bash
+   .. code-block:: bash
 
      $ cd ~
      $ git clone https://pigweed.googlesource.com/pigweed/pigweed
@@ -48,14 +48,14 @@
 
 #. Configure the GN build.
 
-   .. code:: bash
+   .. code-block:: bash
 
      $ gn gen out
      Done. Made 1047 targets from 91 files in 114ms
 
 #. Start the watcher. The watcher will invoke Ninja to build all the targets
 
-   .. code:: bash
+   .. code-block:: bash
 
      $ pw watch
 
@@ -118,7 +118,7 @@
 To start using Pigweed on MacOS, you'll need to install XCode. Download it
 via the App Store, then install the relevant tools from the command line.
 
-.. code:: none
+.. code-block:: none
 
   $ xcode-select --install
 
@@ -137,6 +137,13 @@
 * Ensure that `Developer Mode
   <https://docs.microsoft.com/en-us/windows/apps/get-started/enable-your-device-for-development>`_
   is enabled.
+* Enable long file paths. This can be done using ``regedit`` or by running the
+  following command as an administrator:
+
+  .. code-block:: bat
+
+     REG ADD HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\FileSystem /v LongPathsEnabled /t REG_DWORD /d 1 /f
+
 
 If you plan to flash devices with firmware, you'll need to install OpenOCD and
 ensure it's on your system path.
@@ -150,7 +157,7 @@
 
 **Linux & macOS**
 
-.. code:: bash
+.. code-block:: bash
 
   $ git clone https://pigweed.googlesource.com/pigweed/pigweed ~/pigweed
   $ cd ~/pigweed
@@ -158,7 +165,7 @@
 
 **Windows**
 
-.. code:: batch
+.. code-block:: batch
 
   :: Run git commands from the shell you set up to use with Git during install.
   > git clone https://pigweed.googlesource.com/pigweed/pigweed %HOMEPATH%\pigweed
@@ -184,13 +191,13 @@
 
 **Linux & macOS**
 
-.. code:: bash
+.. code-block:: bash
 
   $ source ./activate.sh
 
 **Windows**
 
-.. code:: batch
+.. code-block:: batch
 
   > activate.bat
 
@@ -212,7 +219,7 @@
 
 Run GN as seen below:
 
-.. code:: bash
+.. code-block:: bash
 
   $ gn gen out
 
@@ -238,7 +245,7 @@
 
 Go ahead and start ``pw_watch``:
 
-.. code:: bash
+.. code-block:: bash
 
   $ pw watch
 
@@ -278,7 +285,7 @@
 
 Try running the ``pw_status`` test manually:
 
-.. code:: bash
+.. code-block:: bash
 
   $ ./out/pw_strict_host_{clang,gcc}_debug/obj/pw_status/test/status_test
 
@@ -295,13 +302,13 @@
 
 If you want to build JUST for the device, you can kick of watch with:
 
-.. code:: bash
+.. code-block:: bash
 
   $ pw watch stm32f429i
 
 This is equivalent to the following Ninja invocation:
 
-.. code:: bash
+.. code-block:: bash
 
   $ ninja -C out stm32f429i
 
@@ -334,7 +341,7 @@
 another window using the command below (remember, you'll need to activate the
 Pigweed environment first).
 
-.. code:: bash
+.. code-block:: bash
 
   $ stm32f429i_disc1_test_server
 
@@ -346,7 +353,7 @@
 Tell GN to use the testing server by enabling a build arg specific to the
 stm32f429i-disc1 target.
 
-.. code:: bash
+.. code-block:: bash
 
   $ gn args out
   # Append this line to the file that opens in your editor to tell GN to run
@@ -375,7 +382,7 @@
 
 You can explicitly build just the documentation with the command below.
 
-.. code:: bash
+.. code-block:: bash
 
   $ ninja -C out docs
 
@@ -391,7 +398,7 @@
 This can be done by appending the GN path to the target toolchain in parenthesis
 after the desired GN build step label as seen in the example below.
 
-.. code:: none
+.. code-block:: none
 
   $ gn outputs out "//pw_status:status_test.run(//targets/host/pigweed_internal:pw_strict_host_clang_debug)"
   pw_strict_host_clang_debug/obj/pw_status/status_test.run.pw_pystamp
@@ -406,7 +413,7 @@
 
 In macOS and Linux, ``xargs`` can be used to turn this into a single command:
 
-.. code:: bash
+.. code-block:: bash
 
   $ gn outputs out "//pw_status:status_test.run(//targets/host/pigweed_internal:pw_strict_host_clang_debug)" | xargs ninja -C out
 
@@ -418,7 +425,7 @@
 Check out the :ref:`module-pw_ide` for setting up editor configurations or run
 the following for a quick setup:
 
-.. code:: bash
+.. code-block:: bash
 
   pw ide sync
 
diff --git a/docs/layout/page.html b/docs/layout/page.html
index 15ba7bb..347cc9a 100644
--- a/docs/layout/page.html
+++ b/docs/layout/page.html
@@ -17,17 +17,9 @@
 
 {% block supermain %}
   <div class="pw-main">
-    {% if body %}
-      {{ parse_body(body).topnav }}
-    {% endif %}
     <div class="pw-content">
       {{ super() }}
     </div>
   </div>
 {% endblock supermain %}
 
-{% block content %}
-  {% if body %}
-    {{ parse_body(body).body_without_topnav }}
-  {% endif %}
-{% endblock content %}
diff --git a/docs/module_structure.rst b/docs/module_structure.rst
index c952ecc..e9dee9d 100644
--- a/docs/module_structure.rst
+++ b/docs/module_structure.rst
@@ -13,97 +13,97 @@
 
 Example module structure
 ------------------------
-.. code-block:: python
+.. code-block:: text
 
-  pw_foo/...
+   pw_foo/...
 
-    docs.rst         # Docs landing page (required)
-    concepts.rst     # Conceptual docs (optional)
-    design.rst       # Design docs (optional)
-    guides.rst       # How-to guides (optional)
-    api.rst          # API reference (optional)
-    cli.rst          # CLI reference (optional)
-    gui.rst          # GUI reference (optional)
-    tutorials/*.rst  # Tutorials (optional)
+     docs.rst         # Docs landing page (required)
+     concepts.rst     # Conceptual docs (optional)
+     design.rst       # Design docs (optional)
+     guides.rst       # How-to guides (optional)
+     api.rst          # API reference (optional)
+     cli.rst          # CLI reference (optional)
+     gui.rst          # GUI reference (optional)
+     tutorials/*.rst  # Tutorials (optional)
 
-    BUILD.gn   # GN build required
-    BUILD      # Bazel build required
+     BUILD.gn   # GN build required
+     BUILD      # Bazel build required
 
-    # C++ public headers; the repeated module name is required
-    public/pw_foo/foo.h
-    public/pw_foo/baz.h
+     # C++ public headers; the repeated module name is required
+     public/pw_foo/foo.h
+     public/pw_foo/baz.h
 
-    # Exposed private headers go under internal/
-    public/pw_foo/internal/bar.h
-    public/pw_foo/internal/qux.h
+     # Exposed private headers go under internal/
+     public/pw_foo/internal/bar.h
+     public/pw_foo/internal/qux.h
 
-    # Public override headers must go in 'public_overrides'
-    public_overrides/gtest/gtest.h
-    public_overrides/string.h
+     # Public override headers must go in 'public_overrides'
+     public_overrides/gtest/gtest.h
+     public_overrides/string.h
 
-    # Private headers go into <module>_*/...
-    pw_foo_internal/zap.h
-    pw_foo_private/zip.h
-    pw_foo_secret/alxx.h
+     # Private headers go into <module>_*/...
+     pw_foo_internal/zap.h
+     pw_foo_private/zip.h
+     pw_foo_secret/alxx.h
 
-    # C++ implementations go in the root
-    foo_impl.cc
-    foo.cc
-    baz.cc
-    bar.cc
-    zap.cc
-    zip.cc
-    alxx.cc
+     # C++ implementations go in the root
+     foo_impl.cc
+     foo.cc
+     baz.cc
+     bar.cc
+     zap.cc
+     zip.cc
+     alxx.cc
 
-    # C++ tests also go in the root
-    foo_test.cc
-    bar_test.cc
-    zip_test.cc
+     # C++ tests also go in the root
+     foo_test.cc
+     bar_test.cc
+     zip_test.cc
 
-    # Python files go into 'py/<module>/...'
-    py/BUILD.gn     # Python packages are declared in GN using pw_python_package
-    py/setup.py     # Python files are structured as standard Python packages
-    py/foo_test.py  # Tests go in py/ but outside of the Python package
-    py/bar_test.py
-    py/pw_foo/__init__.py
-    py/pw_foo/__main__.py
-    py/pw_foo/bar.py
-    py/pw_foo/py.typed  # Indicates that this package has type annotations
+     # Python files go into 'py/<module>/...'
+     py/BUILD.gn     # Python packages are declared in GN using pw_python_package
+     py/setup.py     # Python files are structured as standard Python packages
+     py/foo_test.py  # Tests go in py/ but outside of the Python package
+     py/bar_test.py
+     py/pw_foo/__init__.py
+     py/pw_foo/__main__.py
+     py/pw_foo/bar.py
+     py/pw_foo/py.typed  # Indicates that this package has type annotations
 
-    # Rust crates go into 'rust/...'
-    rust/BUILD.bazel
-    rust/crate_one.rs          # Single file crates are in rust/<crate_name>.rs
-    rust/crate_two/lib.rs      # Multi-file crate's top level source in:
-                               #   rust/<crate>/lib.rs
-    rust/crate_two/mod_one.rs  # Multi-file crate's modules in:
-    rust/crate_two/mod_two.rs  #   rust/<crate>/<module_name>.rs
-                               # Prefer not using mod.rs files.
+     # Rust crates go into 'rust/...'
+     rust/BUILD.bazel
+     rust/crate_one.rs          # Single file crates are in rust/<crate_name>.rs
+     rust/crate_two/lib.rs      # Multi-file crate's top level source in:
+                                #   rust/<crate>/lib.rs
+     rust/crate_two/mod_one.rs  # Multi-file crate's modules in:
+     rust/crate_two/mod_two.rs  #   rust/<crate>/<module_name>.rs
+                                # Prefer not using mod.rs files.
 
-    # Go files go into 'go/...'
-    go/...
+     # Go files go into 'go/...'
+     go/...
 
-    # Examples go in examples/, mixing different languages
-    examples/demo.py
-    examples/demo.cc
-    examples/demo.go
-    examples/BUILD.gn
-    examples/BUILD
+     # Examples go in examples/, mixing different languages
+     examples/demo.py
+     examples/demo.cc
+     examples/demo.go
+     examples/BUILD.gn
+     examples/BUILD
 
-    # Size reports go under size_report/
-    size_report/BUILD.gn
-    size_report/base.cc
-    size_report/use_case_a.cc
-    size_report/use_case_b.cc
+     # Size reports go under size_report/
+     size_report/BUILD.gn
+     size_report/base.cc
+     size_report/use_case_a.cc
+     size_report/use_case_b.cc
 
-    # Protobuf definition files go into <module>_protos/...
-    pw_foo_protos/foo.proto
-    pw_foo_protos/internal/zap.proto
+     # Protobuf definition files go into <module>_protos/...
+     pw_foo_protos/foo.proto
+     pw_foo_protos/internal/zap.proto
 
-    # Other directories are fine, but should be private.
-    data/...
-    graphics/...
-    collection_of_tests/...
-    code_relating_to_subfeature/...
+     # Other directories are fine, but should be private.
+     data/...
+     graphics/...
+     collection_of_tests/...
+     code_relating_to_subfeature/...
 
 Module name
 -----------
@@ -133,10 +133,10 @@
 
 .. code-block::
 
-  pw_foo/...
-    public/pw_foo/foo.h
-    public/pw_foo/a_header.h
-    public/pw_foo/baz.h
+   pw_foo/...
+     public/pw_foo/foo.h
+     public/pw_foo/a_header.h
+     public/pw_foo/baz.h
 
 For headers that must be exposed due to C++ limitations (i.e. are included from
 the public interface, but are not intended for use), place the headers in a
@@ -145,9 +145,9 @@
 
 .. code-block::
 
-  pw_foo/...
-    public/pw_foo/internal/secret.h
-    public/pw_foo/internal/business.h
+   pw_foo/...
+     public/pw_foo/internal/secret.h
+     public/pw_foo/internal/business.h
 
 .. note::
 
@@ -174,15 +174,15 @@
 
 .. code-block::
 
-  pw_unit_test/...
+   pw_unit_test/...
 
-    public_overrides/gtest
-    public_overrides/gtest/gtest.h
+     public_overrides/gtest
+     public_overrides/gtest/gtest.h
 
-    public/pw_unit_test
-    public/pw_unit_test/simple_printing_event_handler.h
-    public/pw_unit_test/event_handler.h
-    public/pw_unit_test/internal/framework.h
+     public/pw_unit_test
+     public/pw_unit_test/simple_printing_event_handler.h
+     public/pw_unit_test/event_handler.h
+     public/pw_unit_test/internal/framework.h
 
 Note that the overrides are in a separate directory ``public_overrides``.
 
@@ -195,12 +195,12 @@
 
 .. code-block::
 
-  pw_unit_test/...
-    main.cc
-    framework.cc
-    test.gni
-    BUILD.gn
-    README.md
+   pw_unit_test/...
+     main.cc
+     framework.cc
+     test.gni
+     BUILD.gn
+     README.md
 
 .. _module-structure-compile-time-configuration:
 
@@ -241,16 +241,16 @@
 
 .. code-block::
 
-  pw_foo/...
+   pw_foo/...
 
-    # Publicly accessible configuration header
-    public/pw_foo/config.h
+     # Publicly accessible configuration header
+     public/pw_foo/config.h
 
-    # Internal configuration header that is included by other module headers
-    public/pw_foo/internal/config.h
+     # Internal configuration header that is included by other module headers
+     public/pw_foo/internal/config.h
 
-    # Internal configuration header
-    pw_foo_private/config.h
+     # Internal configuration header
+     pw_foo_private/config.h
 
 The configuration header is provided by a build system library. This library
 acts as a :ref:`facade<docs-module-structure-facades>`. The facade uses a
@@ -260,37 +260,37 @@
 
 .. code-block::
 
-  declare_args() {
-    # The build target that overrides the default configuration options for this
-    # module. This should point to a source set that provides defines through a
-    # public config (which may -include a file or add defines directly).
-    pw_foo_CONFIG = pw_build_DEFAULT_MODULE_CONFIG
-  }
+   declare_args() {
+     # The build target that overrides the default configuration options for this
+     # module. This should point to a source set that provides defines through a
+     # public config (which may -include a file or add defines directly).
+     pw_foo_CONFIG = pw_build_DEFAULT_MODULE_CONFIG
+   }
 
-  # An example source set for each potential config header location follows.
+   # An example source set for each potential config header location follows.
 
-  # Publicly accessible configuration header (most common)
-  pw_source_set("config") {
-    public = [ "public/pw_foo/config.h" ]
-    public_configs = [ ":public_include_path" ]
-    public_deps = [ pw_foo_CONFIG ]
-  }
+   # Publicly accessible configuration header (most common)
+   pw_source_set("config") {
+     public = [ "public/pw_foo/config.h" ]
+     public_configs = [ ":public_include_path" ]
+     public_deps = [ pw_foo_CONFIG ]
+   }
 
-  # Internal configuration header that is included by other module headers
-  pw_source_set("config") {
-    sources = [ "public/pw_foo/internal/config.h" ]
-    public_configs = [ ":public_include_path" ]
-    public_deps = [ pw_foo_CONFIG ]
-    visibility = [":*"]  # Only allow this module to depend on ":config"
-    friend = [":*"]  # Allow this module to access the config.h header.
-  }
+   # Internal configuration header that is included by other module headers
+   pw_source_set("config") {
+     sources = [ "public/pw_foo/internal/config.h" ]
+     public_configs = [ ":public_include_path" ]
+     public_deps = [ pw_foo_CONFIG ]
+     visibility = [":*"]  # Only allow this module to depend on ":config"
+     friend = [":*"]  # Allow this module to access the config.h header.
+   }
 
-  # Internal configuration header
-  pw_source_set("config") {
-    public = [ "pw_foo_private/config.h" ]
-    public_deps = [ pw_foo_CONFIG ]
-    visibility = [":*"]  # Only allow this module to depend on ":config"
-  }
+   # Internal configuration header
+   pw_source_set("config") {
+     public = [ "pw_foo_private/config.h" ]
+     public_deps = [ pw_foo_CONFIG ]
+     visibility = [":*"]  # Only allow this module to depend on ":config"
+   }
 
 Overriding configuration
 ^^^^^^^^^^^^^^^^^^^^^^^^
@@ -311,32 +311,32 @@
 
 .. code-block::
 
-  # In the toolchain, set either pw_build_DEFAULT_MODULE_CONFIG or pw_foo_CONFIG
-  pw_build_DEFAULT_MODULE_CONFIG = get_path_info(":define_overrides", "abspath")
+   # In the toolchain, set either pw_build_DEFAULT_MODULE_CONFIG or pw_foo_CONFIG
+   pw_build_DEFAULT_MODULE_CONFIG = get_path_info(":define_overrides", "abspath")
 
-  # This configuration sets PW_FOO_INPUT_BUFFER_SIZE_BYTES using the -D flag.
-  pw_source_set("define_overrides") {
-    public_configs = [ ":define_options" ]
-  }
+   # This configuration sets PW_FOO_INPUT_BUFFER_SIZE_BYTES using the -D flag.
+   pw_source_set("define_overrides") {
+     public_configs = [ ":define_options" ]
+   }
 
-  config("define_options") {
-    defines = [ "PW_FOO_INPUT_BUFFER_SIZE_BYTES=256" ]
-  }
+   config("define_options") {
+     defines = [ "PW_FOO_INPUT_BUFFER_SIZE_BYTES=256" ]
+   }
 
-  # This configuration sets PW_FOO_INPUT_BUFFER_SIZE_BYTES in a header file.
-  pw_source_set("include_overrides") {
-    public_configs = [ ":set_options_in_header_file" ]
+   # This configuration sets PW_FOO_INPUT_BUFFER_SIZE_BYTES in a header file.
+   pw_source_set("include_overrides") {
+     public_configs = [ ":set_options_in_header_file" ]
 
-    # Header file with #define PW_FOO_INPUT_BUFFER_SIZE_BYTES 256
-    sources = [ "my_config_overrides.h" ]
-  }
+     # Header file with #define PW_FOO_INPUT_BUFFER_SIZE_BYTES 256
+     sources = [ "my_config_overrides.h" ]
+   }
 
-  config("set_options_in_header_file") {
-    cflags = [
-      "-include",
-      rebase_path("my_config_overrides.h", root_build_dir),
-    ]
-  }
+   config("set_options_in_header_file") {
+     cflags = [
+       "-include",
+       rebase_path("my_config_overrides.h", root_build_dir),
+     ]
+   }
 
 .. admonition:: Why this config pattern is preferred
 
@@ -409,19 +409,19 @@
 
 .. code-block::
 
-  # pw_foo contains 2 facades, foo and bar
-  pw_foo/...
-    # Public headers
-    # public/pw_foo/foo.h #includes pw_foo_backend/foo.h
-    # public/pw_foo/bar.h #includes pw_foo_backend/bar.h
-    public/pw_foo/foo.h
-    public/pw_foo/bar.h
+   # pw_foo contains 2 facades, foo and bar
+   pw_foo/...
+     # Public headers
+     # public/pw_foo/foo.h #includes pw_foo_backend/foo.h
+     # public/pw_foo/bar.h #includes pw_foo_backend/bar.h
+     public/pw_foo/foo.h
+     public/pw_foo/bar.h
 
-  pw_foo_backend/...
+   pw_foo_backend/...
 
-    # Public override headers for facade1 and facade2 go in separate folders
-    foo_public_overrides/pw_foo_backend/foo.h
-    bar_public_overrides/pw_foo_backend/bar.h
+     # Public override headers for facade1 and facade2 go in separate folders
+     foo_public_overrides/pw_foo_backend/foo.h
+     bar_public_overrides/pw_foo_backend/bar.h
 
 Documentation
 -------------
@@ -475,7 +475,7 @@
 
 10. Add the new module to CMake build
 
-   - In ``/CMakeLists.txt`` add ``add_subdirectory(pw_new)``
+    - In ``/CMakeLists.txt`` add ``add_subdirectory(pw_new)``
 
 11. Run :ref:`module-pw_module-module-check`
 
diff --git a/docs/python_build.rst b/docs/python_build.rst
index ba30abf..2be9cb0 100644
--- a/docs/python_build.rst
+++ b/docs/python_build.rst
@@ -166,10 +166,10 @@
 
    .. code-block::
 
-     pw_build_PIP_REQUIREMENTS = [
-       # Project specific requirements
-       "//tools/requirements.txt",
-     ]
+      pw_build_PIP_REQUIREMENTS = [
+        # Project specific requirements
+        "//tools/requirements.txt",
+      ]
 
    See the :ref:`docs-python-build-python-gn-structure` section below for a full
    code listing.
diff --git a/docs/style_guide.rst b/docs/style_guide.rst
index 536e9de..1986623 100644
--- a/docs/style_guide.rst
+++ b/docs/style_guide.rst
@@ -225,7 +225,7 @@
 .. admonition:: **Yes**: Always use braces for line conditionals and loops:
    :class: checkmark
 
-   .. code:: cpp
+   .. code-block:: cpp
 
       while (SomeCondition()) {
         x += 2;
@@ -238,7 +238,7 @@
 .. admonition:: **No**: Missing braces
    :class: error
 
-   .. code:: cpp
+   .. code-block:: cpp
 
       while (SomeCondition())
         x += 2;
@@ -248,7 +248,7 @@
 .. admonition:: **No**: Statement on same line as condition
    :class: error
 
-   .. code:: cpp
+   .. code-block:: cpp
 
       while (SomeCondition()) { x += 2; }
       if (OtherCondition()) { DoTheThing(); }
@@ -259,7 +259,7 @@
 .. admonition:: **Yes**:
    :class: checkmark
 
-   .. code:: cpp
+   .. code-block:: cpp
 
       while (true) {
         DoSomethingForever();
@@ -268,7 +268,7 @@
 .. admonition:: **No**:
    :class: error
 
-   .. code:: cpp
+   .. code-block:: cpp
 
       for (;;) {
         DoSomethingForever();
@@ -308,7 +308,7 @@
    at the bottom and de-dentend.
    :class: checkmark
 
-   .. code:: cpp
+   .. code-block:: cpp
 
       Status DoSomething(Parameter parameter) {
         // Parameter validation first; detecting incoming use errors.
@@ -339,7 +339,7 @@
    the early bail structure; so pay close attention.
    :class: error
 
-   .. code:: cpp
+   .. code-block:: cpp
 
       Status DoSomething(Parameter parameter) {
         // Parameter validation first; detecting incoming use errors.
@@ -365,7 +365,7 @@
    the bottom and de-dentend.
    :class: checkmark
 
-   .. code:: cpp
+   .. code-block:: cpp
 
       for (int i = 0; i < LoopSize(); ++i) {
         // Early skip of item based on edge condition.
@@ -390,7 +390,7 @@
    main purpose of the loop versus what is edge case handling.
    :class: error
 
-   .. code:: cpp
+   .. code-block:: cpp
 
       for (int i = 0; i < LoopSize(); ++i) {
         if (CommonCase()) {
@@ -420,7 +420,7 @@
 .. admonition:: **Yes**: No else after return or continue
    :class: checkmark
 
-   .. code:: cpp
+   .. code-block:: cpp
 
       // Note lack of else block due to return.
       if (Failure()) {
@@ -443,7 +443,7 @@
 .. admonition:: **No**: Else after return needlessly creeps right
    :class: error
 
-   .. code:: cpp
+   .. code-block:: cpp
 
       if (Failure()) {
         DoTheThing();
@@ -859,11 +859,11 @@
 #. Groups are placed the following order with a blank line separating each
    grouping.
 
-    * "set noparent" line
-    * "include" lines
-    * "file:" lines
-    * user grants (some examples: "*", "foo@example.com")
-    * "per-file:" lines
+   * "set noparent" line
+   * "include" lines
+   * "file:" lines
+   * user grants (some examples: "*", "foo@example.com")
+   * "per-file:" lines
 
 This plugin will, by default, act upon any file named "OWNERS".
 
@@ -989,7 +989,7 @@
 Use headings according to the following hierarchy, with the shown characters
 for the ReST heading syntax.
 
-.. code:: rst
+.. code-block:: rst
 
    ==================================
    Document Title: Two Bars of Equals
@@ -1030,7 +1030,7 @@
 .. admonition:: **Yes**: No blank after heading
    :class: checkmark
 
-   .. code:: rst
+   .. code-block:: rst
 
       Here is a heading
       -----------------
@@ -1039,7 +1039,7 @@
 .. admonition:: **No**: Unnecessary blank line
    :class: error
 
-   .. code:: rst
+   .. code-block:: rst
 
       Here is a heading
       -----------------
@@ -1051,7 +1051,7 @@
 .. admonition:: **Yes**: Just one blank after section content before the next heading
    :class: checkmark
 
-   .. code:: rst
+   .. code-block:: rst
 
       There is some text here in the section before the next. It's just here to
       illustrate the spacing standard. Note that there is just one blank line
@@ -1064,7 +1064,7 @@
 .. admonition:: **No**: Extra blank lines
    :class: error
 
-   .. code:: rst
+   .. code-block:: rst
 
       There is some text here in the section before the next. It's just here to
       illustrate the spacing standard. Note that there are too many blank lines
@@ -1084,7 +1084,7 @@
 .. admonition:: **Yes**: Three space indent for directives; and nested
    :class: checkmark
 
-   .. code:: none
+   .. code-block:: none
 
       Here is a paragraph that has some content. After this content is a
       directive.
@@ -1105,7 +1105,7 @@
    for directives
    :class: error
 
-   .. code:: none
+   .. code-block:: none
 
       Here is a paragraph with some content.
 
@@ -1121,7 +1121,7 @@
 .. admonition:: **No**: Missing blank between directive and content.
    :class: error
 
-   .. code:: none
+   .. code-block:: none
 
       Here is a paragraph with some content.
 
@@ -1628,7 +1628,7 @@
 .. admonition:: **Yes**:
    :class: checkmark
 
-   .. code:: none
+   .. code-block:: none
 
       pw_some_module: Short capitalized description
 
@@ -1641,7 +1641,7 @@
 .. admonition:: **Yes**: Small number of modules affected; use {} syntax.
    :class: checkmark
 
-   .. code:: none
+   .. code-block:: none
 
       pw_{foo, bar, baz}: Change something in a few places
 
@@ -1652,14 +1652,14 @@
    nested, so they get a ``/`` character.
    :class: checkmark
 
-   .. code:: none
+   .. code-block:: none
 
       targets/xyz123: Tweak support for XYZ's PQR
 
 .. admonition:: **Yes**: Uses imperative style for subject and text.
    :class: checkmark
 
-   .. code:: none
+   .. code-block:: none
 
       pw_something: Add foo and bar functions
 
@@ -1668,7 +1668,7 @@
 .. admonition:: **No**: Uses non-imperative style for subject and text.
    :class: error
 
-   .. code:: none
+   .. code-block:: none
 
       pw_something: Adds more things
 
@@ -1679,7 +1679,7 @@
    single CL. Prefer smaller CLs, but larger CLs are a practical reality.
    :class: checkmark
 
-   .. code:: none
+   .. code-block:: none
 
       pw_complicated_module: Pre-work for refactor
 
@@ -1694,7 +1694,7 @@
 .. admonition:: **No**: Run on paragraph instead of bulleted list
    :class: error
 
-   .. code:: none
+   .. code-block:: none
 
       pw_foo: Many things in a giant BWOT
 
@@ -1706,7 +1706,7 @@
 .. admonition:: **No**: Doesn't capitalize the subject
    :class: error
 
-   .. code:: none
+   .. code-block:: none
 
       pw_foo: do a thing
 
@@ -1716,7 +1716,7 @@
    word is a lowercase identifier.
    :class: checkmark
 
-   .. code:: none
+   .. code-block:: none
 
       pw_foo: std::unique_lock cleanup
 
@@ -1727,28 +1727,28 @@
    However, imperative style subjects often have the identifier elsewhere in
    the subject; for example:
 
-   .. code:: none
+   .. code-block:: none
 
      pw_foo: Improve use of std::unique_lock
 
 .. admonition:: **No**: Uses a non-standard ``[]`` to indicate module:
    :class: error
 
-   .. code:: none
+   .. code-block:: none
 
       [pw_foo]: Do a thing
 
 .. admonition:: **No**: Has a period at the end of the subject
    :class: error
 
-   .. code:: none
+   .. code-block:: none
 
       pw_bar: Do something great.
 
 .. admonition:: **No**: Puts extra stuff after the module which isn't a module.
    :class: error
 
-   .. code:: none
+   .. code-block:: none
 
       pw_bar/byte_builder: Add more stuff to builder
 
@@ -1757,7 +1757,7 @@
 We support a number of `git footers`_ in the commit message, such as ``Bug:
 123`` in the message below:
 
-.. code:: none
+.. code-block:: none
 
    pw_something: Add foo and bar functions
 
@@ -1769,7 +1769,7 @@
   bug will be automatically updated when the change is submitted. When a change
   is relevant to more than one bug, include multiple ``Bug`` lines, like so:
 
-  .. code:: none
+  .. code-block:: none
 
       pw_something: Add foo and bar functions
 
@@ -1779,7 +1779,7 @@
 * ``Fixed`` or ``Fixes``: Like ``Bug``, but automatically closes the bug when
   submitted.
 
-  .. code:: none
+  .. code-block:: none
 
       pw_something: Fix incorrect use of foo
 
@@ -1809,150 +1809,164 @@
 
 Grouping related content with tabs
 ==================================
-Use the ``tabs`` directive to group related content together. This feature is
-powered by `sphinx-tabs <https://sphinx-tabs.readthedocs.io>`_.
+Use the ``tab-set`` directive to group related content together. This feature is
+powered by `sphinx-design Tabs
+<https://sphinx-design.readthedocs.io/en/furo-theme/tabs.html>`_
 
 Tabs for code-only content
 --------------------------
 Use the ``tabs`` and ``code-tab`` directives together. Example:
 
-.. code:: none
+.. code-block:: rst
 
-   .. tabs::
+   .. tab-set-code::
 
-      .. code-tab:: c++
+      .. code-block:: c++
 
          // C++ code...
 
-      .. code-tab:: py
+      .. code-block:: python
 
          # Python code...
 
 Rendered output:
 
-.. tabs::
+.. tab-set-code::
 
-   .. code-tab:: c++
+   .. code-block:: c++
 
       // C++ code...
 
-   .. code-tab:: py
+   .. code-block:: python
 
       # Python code...
 
 Tabs for all other content
 --------------------------
-Use the ``tabs`` and ``group-tab`` directives together. Example:
+Use the ``tab-set`` and ``tab-item`` directives together. Example:
 
-.. code:: none
+.. code-block:: rst
 
-   .. tabs::
+   .. tab-set::
 
-      .. group-tab:: Linux
+      .. tab-item:: Linux
 
          Linux instructions...
 
-      .. group-tab:: Windows
+      .. tab-item:: Windows
 
          Windows instructions...
 
 Rendered output:
 
-.. tabs::
+.. tab-set::
 
-   .. group-tab:: Linux
+   .. tab-item:: Linux
 
       Linux instructions...
 
-   .. group-tab:: Windows
+   .. tab-item:: Windows
 
       Windows instructions...
 
 Tab synchronization
 -------------------
-Tabs are synchronized based on ``group-tab`` and ``code-tab`` values. Example:
+Tabs are synchronized in two ways:
 
-.. code:: none
+1. ``tab-set-code::`` ``code-block`` language names.
+2. ``tab-item::`` ``:sync:`` values.
 
-   .. tabs::
+For example:
 
-      .. code-tab:: c++
+.. code-block:: rst
+
+   .. tab-set-code::
+
+      .. code-block:: c++
 
          // C++ code...
 
-      .. code-tab:: py
+      .. code-block:: py
 
          # Python code...
 
-   .. tabs::
+   .. tab-set-code::
 
-      .. code-tab:: c++
+      .. code-block:: c++
 
          // More C++ code...
 
-      .. code-tab:: py
+      .. code-block:: py
 
          # More Python code...
 
-   .. tabs::
+   .. tab-set::
 
-      .. group-tab:: Linux
+      .. tab-item:: Linux
+         :sync: key1
 
          Linux instructions...
 
-      .. group-tab:: Windows
+      .. tab-item:: Windows
+         :sync: key2
 
          Windows instructions...
 
-   .. tabs::
+   .. tab-set::
 
-      .. group-tab:: Linux
+      .. tab-item:: Linux
+         :sync: key1
 
          More Linux instructions...
 
-      .. group-tab:: Windows
+      .. tab-item:: Windows
+         :sync: key2
 
          More Windows instructions...
 
 Rendered output:
 
-.. tabs::
+.. tab-set-code::
 
-   .. code-tab:: c++
+   .. code-block:: c++
 
       // C++ code...
 
-   .. code-tab:: py
+   .. code-block:: py
 
       # Python code...
 
-.. tabs::
+.. tab-set-code::
 
-   .. code-tab:: c++
+   .. code-block:: c++
 
       // More C++ code...
 
-   .. code-tab:: py
+   .. code-block:: py
 
       # More Python code...
 
-.. tabs::
+.. tab-set::
 
-   .. group-tab:: Linux
+   .. tab-item:: Linux
+      :sync: key1
 
       Linux instructions...
 
-   .. group-tab:: Windows
+   .. tab-item:: Windows
+      :sync: key2
 
       Windows instructions...
 
-.. tabs::
+.. tab-set::
 
-   .. group-tab:: Linux
+   .. tab-item:: Linux
+      :sync: key1
 
       More Linux instructions...
 
-   .. group-tab:: Windows
+   .. tab-item:: Windows
+      :sync: key2
 
       More Windows instructions...
 
diff --git a/docs/targets.rst b/docs/targets.rst
index 3988aa4..00bb6a7 100644
--- a/docs/targets.rst
+++ b/docs/targets.rst
@@ -69,31 +69,31 @@
 
 .. code-block::
 
-  import("//build_overrides/pigweed.gni")
+   import("//build_overrides/pigweed.gni")
 
-  import("$dir_pw_toolchain/arm_gcc/toolchains.gni")
-  import("$dir_pw_toolchain/generate_toolchain.gni")
+   import("$dir_pw_toolchain/arm_gcc/toolchains.gni")
+   import("$dir_pw_toolchain/generate_toolchain.gni")
 
-  my_target_scope = {
-    # Use Pigweed's Cortex M4 toolchain as a base.
-    _toolchain_base = pw_toolchain_arm_gcc.cortex_m4f_debug
+   my_target_scope = {
+     # Use Pigweed's Cortex M4 toolchain as a base.
+     _toolchain_base = pw_toolchain_arm_gcc.cortex_m4f_debug
 
-    # Forward everything except the defaults scope from that toolchain.
-    forward_variables_from(_toolchain_base, "*", [ "defaults" ])
+     # Forward everything except the defaults scope from that toolchain.
+     forward_variables_from(_toolchain_base, "*", [ "defaults" ])
 
-    defaults = {
-      # Forward everything from the base toolchain's defaults.
-      forward_variables_from(_toolchain_base.defaults, "*")
+     defaults = {
+       # Forward everything from the base toolchain's defaults.
+       forward_variables_from(_toolchain_base.defaults, "*")
 
-      # Extend with custom build arguments for the target.
-      pw_log_BACKEND = dir_pw_log_tokenized
-    }
-  }
+       # Extend with custom build arguments for the target.
+       pw_log_BACKEND = dir_pw_log_tokenized
+     }
+   }
 
-  # Create the actual GN toolchain from the scope.
-  generate_toolchain("my_target") {
-    forward_variables_from(my_target_scope, "*")
-  }
+   # Create the actual GN toolchain from the scope.
+   generate_toolchain("my_target") {
+     forward_variables_from(my_target_scope, "*")
+   }
 
 Upstream targets
 ================
diff --git a/package.json b/package.json
index de73e03..0fecc53 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
   "name": "pigweedjs",
-  "version": "0.0.10",
+  "version": "0.0.11",
   "description": "An open source collection of embedded-targeted libraries",
   "author": "The Pigweed Authors",
   "license": "Apache-2.0",
diff --git a/pw_allocator/docs.rst b/pw_allocator/docs.rst
index 667ca83..7f3f9ef 100644
--- a/pw_allocator/docs.rst
+++ b/pw_allocator/docs.rst
@@ -31,7 +31,7 @@
 User can enable heap poisoning by enabling the ``pw_allocator_POISON_HEAP``
 build arg.
 
-.. code:: sh
+.. code-block:: sh
 
   $ gn args out
   # Modify and save the args file to use heap poison.
@@ -60,7 +60,7 @@
 
 The heap visualizer can be launched from a shell using the Pigweed environment.
 
-.. code:: sh
+.. code-block:: sh
 
   $ pw heap-viewer --dump-file <directory of dump file> --heap-low-address
   <hex address of heap lower address> --heap-high-address <hex address of heap
@@ -74,7 +74,7 @@
   represented as ``f <memory address>``. For example, a dump file should look
   like:
 
-  .. code:: sh
+  .. code-block:: sh
 
     m 20 0x20004450  # malloc 20 bytes, the pointer is 0x20004450
     m 8 0x2000447c   # malloc 8 bytes, the pointer is 0x2000447c
@@ -85,13 +85,13 @@
 
 - ``--heap-low-address`` is the start of the heap. For example:
 
-  .. code:: sh
+  .. code-block:: sh
 
     --heap-low-address 0x20004440
 
 - ``--heap-high-address`` is the end of the heap. For example:
 
-  .. code:: sh
+  .. code-block:: sh
 
     --heap-high-address 0x20006040
 
diff --git a/pw_analog/BUILD.gn b/pw_analog/BUILD.gn
index 90ba89a..a60e8a5 100644
--- a/pw_analog/BUILD.gn
+++ b/pw_analog/BUILD.gn
@@ -90,7 +90,6 @@
 pw_doc_group("docs") {
   sources = [
     "docs.rst",
-    "public/pw_analog/analog_input.h",
     "public/pw_analog/analog_input_gmock.h",
     "public/pw_analog/microvolt_input.h",
     "public/pw_analog/microvolt_input_gmock.h",
diff --git a/pw_analog/docs.rst b/pw_analog/docs.rst
index 6c861c5..927b1eb 100644
--- a/pw_analog/docs.rst
+++ b/pw_analog/docs.rst
@@ -44,9 +44,8 @@
 
 pw::analog::AnalogInput
 =======================
-.. literalinclude:: public/pw_analog/analog_input.h
-   :start-after: #pragma once
-   :end-before: }  // namespace pw::analog
+.. doxygenclass:: pw::analog::AnalogInput
+   :members:
 
 pw::analog::GmockAnalogInput
 ============================
diff --git a/pw_analog/public/pw_analog/analog_input.h b/pw_analog/public/pw_analog/analog_input.h
index 39024a0..0eaea1a 100644
--- a/pw_analog/public/pw_analog/analog_input.h
+++ b/pw_analog/public/pw_analog/analog_input.h
@@ -18,56 +18,58 @@
 
 namespace pw::analog {
 
-// Base interface for getting ADC samples from one ADC channel in a thread
-// safe manner.
-//
-// The ADC backend interface is up to the user to define and implement for now.
-// This gives the flexibility for the ADC driver implementation.
-//
-// AnalogInput controls a specific input/channel where the ADC peripheral may be
-// shared across multiple channels that may be controlled by multiple threads.
-// The implementer of this pure virtual interface is responsible for ensuring
-// thread safety and access at the driver level.
+/// Base interface for getting analog-to-digital (ADC) samples from one ADC
+/// channel in a thread-safe manner.
+///
+/// The ADC backend interface is up to the user to define and implement for now.
+/// This gives flexibility for the ADC driver implementation.
+///
+/// `AnalogInput` controls a specific input / channel where the ADC peripheral
+/// may be shared across multiple channels that may be controlled by multiple
+/// threads. The implementer of this pure virtual interface is responsible for
+/// ensuring thread safety and access at the driver level.
 class AnalogInput {
  public:
-  // Limits struct that specifies the min and max of the sample range.
-  // These values do not change at run time.
+  /// Specifies the sample range.
+  /// These values do not change at runtime.
   struct Limits {
+    /// The minimum of the sample range.
     int32_t min;
+    /// The maximum of the sample range.
     int32_t max;
   };
 
   virtual ~AnalogInput() = default;
 
-  // Blocks until the specified timeout duration has elapsed or the ADC sample
-  // has been returned, whichever comes first.
-  //
-  // This method is thread safe.
-  //
-  // Returns:
-  //   Sample.
-  //   ResourceExhuasted: ADC peripheral in use.
-  //   DeadlineExceedded: Timed out waiting for a sample.
-  //   Other statuses left up to the implementer.
+  /// Blocks until the specified timeout duration has elapsed or the ADC sample
+  /// has been returned, whichever comes first.
+  ///
+  /// This method is thread safe.
+  ///
+  /// @returns
+  /// * A sample on success.
+  /// * @pw_status{RESOURCE_EXHAUSTED} - ADC peripheral in use.
+  /// * @pw_status{DEADLINE_EXCEEDED} - Timed out waiting for a sample.
+  /// * Other statuses left up to the implementer.
   Result<int32_t> TryReadFor(chrono::SystemClock::duration timeout) {
     return TryReadUntil(chrono::SystemClock::TimePointAfterAtLeast(timeout));
   }
 
-  // Blocks until the deadline time has been reached or the ADC sample
-  // has been returned, whichever comes first.
-  //
-  // This method is thread safe.
-  //
-  // Returns:
-  //   Sample.
-  //   ResourceExhuasted: ADC peripheral in use.
-  //   DeadlineExceedded: Timed out waiting for a sample.
-  //   Other statuses left up to the implementer.
+  /// Blocks until the deadline time has been reached or the ADC sample
+  /// has been returned, whichever comes first.
+  ///
+  /// This method is thread safe.
+  ///
+  /// @returns
+  /// * A sample on success.
+  /// * @pw_status{RESOURCE_EXHAUSTED} - ADC peripheral in use.
+  /// * @pw_status{DEADLINE_EXCEEDED} - Timed out waiting for a sample.
+  /// * Other statuses left up to the implementer.
   virtual Result<int32_t> TryReadUntil(
       chrono::SystemClock::time_point deadline) = 0;
 
-  // Returns the range of the ADC sample.
-  // These values do not change at run time.
+  /// @returns The range of the ADC sample. These values do not change at
+  /// runtime.
   virtual Limits GetLimits() const = 0;
 };
 
diff --git a/pw_android_toolchain/docs.rst b/pw_android_toolchain/docs.rst
index 8dbc661..1a79c26 100644
--- a/pw_android_toolchain/docs.rst
+++ b/pw_android_toolchain/docs.rst
@@ -49,7 +49,7 @@
 
 For example:
 
-.. code::
+.. code-block::
 
    import("//build_overrides/pigweed.gni")
 
@@ -85,7 +85,7 @@
 ``current_cpu`` is set. If any toolchain scope in the list does not set it, a
 toolchain for each supported target will be generated.
 
-.. code::
+.. code-block::
 
    # Generate arm_*, arm64_*, x64_*, and x86_* for each scope in the list.
    pw_generate_android_toolchains("target_toolchains) {
diff --git a/pw_assert/BUILD.bazel b/pw_assert/BUILD.bazel
index c68a47b..4d9fcfc 100644
--- a/pw_assert/BUILD.bazel
+++ b/pw_assert/BUILD.bazel
@@ -119,6 +119,12 @@
     ],
 )
 
+alias(
+    name = "backend_impl_multiplexer",
+    actual = "@pigweed//pw_assert_basic:impl",
+    visibility = ["@pigweed_config//:__pkg__"],
+)
+
 pw_cc_test(
     name = "assert_facade_test",
     srcs = [
diff --git a/pw_assert/docs.rst b/pw_assert/docs.rst
index f411e81..1658578 100644
--- a/pw_assert/docs.rst
+++ b/pw_assert/docs.rst
@@ -490,6 +490,8 @@
 this, assert backends may avoid declaring explicit dependencies, instead relying
 on include paths to access header files.
 
+GN
+--
 In GN, the ``pw_assert`` backend's full implementation with true dependencies is
 made available through the ``$dir_pw_assert:impl`` group. When
 ``pw_assert_BACKEND`` is set, ``$dir_pw_assert:impl`` must be listed in the
@@ -506,6 +508,31 @@
 ``public_deps``. In this case, GN header checking can be disabled with
 ``check_includes = false``.
 
+Bazel
+-----
+In Bazel, assert backends may break dependency cycles by placing the full
+implementation in an ``impl`` target, like ``//pw_assert_basic:impl`` or
+``//pw_assert_tokenized:impl``. The ``@pigweed_config//:pw_assert_backend_impl``
+label flag should be set to the ``impl`` target required by the assert backend
+used by the platform.
+
+You must add a dependency on the ``@pigweed_config//:pw_assert_backend_impl``
+target to any binary using ``pw_assert``.  You can do this in a few ways:
+
+1.  Use ``pw_cc_binary``, one of the :ref:`module-pw_build-bazel-wrapper-rules`
+    provided by Pigweed, instead of native ``cc_binary``. This wrapper adds the
+    required dependency.
+
+2.  Use `link_extra_lib
+    <https://bazel.build/reference/be/c-cpp#cc_binary.link_extra_lib>`_: set
+    the ``@bazel_tools//tools/cpp:link_extra_lib`` label flag to point to
+    ``@pigweed_config//:pw_assert_backend_impl``, probably using `bazelrc
+    <https://bazel.build/run/bazelrc>`_. Note that this is only supported in
+    Bazel 7.0.0 or newer.
+
+3.  Add ``@pigweed_config//:pw_assert_backend_impl`` directly to the ``deps``
+    of every embedded ``cc_binary`` in your project.
+
 .. _module-pw_assert-backend_api:
 
 -----------
diff --git a/pw_assert/public/pw_assert/internal/libc_assert.h b/pw_assert/public/pw_assert/internal/libc_assert.h
index 865650e..11a5a58 100644
--- a/pw_assert/public/pw_assert/internal/libc_assert.h
+++ b/pw_assert/public/pw_assert/internal/libc_assert.h
@@ -23,7 +23,7 @@
 #include "pw_preprocessor/util.h"
 
 #ifdef __cplusplus
-}  // extern "C++"
+}       // extern "C++"
 #endif  // __cplusplus
 
 // Provide static_assert() on >=C11
diff --git a/pw_assert_basic/BUILD.bazel b/pw_assert_basic/BUILD.bazel
index 7308a58..e3e8444 100644
--- a/pw_assert_basic/BUILD.bazel
+++ b/pw_assert_basic/BUILD.bazel
@@ -14,6 +14,7 @@
 
 load(
     "//pw_build:pigweed.bzl",
+    "pw_cc_facade",
     "pw_cc_library",
 )
 
@@ -21,8 +22,15 @@
 
 licenses(["notice"])
 
+# Note: to avoid circular dependencies, this target only includes the headers
+# for pw_assert_basic. The source file and its dependencies are in the separate
+# ":impl" target.
+#
+# If you point the @pigweed_config//:pw_assert_backend to //pw_assert_basic,
+# then @pigweed_config//:pw_assert_backend_impl should point to
+# //pw_assert_basic:impl.
 pw_cc_library(
-    name = "headers",
+    name = "pw_assert_basic",
     hdrs = [
         "public/pw_assert_basic/assert_basic.h",
         "public_overrides/pw_assert_backend/check_backend.h",
@@ -32,24 +40,28 @@
         "public_overrides",
     ],
     deps = [
+        ":handler_facade",
         "//pw_preprocessor",
     ],
 )
 
 pw_cc_library(
-    name = "pw_assert_basic",
+    name = "impl",
     srcs = [
         "assert_basic.cc",
     ],
     deps = [
-        ":headers",
+        ":pw_assert_basic",
         ":pw_assert_basic_handler",
         "//pw_assert:facade",
         "//pw_preprocessor",
     ],
+    # Other libraries may not always depend on this library, even if it is
+    # necessary at link time.
+    alwayslink = 1,
 )
 
-pw_cc_library(
+pw_cc_facade(
     name = "handler_facade",
     hdrs = [
         "public/pw_assert_basic/handler.h",
@@ -66,9 +78,12 @@
     ],
     deps = [
         ":handler_facade",
-        ":headers",
+        ":pw_assert_basic",
         "//pw_preprocessor",
         "//pw_string:builder",
         "//pw_sys_io",
     ],
+    # Other libraries may not always depend on this library, even if it is
+    # necessary at link time.
+    alwayslink = 1,
 )
diff --git a/pw_assert_log/BUILD.bazel b/pw_assert_log/BUILD.bazel
index e5b7947..2f58bdf 100644
--- a/pw_assert_log/BUILD.bazel
+++ b/pw_assert_log/BUILD.bazel
@@ -56,3 +56,6 @@
         "//pw_preprocessor",
     ],
 )
+
+# There is no "impl" target: pw_assert_log doesn't have potential circular
+# dependencies.
diff --git a/pw_assert_tokenized/BUILD.bazel b/pw_assert_tokenized/BUILD.bazel
index c3c4914..080bda8 100644
--- a/pw_assert_tokenized/BUILD.bazel
+++ b/pw_assert_tokenized/BUILD.bazel
@@ -21,17 +21,20 @@
 
 licenses(["notice"])
 
+# Note: to avoid circular dependencies, this target only includes the headers
+# for pw_assert_tokenized. The source file and its dependencies are in the separate
+# ":impl" target.
+#
+# If you point the @pigweed_config//:pw_assert_backend to //pw_assert_tokenized,
+# then @pigweed_config//:pw_assert_backend_impl should point to
+# //pw_assert_tokenized:impl.
 pw_cc_library(
     name = "pw_assert_tokenized",
-    srcs = [
-        "log_handler.cc",
-    ],
     hdrs = [
         "assert_public_overrides/pw_assert_backend/assert_backend.h",
         "check_public_overrides/pw_assert_backend/check_backend.h",
         "public/pw_assert_tokenized/assert_tokenized.h",
         "public/pw_assert_tokenized/check_tokenized.h",
-        "public/pw_assert_tokenized/handler.h",
     ],
     includes = [
         "assert_public_overrides",
@@ -39,11 +42,37 @@
         "public",
     ],
     deps = [
-        "//pw_assert",
-        "//pw_base64",
-        "//pw_bytes",
+        ":handler",
+        "//pw_assert:facade",
         "//pw_log_tokenized",
         "//pw_preprocessor",
         "//pw_tokenizer",
     ],
 )
+
+pw_cc_library(
+    name = "handler",
+    hdrs = [
+        "public/pw_assert_tokenized/handler.h",
+    ],
+    includes = ["public"],
+    deps = [
+        "//pw_preprocessor",
+    ],
+)
+
+pw_cc_library(
+    name = "impl",
+    srcs = [
+        "log_handler.cc",
+    ],
+    deps = [
+        ":handler",
+        "//pw_base64",
+        "//pw_bytes",
+        "//pw_log",
+        "//pw_log_tokenized",
+        "//pw_span",
+    ],
+    alwayslink = 1,
+)
diff --git a/pw_assert_tokenized/docs.rst b/pw_assert_tokenized/docs.rst
index 2458b5a..59ba703 100644
--- a/pw_assert_tokenized/docs.rst
+++ b/pw_assert_tokenized/docs.rst
@@ -18,16 +18,20 @@
   number of the assert statement. By default, it is passed to the logging system
   to produce a string like this:
 
-    PW_ASSERT() or PW_DASSERT() failure at
-    pw_result/public/pw_result/result.h:63
+  .. code-block:: text
+
+     PW_ASSERT() or PW_DASSERT() failure at
+     pw_result/public/pw_result/result.h:63
 
 * **PW_CHECK_\*()**: The ``PW_CHECK_*()`` macros work in contexts where
   tokenization is fully supported, so they are able to capture the CHECK
   statement expression and any provided string literal in addition to the file
   name in the pw_log_tokenized key/value format:
 
-    "■msg♦Check failure: \*unoptimizable >= 0, Ensure this CHECK logic
-    stays■module♦KVS■file♦pw_kvs/size_report/base.cc"
+  .. code-block:: text
+
+     "■msg♦Check failure: \*unoptimizable >= 0, Ensure this CHECK logic
+     stays■module♦KVS■file♦pw_kvs/size_report/base.cc"
 
   Evaluated values of ``PW_CHECK_*()`` statements are not captured, and any
   string formatting arguments are also not captured. This minimizes call-site
@@ -66,29 +70,29 @@
 
 .. code-block::
 
-  pw_executable("main") {
-    deps = [
-      # ...
-    ]
-    sources = [ "main.cc" ]
-  }
+   pw_executable("main") {
+     deps = [
+       # ...
+     ]
+     sources = [ "main.cc" ]
+   }
 
-  pw_tokenizer_database("log_tokens") {
-    database = "tools/tokenized_logs.csv"
-    deps = [
-      ":source_file_names",
-      ":main",
-    ]
-    optional_paths = [ "$root_build_dir/**/*.elf" ]
-    input_databases = [ "$target_gen_dir/source_file_names.json" ]
-  }
+   pw_tokenizer_database("log_tokens") {
+     database = "tools/tokenized_logs.csv"
+     deps = [
+       ":source_file_names",
+       ":main",
+     ]
+     optional_paths = [ "$root_build_dir/**/*.elf" ]
+     input_databases = [ "$target_gen_dir/source_file_names.json" ]
+   }
 
-  # Extracts all source/header file names from "main" and its transitive
-  # dependencies for tokenization.
-  pw_relative_source_file_names("source_file_names") {
-    deps = [ ":main" ]
-    outputs = [ "$target_gen_dir/source_file_names.json" ]
-  }
+   # Extracts all source/header file names from "main" and its transitive
+   # dependencies for tokenization.
+   pw_relative_source_file_names("source_file_names") {
+     deps = [ ":main" ]
+     outputs = [ "$target_gen_dir/source_file_names.json" ]
+   }
 
 
 .. warning::
diff --git a/pw_assert_zephyr/docs.rst b/pw_assert_zephyr/docs.rst
index b70a15e..1277892 100644
--- a/pw_assert_zephyr/docs.rst
+++ b/pw_assert_zephyr/docs.rst
@@ -17,5 +17,5 @@
 Zephyr's assert configs can be used to control the behavior via CONFIG_ASSERT_
 and CONFIG_ASSERT_LEVEL_.
 
-.. _CONFIG_ASSERT: https://docs.zephyrproject.org/latest/reference/kconfig/CONFIG_ASSERT.html#std-kconfig-CONFIG_ASSERT
-.. _CONFIG_ASSERT_LEVEL: https://docs.zephyrproject.org/latest/reference/kconfig/CONFIG_ASSERT_LEVEL.html#std-kconfig-CONFIG_ASSERT_LEVEL
+.. _CONFIG_ASSERT: https://docs.zephyrproject.org/latest/kconfig.html#CONFIG_ASSERT
+.. _CONFIG_ASSERT_LEVEL: https://docs.zephyrproject.org/latest/kconfig.html#CONFIG_ASSERT_LEVEL
diff --git a/pw_async_basic/docs.rst b/pw_async_basic/docs.rst
index 0363a26..7692407 100644
--- a/pw_async_basic/docs.rst
+++ b/pw_async_basic/docs.rst
@@ -20,20 +20,20 @@
 
 .. code-block::
 
-  pw_async_TASK_BACKEND="$dir_pw_async_basic:task"
-  pw_async_FAKE_DISPATCHER_BACKEND="$dir_pw_async_basic:fake_dispatcher"
+   pw_async_TASK_BACKEND="$dir_pw_async_basic:task"
+   pw_async_FAKE_DISPATCHER_BACKEND="$dir_pw_async_basic:fake_dispatcher"
 
 
 Next, create a target that depends on ``//pw_async_basic:dispatcher``:
 
 .. code-block::
 
-  pw_executable("hello_world") {
-    sources = [ "hello_world.cc" ]
-    deps = [
-      "//pw_async_basic:dispatcher",
-    ]
-  }
+   pw_executable("hello_world") {
+     sources = [ "hello_world.cc" ]
+     deps = [
+       "//pw_async_basic:dispatcher",
+     ]
+   }
 
 Next, construct and use a ``BasicDispatcher``.
 
diff --git a/pw_base64/base64.cc b/pw_base64/base64.cc
index 08d2325..862aebb 100644
--- a/pw_base64/base64.cc
+++ b/pw_base64/base64.cc
@@ -145,14 +145,18 @@
   return static_cast<size_t>(binary - static_cast<uint8_t*>(output)) - pad;
 }
 
+extern "C" bool pw_Base64IsValidChar(char base64_char) {
+  return !(base64_char < kMinValidChar || base64_char > kMaxValidChar ||
+           CharToBits(base64_char) == kX /* invalid char */);
+}
+
 extern "C" bool pw_Base64IsValid(const char* base64_data, size_t base64_size) {
   if (base64_size % kEncodedGroupSize != 0) {
     return false;
   }
 
   for (size_t i = 0; i < base64_size; ++i) {
-    if (base64_data[i] < kMinValidChar || base64_data[i] > kMaxValidChar ||
-        CharToBits(base64_data[i]) == kX /* invalid char */) {
+    if (!pw_Base64IsValidChar(base64_data[i])) {
       return false;
     }
   }
diff --git a/pw_base64/public/pw_base64/base64.h b/pw_base64/public/pw_base64/base64.h
index 6b42d7e..dc5be2d 100644
--- a/pw_base64/public/pw_base64/base64.h
+++ b/pw_base64/public/pw_base64/base64.h
@@ -53,6 +53,9 @@
                        size_t base64_size_bytes,
                        void* output);
 
+// Returns true if provided char is a valid Base64 character.
+bool pw_Base64IsValidChar(char base64_char);
+
 // Returns true if the provided string is valid Base64 encoded data. Accepts
 // either the standard (+/) or URL-safe (-_) alphabets.
 //
@@ -193,6 +196,12 @@
   return pw_Base64IsValid(base64.data(), base64.size());
 }
 
+/// @param[in] base64 The character to check. Can be encoded with either the
+/// standard (`+/`) or URL-safe (`-_`) alphabet.
+///
+/// @returns `true` if the provided character is a valid Base64 character.
+inline bool IsValidChar(char base64) { return pw_Base64IsValidChar(base64); }
+
 }  // namespace pw::base64
 
 #endif  // __cplusplus
diff --git a/pw_bloat/docs.rst b/pw_bloat/docs.rst
index 85441e8..a9da474 100644
--- a/pw_bloat/docs.rst
+++ b/pw_bloat/docs.rst
@@ -121,37 +121,37 @@
   sources that override the global ones (if specified).
 
 
-.. code::
+.. code-block::
 
-  import("$dir_pw_bloat/bloat.gni")
+   import("$dir_pw_bloat/bloat.gni")
 
-  executable("empty_base") {
-    sources = [ "empty_main.cc" ]
-  }
+   executable("empty_base") {
+     sources = [ "empty_main.cc" ]
+   }
 
-  executable("hello_world_printf") {
-    sources = [ "hello_printf.cc" ]
-  }
+   executable("hello_world_printf") {
+     sources = [ "hello_printf.cc" ]
+   }
 
-  executable("hello_world_iostream") {
-    sources = [ "hello_iostream.cc" ]
-  }
+   executable("hello_world_iostream") {
+     sources = [ "hello_iostream.cc" ]
+   }
 
-  pw_size_diff("my_size_report") {
-    base = ":empty_base"
-    data_sources = "symbols,segments"
-    binaries = [
-      {
-        target = ":hello_world_printf"
-        label = "Hello world using printf"
-      },
-      {
-        target = ":hello_world_iostream"
-        label = "Hello world using iostream"
-        data_sources = "symbols"
-      },
-    ]
-  }
+   pw_size_diff("my_size_report") {
+     base = ":empty_base"
+     data_sources = "symbols,segments"
+     binaries = [
+       {
+         target = ":hello_world_printf"
+         label = "Hello world using printf"
+       },
+       {
+         target = ":hello_world_iostream"
+         label = "Hello world using iostream"
+         data_sources = "symbols"
+       },
+     ]
+   }
 
 A sample ``pw_size_diff`` ReST size report table can be found within module
 docs. For example, see the :ref:`pw_checksum-size-report` section of the
@@ -169,60 +169,60 @@
 * ``data_sources``: Optional list of data sources to organize outputs.
 * ``source_filter``: Optional regex to filter labels in the output.
 
-.. code::
+.. code-block::
 
-  import("$dir_pw_bloat/bloat.gni")
+   import("$dir_pw_bloat/bloat.gni")
 
-  executable("hello_world_iostream") {
-    sources = [ "hello_iostream.cc" ]
-  }
+   executable("hello_world_iostream") {
+     sources = [ "hello_iostream.cc" ]
+   }
 
-  pw_size_report("hello_world_iostream_size_report") {
-    target = ":hello_iostream"
-    data_sources = "segments,symbols"
-    source_filter = "pw::hello"
-  }
+   pw_size_report("hello_world_iostream_size_report") {
+     target = ":hello_iostream"
+     data_sources = "segments,symbols"
+     source_filter = "pw::hello"
+   }
 
 Sample Single Binary ASCII Table Generated
 
 .. code-block::
 
-  ┌─────────────┬──────────────────────────────────────────────────┬──────┐
-  │segment_names│                      symbols                     │ sizes│
-  ├═════════════┼══════════════════════════════════════════════════┼══════┤
-  │FLASH        │                                                  │12,072│
-  │             │pw::kvs::KeyValueStore::InitializeMetadata()      │   684│
-  │             │pw::kvs::KeyValueStore::Init()                    │   456│
-  │             │pw::kvs::internal::EntryCache::Find()             │   444│
-  │             │pw::kvs::FakeFlashMemory::Write()                 │   240│
-  │             │pw::kvs::internal::Entry::VerifyChecksumInFlash() │   228│
-  │             │pw::kvs::KeyValueStore::GarbageCollectSector()    │   220│
-  │             │pw::kvs::KeyValueStore::RemoveDeletedKeyEntries() │   220│
-  │             │pw::kvs::KeyValueStore::AppendEntry()             │   204│
-  │             │pw::kvs::KeyValueStore::Get()                     │   194│
-  │             │pw::kvs::internal::Entry::Read()                  │   188│
-  │             │pw::kvs::ChecksumAlgorithm::Finish()              │    26│
-  │             │pw::kvs::internal::Entry::ReadKey()               │    26│
-  │             │pw::kvs::internal::Sectors::BaseAddress()         │    24│
-  │             │pw::kvs::ChecksumAlgorithm::Update()              │    20│
-  │             │pw::kvs::FlashTestPartition()                     │     8│
-  │             │pw::kvs::FakeFlashMemory::Disable()               │     6│
-  │             │pw::kvs::FakeFlashMemory::Enable()                │     6│
-  │             │pw::kvs::FlashMemory::SelfTest()                  │     6│
-  │             │pw::kvs::FlashPartition::Init()                   │     6│
-  │             │pw::kvs::FlashPartition::sector_size_bytes()      │     6│
-  │             │pw::kvs::FakeFlashMemory::IsEnabled()             │     4│
-  ├─────────────┼──────────────────────────────────────────────────┼──────┤
-  │RAM          │                                                  │ 1,424│
-  │             │test_kvs                                          │   992│
-  │             │pw::kvs::(anonymous namespace)::test_flash        │   384│
-  │             │pw::kvs::(anonymous namespace)::test_partition    │    24│
-  │             │pw::kvs::FakeFlashMemory::no_errors_              │    12│
-  │             │borrowable_kvs                                    │     8│
-  │             │kvs_entry_count                                   │     4│
-  ├═════════════┼══════════════════════════════════════════════════┼══════┤
-  │Total        │                                                  │13,496│
-  └─────────────┴──────────────────────────────────────────────────┴──────┘
+   ┌─────────────┬──────────────────────────────────────────────────┬──────┐
+   │segment_names│                      symbols                     │ sizes│
+   ├═════════════┼══════════════════════════════════════════════════┼══════┤
+   │FLASH        │                                                  │12,072│
+   │             │pw::kvs::KeyValueStore::InitializeMetadata()      │   684│
+   │             │pw::kvs::KeyValueStore::Init()                    │   456│
+   │             │pw::kvs::internal::EntryCache::Find()             │   444│
+   │             │pw::kvs::FakeFlashMemory::Write()                 │   240│
+   │             │pw::kvs::internal::Entry::VerifyChecksumInFlash() │   228│
+   │             │pw::kvs::KeyValueStore::GarbageCollectSector()    │   220│
+   │             │pw::kvs::KeyValueStore::RemoveDeletedKeyEntries() │   220│
+   │             │pw::kvs::KeyValueStore::AppendEntry()             │   204│
+   │             │pw::kvs::KeyValueStore::Get()                     │   194│
+   │             │pw::kvs::internal::Entry::Read()                  │   188│
+   │             │pw::kvs::ChecksumAlgorithm::Finish()              │    26│
+   │             │pw::kvs::internal::Entry::ReadKey()               │    26│
+   │             │pw::kvs::internal::Sectors::BaseAddress()         │    24│
+   │             │pw::kvs::ChecksumAlgorithm::Update()              │    20│
+   │             │pw::kvs::FlashTestPartition()                     │     8│
+   │             │pw::kvs::FakeFlashMemory::Disable()               │     6│
+   │             │pw::kvs::FakeFlashMemory::Enable()                │     6│
+   │             │pw::kvs::FlashMemory::SelfTest()                  │     6│
+   │             │pw::kvs::FlashPartition::Init()                   │     6│
+   │             │pw::kvs::FlashPartition::sector_size_bytes()      │     6│
+   │             │pw::kvs::FakeFlashMemory::IsEnabled()             │     4│
+   ├─────────────┼──────────────────────────────────────────────────┼──────┤
+   │RAM          │                                                  │ 1,424│
+   │             │test_kvs                                          │   992│
+   │             │pw::kvs::(anonymous namespace)::test_flash        │   384│
+   │             │pw::kvs::(anonymous namespace)::test_partition    │    24│
+   │             │pw::kvs::FakeFlashMemory::no_errors_              │    12│
+   │             │borrowable_kvs                                    │     8│
+   │             │kvs_entry_count                                   │     4│
+   ├═════════════┼══════════════════════════════════════════════════┼══════┤
+   │Total        │                                                  │13,496│
+   └─────────────┴──────────────────────────────────────────────────┴──────┘
 
 
 Size reports are typically included in ReST documentation, as described in
@@ -248,17 +248,17 @@
 * ``deps``: List of ``pw_size_report`` targets whose data to collect.
 * ``output``: Path to the output JSON file.
 
-.. code::
+.. code-block::
 
-  import("$dir_pw_bloat/bloat.gni")
+   import("$dir_pw_bloat/bloat.gni")
 
-  pw_size_report_aggregation("image_sizes") {
-     deps = [
-       ":app_image_size_report",
-       ":bootloader_image_size_report",
-     ]
-     output = "$root_gen_dir/artifacts/image_sizes.json"
-  }
+   pw_size_report_aggregation("image_sizes") {
+      deps = [
+        ":app_image_size_report",
+        ":bootloader_image_size_report",
+      ]
+      output = "$root_gen_dir/artifacts/image_sizes.json"
+   }
 
 Documentation integration
 =========================
@@ -270,7 +270,7 @@
 For example, the ``simple_bloat_loop`` and ``simple_bloat_function`` size
 reports under ``//pw_bloat/examples`` are imported into this file as follows:
 
-.. code:: rst
+.. code-block:: rst
 
   Simple bloat loop example
   ^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -313,16 +313,16 @@
 
 .. code-block::
 
-    84.2%  1023Ki    FLASH
-      94.2%   963Ki    Free space
-       5.8%  59.6Ki    Used space
-    15.8%   192Ki    RAM
-     100.0%   192Ki    Used space
-     0.0%     512    VECTOR_TABLE
-      96.9%     496    Free space
-       3.1%      16    Used space
-     0.0%       0    Not resident in memory
-       NAN%       0    Used space
+   84.2%  1023Ki    FLASH
+     94.2%   963Ki    Free space
+      5.8%  59.6Ki    Used space
+   15.8%   192Ki    RAM
+    100.0%   192Ki    Used space
+    0.0%     512    VECTOR_TABLE
+     96.9%     496    Free space
+      3.1%      16    Used space
+    0.0%       0    Not resident in memory
+      NAN%       0    Used space
 
 
 ``utilization`` data source
@@ -345,125 +345,125 @@
 
 .. code-block::
 
-  MEMORY
-  {
-    FLASH(rx) : \
-      ORIGIN = PW_BOOT_FLASH_BEGIN, \
-      LENGTH = PW_BOOT_FLASH_SIZE
-    RAM(rwx) : \
-      ORIGIN = PW_BOOT_RAM_BEGIN, \
-      LENGTH = PW_BOOT_RAM_SIZE
-  }
+   MEMORY
+   {
+     FLASH(rx) : \
+       ORIGIN = PW_BOOT_FLASH_BEGIN, \
+       LENGTH = PW_BOOT_FLASH_SIZE
+     RAM(rwx) : \
+       ORIGIN = PW_BOOT_RAM_BEGIN, \
+       LENGTH = PW_BOOT_RAM_SIZE
+   }
 
-  SECTIONS
-  {
-    /* Main executable code. */
-    .code : ALIGN(4)
-    {
-      /* Application code. */
-      *(.text)
-      *(.text*)
-      KEEP(*(.init))
-      KEEP(*(.fini))
+   SECTIONS
+   {
+     /* Main executable code. */
+     .code : ALIGN(4)
+     {
+       /* Application code. */
+       *(.text)
+       *(.text*)
+       KEEP(*(.init))
+       KEEP(*(.fini))
 
-      . = ALIGN(4);
-      /* Constants.*/
-      *(.rodata)
-      *(.rodata*)
-    } >FLASH
+       . = ALIGN(4);
+       /* Constants.*/
+       *(.rodata)
+       *(.rodata*)
+     } >FLASH
 
-    /* Explicitly initialized global and static data. (.data)*/
-    .static_init_ram : ALIGN(4)
-    {
-      *(.data)
-      *(.data*)
-      . = ALIGN(4);
-    } >RAM AT> FLASH
+     /* Explicitly initialized global and static data. (.data)*/
+     .static_init_ram : ALIGN(4)
+     {
+       *(.data)
+       *(.data*)
+       . = ALIGN(4);
+     } >RAM AT> FLASH
 
-    /* Zero initialized global/static data. (.bss) */
-    .zero_init_ram (NOLOAD) : ALIGN(4)
-    {
-      *(.bss)
-      *(.bss*)
-      *(COMMON)
-      . = ALIGN(4);
-    } >RAM
-  }
+     /* Zero initialized global/static data. (.bss) */
+     .zero_init_ram (NOLOAD) : ALIGN(4)
+     {
+       *(.bss)
+       *(.bss*)
+       *(COMMON)
+       . = ALIGN(4);
+     } >RAM
+   }
 
 Could be modified as follows enable ``Free Space`` reporting:
 
 .. code-block::
 
-  MEMORY
-  {
-    FLASH(rx) : ORIGIN = PW_BOOT_FLASH_BEGIN, LENGTH = PW_BOOT_FLASH_SIZE
-    RAM(rwx) : ORIGIN = PW_BOOT_RAM_BEGIN, LENGTH = PW_BOOT_RAM_SIZE
+   MEMORY
+   {
+     FLASH(rx) : ORIGIN = PW_BOOT_FLASH_BEGIN, LENGTH = PW_BOOT_FLASH_SIZE
+     RAM(rwx) : ORIGIN = PW_BOOT_RAM_BEGIN, LENGTH = PW_BOOT_RAM_SIZE
 
-    /* Each memory region above has an associated .*.unused_space section that
-     * overlays the unused space at the end of the memory segment. These
-     * segments are used by pw_bloat.bloaty_config to create the utilization
-     * data source for bloaty size reports.
-     *
-     * These sections MUST be located immediately after the last section that is
-     * placed in the respective memory region or lld will issue a warning like:
-     *
-     *   warning: ignoring memory region assignment for non-allocatable section
-     *      '.VECTOR_TABLE.unused_space'
-     *
-     * If this warning occurs, it's also likely that LLD will have created quite
-     * large padded regions in the ELF file due to bad cursor operations. This
-     * can cause ELF files to balloon from hundreds of kilobytes to hundreds of
-     * megabytes.
-     *
-     * Attempting to add sections to the memory region AFTER the unused_space
-     * section will cause the region to overflow.
-     */
-  }
+     /* Each memory region above has an associated .*.unused_space section that
+      * overlays the unused space at the end of the memory segment. These
+      * segments are used by pw_bloat.bloaty_config to create the utilization
+      * data source for bloaty size reports.
+      *
+      * These sections MUST be located immediately after the last section that is
+      * placed in the respective memory region or lld will issue a warning like:
+      *
+      *   warning: ignoring memory region assignment for non-allocatable section
+      *      '.VECTOR_TABLE.unused_space'
+      *
+      * If this warning occurs, it's also likely that LLD will have created quite
+      * large padded regions in the ELF file due to bad cursor operations. This
+      * can cause ELF files to balloon from hundreds of kilobytes to hundreds of
+      * megabytes.
+      *
+      * Attempting to add sections to the memory region AFTER the unused_space
+      * section will cause the region to overflow.
+      */
+   }
 
-  SECTIONS
-  {
-    /* Main executable code. */
-    .code : ALIGN(4)
-    {
-      /* Application code. */
-      *(.text)
-      *(.text*)
-      KEEP(*(.init))
-      KEEP(*(.fini))
+   SECTIONS
+   {
+     /* Main executable code. */
+     .code : ALIGN(4)
+     {
+       /* Application code. */
+       *(.text)
+       *(.text*)
+       KEEP(*(.init))
+       KEEP(*(.fini))
 
-      . = ALIGN(4);
-      /* Constants.*/
-      *(.rodata)
-      *(.rodata*)
-    } >FLASH
+       . = ALIGN(4);
+       /* Constants.*/
+       *(.rodata)
+       *(.rodata*)
+     } >FLASH
 
-    /* Explicitly initialized global and static data. (.data)*/
-    .static_init_ram : ALIGN(4)
-    {
-      *(.data)
-      *(.data*)
-      . = ALIGN(4);
-    } >RAM AT> FLASH
+     /* Explicitly initialized global and static data. (.data)*/
+     .static_init_ram : ALIGN(4)
+     {
+       *(.data)
+       *(.data*)
+       . = ALIGN(4);
+     } >RAM AT> FLASH
 
-    /* Defines a section representing the unused space in the FLASH segment.
-     * This MUST be the last section assigned to the FLASH region.
-     */
-    PW_BLOAT_UNUSED_SPACE(FLASH)
+     /* Defines a section representing the unused space in the FLASH segment.
+      * This MUST be the last section assigned to the FLASH region.
+      */
+     PW_BLOAT_UNUSED_SPACE(FLASH)
 
-    /* Zero initialized global/static data. (.bss). */
-    .zero_init_ram (NOLOAD) : ALIGN(4)
-    {
-      *(.bss)
-      *(.bss*)
-      *(COMMON)
-      . = ALIGN(4);
-    } >RAM
+     /* Zero initialized global/static data. (.bss). */
+     .zero_init_ram (NOLOAD) : ALIGN(4)
+     {
+       *(.bss)
+       *(.bss*)
+       *(COMMON)
+       . = ALIGN(4);
+     } >RAM
 
-    /* Defines a section representing the unused space in the RAM segment. This
-     * MUST be the last section assigned to the RAM region.
-     */
-    PW_BLOAT_UNUSED_SPACE(RAM)
-  }
+     /* Defines a section representing the unused space in the RAM segment. This
+      * MUST be the last section assigned to the RAM region.
+      */
+     PW_BLOAT_UNUSED_SPACE(RAM)
+   }
 
 The preprocessor macro ``PW_BLOAT_UNUSED_SPACE`` is defined in
 ``pw_bloat/bloat_macros.ld``. To use these macros include this file in your
@@ -518,7 +518,7 @@
 
 .. code-block::
 
-  PW_BLOAT_MEMORY_REGION(FLASH)
+   PW_BLOAT_MEMORY_REGION(FLASH)
 
 As another example, if you have two aliased memory regions (``DCTM`` and
 ``ITCM``) into the same effective memory named you'd like to call ``RAM``, then
@@ -526,5 +526,6 @@
 
 .. code-block::
 
-  PW_BLOAT_MEMORY_REGION_MAP(RAM, ITCM)
-  PW_BLOAT_MEMORY_REGION_MAP(RAM, DTCM)
+   PW_BLOAT_MEMORY_REGION_MAP(RAM, ITCM)
+   PW_BLOAT_MEMORY_REGION_MAP(RAM, DTCM)
+
diff --git a/pw_bloat/py/label_test.py b/pw_bloat/py/label_test.py
index 2e22368..07bfe83 100644
--- a/pw_bloat/py/label_test.py
+++ b/pw_bloat/py/label_test.py
@@ -51,24 +51,24 @@
 
     def test_data_source_single_insert_total_size(self):
         ds_map = DataSourceMap(['a', 'b', 'c'])
-        ds_map.insert_label_hierachy(['FLASH', '.code', 'main()'], 30)
+        ds_map.insert_label_hierarchy(['FLASH', '.code', 'main()'], 30)
         self.assertEqual(ds_map.get_total_size(), 30)
 
     def test_data_source_multiple_insert_total_size(self):
         ds_map = DataSourceMap(['a', 'b', 'c'])
-        ds_map.insert_label_hierachy(['FLASH', '.code', 'main()'], 30)
-        ds_map.insert_label_hierachy(['RAM', '.code', 'foo()'], 100)
+        ds_map.insert_label_hierarchy(['FLASH', '.code', 'main()'], 30)
+        ds_map.insert_label_hierarchy(['RAM', '.code', 'foo()'], 100)
         self.assertEqual(ds_map.get_total_size(), 130)
 
     def test_parsing_generator_three_datasource_names(self):
         ds_map = DataSourceMap(['a', 'b', 'c'])
         for label in LIST_LABELS:
-            ds_map.insert_label_hierachy(
+            ds_map.insert_label_hierarchy(
                 [label.parents[0], label.parents[1], label.name], label.size
             )
         list_labels_three = [*LIST_LABELS, Label(name='total', size=350)]
-        for label_hiearchy in ds_map.labels():
-            self.assertIn(label_hiearchy, list_labels_three)
+        for label_hierarchy in ds_map.labels():
+            self.assertIn(label_hierarchy, list_labels_three)
         self.assertEqual(ds_map.get_total_size(), 350)
 
     def test_parsing_generator_two_datasource_names(self):
@@ -79,18 +79,18 @@
             Label(name='bar()', size=220, parents=tuple(['RAM'])),
         ]
         for label in ds_label_list:
-            ds_map.insert_label_hierachy(
+            ds_map.insert_label_hierarchy(
                 [label.parents[0], label.name], label.size
             )
         list_labels_two = [*ds_label_list, Label(name='total', size=350)]
-        for label_hiearchy in ds_map.labels():
-            self.assertIn(label_hiearchy, list_labels_two)
+        for label_hierarchy in ds_map.labels():
+            self.assertIn(label_hierarchy, list_labels_two)
         self.assertEqual(ds_map.get_total_size(), 350)
 
     def test_parsing_generator_specified_datasource_1(self):
         ds_map = DataSourceMap(['a', 'b', 'c'])
         for label in LIST_LABELS:
-            ds_map.insert_label_hierachy(
+            ds_map.insert_label_hierarchy(
                 [label.parents[0], label.parents[1], label.name], label.size
             )
         list_labels_ds_b = [
@@ -98,14 +98,14 @@
             Label(name='.heap', size=320, parents=tuple(['RAM'])),
         ]
         list_labels_ds_b += [Label(name='total', size=350)]
-        for label_hiearchy in ds_map.labels(1):
-            self.assertIn(label_hiearchy, list_labels_ds_b)
+        for label_hierarchy in ds_map.labels(1):
+            self.assertIn(label_hierarchy, list_labels_ds_b)
         self.assertEqual(ds_map.get_total_size(), 350)
 
     def test_parsing_generator_specified_datasource_str_2(self):
         ds_map = DataSourceMap(['a', 'b', 'c'])
         for label in LIST_LABELS:
-            ds_map.insert_label_hierachy(
+            ds_map.insert_label_hierarchy(
                 [label.parents[0], label.parents[1], label.name], label.size
             )
         list_labels_ds_a = [
@@ -113,14 +113,14 @@
             Label(name='RAM', size=320, parents=tuple([])),
         ]
         list_labels_ds_a += [Label(name='total', size=350)]
-        for label_hiearchy in ds_map.labels(0):
-            self.assertIn(label_hiearchy, list_labels_ds_a)
+        for label_hierarchy in ds_map.labels(0):
+            self.assertIn(label_hierarchy, list_labels_ds_a)
         self.assertEqual(ds_map.get_total_size(), 350)
 
     def test_parsing_generator_specified_datasource_int(self):
         ds_map = DataSourceMap(['a', 'b', 'c'])
         for label in LIST_LABELS:
-            ds_map.insert_label_hierachy(
+            ds_map.insert_label_hierarchy(
                 [label.parents[0], label.parents[1], label.name], label.size
             )
         list_labels_ds_a = [
@@ -128,14 +128,14 @@
             Label(name='RAM', size=320, parents=tuple([])),
         ]
         list_labels_ds_a += [Label(name='total', size=350)]
-        for label_hiearchy in ds_map.labels(0):
-            self.assertIn(label_hiearchy, list_labels_ds_a)
+        for label_hierarchy in ds_map.labels(0):
+            self.assertIn(label_hierarchy, list_labels_ds_a)
         self.assertEqual(ds_map.get_total_size(), 350)
 
     def test_parsing_generator_specified_datasource_int_2(self):
         ds_map = DataSourceMap(['a', 'b', 'c'])
         for label in LIST_LABELS:
-            ds_map.insert_label_hierachy(
+            ds_map.insert_label_hierarchy(
                 [label.parents[0], label.parents[1], label.name], label.size
             )
         list_labels_ds_b = [
@@ -143,21 +143,21 @@
             Label(name='.heap', size=320, parents=tuple(['RAM'])),
         ]
         list_labels_ds_b += [Label(name='total', size=350)]
-        for label_hiearchy in ds_map.labels(1):
-            self.assertIn(label_hiearchy, list_labels_ds_b)
+        for label_hierarchy in ds_map.labels(1):
+            self.assertIn(label_hierarchy, list_labels_ds_b)
         self.assertEqual(ds_map.get_total_size(), 350)
 
     def test_diff_same_ds_labels_diff_sizes(self):
         """Same map with different sizes."""
         ds_map = DataSourceMap(['a', 'b', 'c'])
         for label in LIST_LABELS:
-            ds_map.insert_label_hierachy(
+            ds_map.insert_label_hierarchy(
                 [label.parents[0], label.parents[1], label.name], label.size
             )
 
         ds_map2 = DataSourceMap(['a', 'b', 'c'])
         for label in LIST_LABELS:
-            ds_map2.insert_label_hierachy(
+            ds_map2.insert_label_hierarchy(
                 [label.parents[0], label.parents[1], label.name],
                 label.size + 10,
             )
@@ -185,28 +185,30 @@
 
         ds_map_diff = ds_map.diff(ds_map2)
 
-        for label_hiearchy in ds_map_diff.labels():
-            self.assertIn(label_hiearchy, list_labels_ds_b)
+        for label_hierarchy in ds_map_diff.labels():
+            self.assertIn(label_hierarchy, list_labels_ds_b)
 
     def test_diff_missing_ds_labels_diff_sizes(self):
         """Different map with different sizes."""
         ds_map = DataSourceMap(['a', 'b', 'c'])
         for label in LIST_LABELS:
-            ds_map.insert_label_hierachy(
+            ds_map.insert_label_hierarchy(
                 [label.parents[0], label.parents[1], label.name], label.size
             )
 
         ds_map2 = DataSourceMap(['a', 'b', 'c'])
         for label in LIST_LABELS[:-1]:
-            ds_map2.insert_label_hierachy(
+            ds_map2.insert_label_hierarchy(
                 [label.parents[0], label.parents[1], label.name],
                 label.size + 20,
             )
-        ds_map2.insert_label_hierachy(
+        ds_map2.insert_label_hierarchy(
             [label.parents[0], label.parents[1], 'foobar()'], label.size + 20
         )
 
-        ds_map2.insert_label_hierachy(["LOAD #5", 'random_load', 'func()'], 250)
+        ds_map2.insert_label_hierarchy(
+            ["LOAD #5", 'random_load', 'func()'], 250
+        )
 
         list_labels_ds_b = [
             Label(
@@ -234,8 +236,8 @@
 
         ds_map_diff = ds_map2.diff(ds_map)
 
-        for label_hiearchy in ds_map_diff.labels(0):
-            self.assertIn(label_hiearchy, list_labels_ds_b)
+        for label_hierarchy in ds_map_diff.labels(0):
+            self.assertIn(label_hierarchy, list_labels_ds_b)
 
 
 if __name__ == '__main__':
diff --git a/pw_bloat/py/pw_bloat/label.py b/pw_bloat/py/pw_bloat/label.py
index d23615e..2fc663f 100644
--- a/pw_bloat/py/pw_bloat/label.py
+++ b/pw_bloat/py/pw_bloat/label.py
@@ -109,7 +109,7 @@
 class DataSourceMap:
     """Module to store an array of DataSources and capacities.
 
-    An organize way to store a hierachy of labels and their sizes.
+    An organized way to store a hierarchy of labels and their sizes.
     Includes a capacity array to hold regex patterns for applying
     capacities to matching label names.
 
@@ -125,7 +125,7 @@
         vmsize_index = top_row.index('vmsize')
         ds_map_tsv = cls(top_row[:vmsize_index])
         for row in reader:
-            ds_map_tsv.insert_label_hierachy(
+            ds_map_tsv.insert_label_hierarchy(
                 row[:vmsize_index], int(row[vmsize_index])
             )
         return ds_map_tsv
@@ -143,25 +143,25 @@
             child_label in self._data_sources[ds_index][parent_label]
         )
 
-    def insert_label_hierachy(
+    def insert_label_hierarchy(
         self,
         label_hierarchy: Iterable[str],
         size: int,
         diff_exist: Optional[bool] = None,
     ) -> None:
-        """Insert a hierachy of labels with its size."""
+        """Insert a hierarchy of labels with its size."""
 
         # Insert initial '__base__' data source that holds the
         # running total size.
         self._data_sources[0].add_label(
             '__base__', self._BASE_TOTAL_LABEL, size
         )
-        complete_label_hierachy = [self._BASE_TOTAL_LABEL, *label_hierarchy]
-        for index in range(len(complete_label_hierachy) - 1):
-            if complete_label_hierachy[index]:
+        complete_label_hierarchy = [self._BASE_TOTAL_LABEL, *label_hierarchy]
+        for index in range(len(complete_label_hierarchy) - 1):
+            if complete_label_hierarchy[index]:
                 self._data_sources[index + 1].add_label(
-                    complete_label_hierachy[index],
-                    complete_label_hierachy[index + 1],
+                    complete_label_hierarchy[index],
+                    complete_label_hierarchy[index + 1],
                     size,
                     diff_exist,
                 )
@@ -181,7 +181,7 @@
         for b_label in base.labels(last_data_source):
             if last_data_source > 0:
                 curr_parent = b_label.parents[-1]
-            lb_hierachy_names = [*b_label.parents, b_label.name]
+            lb_hierarchy_names = [*b_label.parents, b_label.name]
 
             # Check if label exists in target binary DataSourceMap.
             # Subtract base from target size and insert diff size
@@ -196,16 +196,16 @@
                 ) - b_label.size
 
                 if diff_size:
-                    diff_dsm.insert_label_hierachy(
-                        lb_hierachy_names, diff_size, True
+                    diff_dsm.insert_label_hierarchy(
+                        lb_hierarchy_names, diff_size, True
                     )
                 else:
-                    diff_dsm.insert_label_hierachy(lb_hierachy_names, 0, True)
+                    diff_dsm.insert_label_hierarchy(lb_hierarchy_names, 0, True)
 
             # label is not present in target - insert with negative size
             else:
-                diff_dsm.insert_label_hierachy(
-                    lb_hierachy_names, -1 * b_label.size, False
+                diff_dsm.insert_label_hierarchy(
+                    lb_hierarchy_names, -1 * b_label.size, False
                 )
 
         # Iterate through all of target labels
@@ -218,7 +218,7 @@
             if not base.label_exists(
                 parent_data_source_index, curr_parent, t_label.name
             ):
-                diff_dsm.insert_label_hierachy(
+                diff_dsm.insert_label_hierarchy(
                     [*t_label.parents, f"{t_label.name}"], t_label.size, False
                 )
 
diff --git a/pw_bloat/py/pw_bloat/label_output.py b/pw_bloat/py/pw_bloat/label_output.py
index 938cc90..0ee44ab 100644
--- a/pw_bloat/py/pw_bloat/label_output.py
+++ b/pw_bloat/py/pw_bloat/label_output.py
@@ -201,7 +201,7 @@
 
     def create_table(self) -> str:
         """Parse DataSourceMap to create ASCII table."""
-        curr_lb_hierachy = None
+        curr_lb_hierarchy = None
         last_diff_name = ''
         if self._diff_mode:
             self._ascii_table_rows.extend([*self._label_title_row()])
@@ -219,7 +219,7 @@
 
             has_entries = True
 
-            new_lb_hierachy = tuple(
+            new_lb_hierarchy = tuple(
                 [
                     *self._get_ds_label_size(curr_label.parents),
                     self._LabelContent(
@@ -230,9 +230,9 @@
                 ]
             )
             diff_list = self._diff_label_names(
-                curr_lb_hierachy, new_lb_hierachy
+                curr_lb_hierarchy, new_lb_hierarchy
             )
-            curr_lb_hierachy = new_lb_hierachy
+            curr_lb_hierarchy = new_lb_hierarchy
 
             if curr_label.parents and curr_label.parents[0] == last_diff_name:
                 continue
diff --git a/pw_blob_store/docs.rst b/pw_blob_store/docs.rst
index 57f211f..e881014 100644
--- a/pw_blob_store/docs.rst
+++ b/pw_blob_store/docs.rst
@@ -47,15 +47,15 @@
 
 .. code-block:: cpp
 
-  BlobStore::BlobWriterWithBuffer writer(my_blob_store);
-  writer.Open();
-  writer.Write(my_data);
+   BlobStore::BlobWriterWithBuffer writer(my_blob_store);
+   writer.Open();
+   writer.Write(my_data);
 
-  // ...
+   // ...
 
-  // A close is implied when a BlobWriter is destroyed. Manually closing a
-  // BlobWriter enables error handling on Close() failure.
-  writer.Close();
+   // A close is implied when a BlobWriter is destroyed. Manually closing a
+   // BlobWriter enables error handling on Close() failure.
+   writer.Close();
 
 Erasing a BlobStore
 ===================
@@ -83,13 +83,13 @@
 
 .. code-block:: cpp
 
-  constexpr size_t kMaxFileNameLength = 48;
-  BlobStore::BlobWriterWithBuffer<kMaxFileNameLength> writer(my_blob_store);
-  writer.Open();
-  writer.SetFileName("stonks.jpg");
-  writer.Write(my_data);
-  // ...
-  writer.Close();
+   constexpr size_t kMaxFileNameLength = 48;
+   BlobStore::BlobWriterWithBuffer<kMaxFileNameLength> writer(my_blob_store);
+   writer.Open();
+   writer.SetFileName("stonks.jpg");
+   writer.Write(my_data);
+   // ...
+   writer.Close();
 
 Reading from a BlobStore
 ------------------------
@@ -97,12 +97,12 @@
 readers/writers may be open/active if a ``BlobWriter`` is opened on a blob
 store.
 
-  0) Create BlobReader instance
-  1) BlobReader::Open()
-  2) Read data using BlobReader::Read() or
-     BlobReader::GetMemoryMappedBlob(). BlobReader is seekable. Use
-     BlobReader::Seek() to read from a desired offset.
-  3) BlobReader::Close()
+0) Create BlobReader instance
+1) BlobReader::Open()
+2) Read data using BlobReader::Read() or
+   BlobReader::GetMemoryMappedBlob(). BlobReader is seekable. Use
+   BlobReader::Seek() to read from a desired offset.
+3) BlobReader::Close()
 
 --------------------------
 FileSystem RPC integration
@@ -119,6 +119,5 @@
 
 .. include:: blob_size
 
-
 .. note::
   The documentation for this module is currently incomplete.
diff --git a/pw_bluetooth/BUILD.gn b/pw_bluetooth/BUILD.gn
index 7f7ea2c..9ef081c 100644
--- a/pw_bluetooth/BUILD.gn
+++ b/pw_bluetooth/BUILD.gn
@@ -80,21 +80,60 @@
     visibility = [ ":*" ]
   }
 
-  emboss_cc_library("emboss_hci") {
+  emboss_cc_library("emboss_hci_common") {
     public_configs = [ ":emboss_include_path" ]
-    source = "public/pw_bluetooth/hci.emb"
+    source = "public/pw_bluetooth/hci_common.emb"
   }
 
-  emboss_cc_library("emboss_vendor") {
+  emboss_cc_library("emboss_hci_commands") {
     public_configs = [ ":emboss_include_path" ]
-    source = "public/pw_bluetooth/vendor.emb"
-    imports = [ "public/pw_bluetooth/hci.emb" ]
-    deps = [ ":emboss_hci" ]
+    source = "public/pw_bluetooth/hci_commands.emb"
+    imports = [ "public/pw_bluetooth/hci_common.emb" ]
+    deps = [ ":emboss_hci_common" ]
+  }
+
+  emboss_cc_library("emboss_hci_events") {
+    public_configs = [ ":emboss_include_path" ]
+    source = "public/pw_bluetooth/hci_events.emb"
+    imports = [ "public/pw_bluetooth/hci_common.emb" ]
+    deps = [ ":emboss_hci_common" ]
+  }
+
+  emboss_cc_library("emboss_hci_vendor") {
+    public_configs = [ ":emboss_include_path" ]
+    source = "public/pw_bluetooth/hci_vendor.emb"
+    imports = [ "public/pw_bluetooth/hci_common.emb" ]
+    deps = [ ":emboss_hci_common" ]
+  }
+
+  emboss_cc_library("emboss_hci_test") {
+    public_configs = [ ":emboss_include_path" ]
+    source = "public/pw_bluetooth/hci_test.emb"
+    imports = [ "public/pw_bluetooth/hci_common.emb" ]
+    deps = [ ":emboss_hci_common" ]
+  }
+
+  group("emboss_hci_group") {
+    public_configs = [ ":emboss_include_path" ]
+    public_deps = [
+      ":emboss_hci_commands",
+      ":emboss_hci_common",
+      ":emboss_hci_events",
+      ":emboss_hci_vendor",
+    ]
   }
 } else {
-  group("emboss_hci") {
+  group("emboss_hci_common") {
   }
-  group("emboss_vendor") {
+  group("emboss_hci_commands") {
+  }
+  group("emboss_hci_events") {
+  }
+  group("emboss_hci_vendor") {
+  }
+  group("emboss_hci_test") {
+  }
+  group("emboss_hci_group") {
   }
 }
 
@@ -131,11 +170,12 @@
 
 pw_test("emboss_test") {
   enable_if = dir_pw_third_party_emboss != ""
-  configs = [ "$dir_pigweed/third_party/emboss:flags" ]
   sources = [ "emboss_test.cc" ]
   deps = [
-    ":emboss_hci",
-    ":emboss_vendor",
+    ":emboss_hci_commands",
+    ":emboss_hci_events",
+    ":emboss_hci_test",
+    ":emboss_hci_vendor",
   ]
 }
 
diff --git a/pw_bluetooth/emboss_test.cc b/pw_bluetooth/emboss_test.cc
index 9db81fb..ba7706c 100644
--- a/pw_bluetooth/emboss_test.cc
+++ b/pw_bluetooth/emboss_test.cc
@@ -12,8 +12,10 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 #include "gtest/gtest.h"
-#include "pw_bluetooth/hci.emb.h"
-#include "pw_bluetooth/vendor.emb.h"
+#include "pw_bluetooth/hci_commands.emb.h"
+#include "pw_bluetooth/hci_events.emb.h"
+#include "pw_bluetooth/hci_test.emb.h"
+#include "pw_bluetooth/hci_vendor.emb.h"
 
 namespace pw::bluetooth {
 namespace {
diff --git a/pw_bluetooth/public/pw_bluetooth/hci.emb b/pw_bluetooth/public/pw_bluetooth/hci.emb
deleted file mode 100644
index 9e2f789..0000000
--- a/pw_bluetooth/public/pw_bluetooth/hci.emb
+++ /dev/null
@@ -1,3910 +0,0 @@
-# Copyright 2023 The Pigweed Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-#     https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-# This file contains Emboss definitions for Host Controller Interface packets
-# and types found in the Bluetooth Core Specification. The Emboss compiler is
-# used to generate a C++ header from this file.
-
-[$default byte_order: "LittleEndian"]
-[(cpp) namespace: "pw::bluetooth::emboss"]
-# =========================== Constants =================================
-
-
-enum CodingFormat:
-  -- Coding formats from assigned numbers.
-  -- (https://www.bluetooth.com/specifications/assigned-numbers/host-controller-interface)
-  [maximum_bits: 8]
-  U_LAW           = 0x00
-  A_LAW           = 0x01
-  CVSD            = 0x02
-  TRANSPARENT     = 0x03
-  LINEAR_PCM      = 0x04
-  MSBC            = 0x05
-  LC3             = 0x06
-  G729A           = 0x07
-  VENDOR_SPECIFIC = 0xFF
-
-
-enum GenericEnableParam:
-  -- Binary values that can be generically passed to HCI commands that expect a
-  -- 1-octet boolean "enable"/"disable" parameter.
-  [maximum_bits: 8]
-  DISABLE = 0x00
-  ENABLE  = 0x01
-
-
-enum CoreSpecificationVersion:
-  -- Bluetooth Core Specification version
-  [maximum_bits: 8]
-  V1_0B    = 0x00  # v1.0b
-  V1_1     = 0x01  # v1.1
-  V1_2     = 0x02  # v1.2
-  V2_0_EDR = 0x03  # v2.0+EDR
-  V2_1_EDR = 0x04  # v2.0+EDR
-  V3_0_HS  = 0x05  # v3.0+HS
-  V4_0     = 0x06  # v4.0
-  V4_1     = 0x07  # v4.1
-  V4_2     = 0x08  # v4.2
-  V5_0     = 0x09  # v5.0
-  V5_1     = 0x0A  # v5.1
-  V5_2     = 0x0B  # v5.2
-  V5_3     = 0x0C  # v5.3
-  V5_4     = 0x0D  # v5.4
-
-
-enum InquiryAccessCode:
-  -- General- and Device-specific Inquiry Access Codes (DIACs) for use in Inquiry
-  -- command LAP fields.
-  -- (https://www.bluetooth.com/specifications/assigned-numbers/baseband)
-  [maximum_bits: 24]
-  GIAC = 0x9E8B33
-    -- General Inquiry Access Code
-
-  LIAC = 0x9E8B00
-    -- Limited Dedicated Inquiry Access Code
-
-
-enum PcmDataFormat:
-  -- PCM data formats from assigned numbers.
-  -- (https://www.bluetooth.com/specifications/assigned-numbers/host-controller-interface)
-  [maximum_bits: 8]
-  NOT_APPLICABLE  = 0x00
-  ONES_COMPLEMENT = 0x01
-  TWOS_COMPLEMENT = 0x02
-  SIGN_MAGNITUDE  = 0x03
-  UNSIGNED        = 0x04
-
-
-enum ScoDataPath:
-  [maximum_bits: 8]
-  HCI             = 0x00
-  AUDIO_TEST_MODE = 0xFF
-    -- 0x01 - 0xFE specify the logical channel number (vendor specific)
-
-
-enum ConnectionRole:
-  [maximum_bits: 8]
-  CENTRAL    = 0x00
-  PERIPHERAL = 0x01
-
-
-enum PageTimeout:
-  [maximum_bits: 16]
-  MIN     = 0x0001
-  MAX     = 0xFFFF
-  DEFAULT = 0x2000
-
-
-enum ScanInterval:
-  -- The minimum and maximum range values for Page and Inquiry Scan Interval (in time slices)
-  -- Page Scan Interval: (see Core Spec v5.0, Vol 2, Part E, Section 7.3.19)
-  -- Inquiry Scan Interval: (see Core Spec v5.0, Vol 2, Part E, Section 7.3.21)
-  [maximum_bits: 16]
-  MIN = 0x0012
-  MAX = 0x1000
-
-
-enum ScanWindow:
-  -- The minimum and maximum range valeus for Page and Inquiry Scan Window (in time slices)
-  -- Page Scan Window: (see Core Spec v5.0, Vol 2, Part E, Section 7.3.19)
-  -- Inquiry Scan Window: (see Core Spec v5.0, Vol 2, Part E, Section 7.3.21)
-  [maximum_bits: 16]
-  MIN = 0x0011
-  MAX = 0x1000
-
-
-enum StatusCode:
-  -- HCI Error Codes. Refer to Core Spec v5.0, Vol 2, Part D for definitions and
-  -- descriptions. All enum values are in increasing numerical order, however the
-  -- values are listed below for clarity.
-  [maximum_bits: 8]
-  SUCCESS                                           = 0x00
-  UNKNOWN_COMMAND                                   = 0x01
-  UNKNOWN_CONNECTION_ID                             = 0x02
-  HARDWARE_FAILURE                                  = 0x03
-  PAGE_TIMEOUT                                      = 0x04
-  AUTHENTICATION_FAILURE                            = 0x05
-  PIN_OR_KEY_MISSING                                = 0x06
-  MEMORY_CAPACITY_EXCEEDED                          = 0x07
-  CONNECTION_TIMEOUT                                = 0x08
-  CONNECTION_LIMIT_EXCEEDED                         = 0x09
-  SYNCHRONOUS_CONNECTION_LIMIT_EXCEEDED             = 0x0A
-  CONNECTION_ALREADY_EXISTS                         = 0x0B
-  COMMAND_DISALLOWED                                = 0x0C
-  CONNECTION_REJECTED_LIMITED_RESOURCES             = 0x0D
-  CONNECTION_REJECTED_SECURITY                      = 0x0E
-  CONNECTION_REJECTED_BAD_BD_ADDR                   = 0x0F
-  CONNECTION_ACCEPT_TIMEOUT_EXCEEDED                = 0x10
-  UNSUPPORTED_FEATURE_OR_PARAMETER                  = 0x11
-  INVALID_HCI_COMMAND_PARAMETERS                    = 0x12
-  REMOTE_USER_TERMINATED_CONNECTION                 = 0x13
-  REMOTE_DEVICE_TERMINATED_CONNECTION_LOW_RESOURCES = 0x14
-  REMOTE_DEVICE_TERMINATED_CONNECTION_POWER_OFF     = 0x15
-  CONNECTION_TERMINATED_BY_LOCAL_HOST               = 0x16
-  REPEATED_ATTEMPTS                                 = 0x17
-  PAIRING_NOT_ALLOWED                               = 0x18
-  UNKNOWN_LMP_PDU                                   = 0x19
-  UNSUPPORTED_REMOTE_FEATURE                        = 0x1A
-  SCO_OFFSET_REJECTED                               = 0x1B
-  SCO_INTERVAL_REJECTED                             = 0x1C
-  SCO_AIRMODE_REJECTED                              = 0x1D
-  INVALID_LMP_OR_LL_PARAMETERS                      = 0x1E
-  UNSPECIFIED_ERROR                                 = 0x1F
-  UNSUPPORTED_LMP_OR_LL_PARAMETER_VALUE             = 0x20
-  ROLE_CHANGE_NOT_ALLOWED                           = 0x21
-  LMP_OR_LL_RESPONSE_TIMEOUT                        = 0x22
-  LMP_ERROR_TRANSACTION_COLLISION                   = 0x23
-  LMP_PDU_NOT_ALLOWED                               = 0x24
-  ENCRYPTION_MODE_NOT_ACCEPTABLE                    = 0x25
-  LINK_KEY_CANNOT_BE_CHANGED                        = 0x26
-  REQUESTED_QOS_NOT_SUPPORTED                       = 0x27
-  INSTANT_PASSED                                    = 0x28
-  PAIRING_WITH_UNIT_KEY_NOT_SUPPORTED               = 0x29
-  DIFFERENT_TRANSACTION_COLLISION                   = 0x2A
-  RESERVED_0                                        = 0x2B
-  QOS_UNACCEPTABLE_PARAMETER                        = 0x2C
-  QOS_REJECTED                                      = 0x2D
-  CHANNEL_CLASSIFICATION_NOT_SUPPORTED              = 0x2E
-  INSUFFICIENT_SECURITY                             = 0x2F
-  PARAMETER_OUT_OF_MANDATORY_RANGE                  = 0x30
-  RESERVED_1                                        = 0x31
-  ROLE_SWITCH_PENDING                               = 0x32
-  RESERVED_2                                        = 0x33
-  RESERVED_SLOT_VIOLATION                           = 0x34
-  ROLE_SWITCH_FAILED                                = 0x35
-  EXTENDED_INQUIRY_RESPONSE_TOO_LARGE               = 0x36
-  SECURE_SIMPLE_PAIRING_NOT_SUPPORTED_BY_HOST       = 0x37
-  HOST_BUSY_PAIRING                                 = 0x38
-  CONNECTION_REJECTED_NO_SUITABLE_CHANNEL_FOUND     = 0x39
-  CONTROLLER_BUSY                                   = 0x3A
-  UNACCEPTABLE_CONNECTION_PARAMETERS                = 0x3B
-  DIRECTED_ADVERTISING_TIMEOUT                      = 0x3C
-  CONNECTION_TERMINATED_MIC_FAILURE                 = 0x3D
-  CONNECTION_FAILED_TO_BE_ESTABLISHED               = 0x3E
-  MAC_CONNECTION_FAILED                             = 0x3F
-  COARSE_CLOCK_ADJUSTMENT_REJECTED                  = 0x40
-  # 5.0
-  TYPE_0_SUBMAP_NOT_DEFINED                         = 0x41
-  UNKNOWN_ADVERTISING_IDENTIFIER                    = 0x42
-  LIMIT_REACHED                                     = 0x43
-  OPERATION_CANCELLED_BY_HOST                       = 0x44
-
-
-bits ScoPacketType:
-  -- Bitmask of SCO packet types.
-  # SCO packet types
-  0     [+1]  Flag  hv1
-  $next [+1]  Flag  hv2
-  $next [+1]  Flag  hv3
-  # eSCO packet types
-  $next [+1]  Flag  ev3
-  $next [+1]  Flag  ev4
-  $next [+1]  Flag  ev5
-  $next [+1]  Flag  not_2_ev3
-  $next [+1]  Flag  not_3_ev3
-  $next [+1]  Flag  not_2_ev5
-  $next [+1]  Flag  not_3_ev5
-  $next [+6]  UInt  padding
-
-
-bits PacketType:
-  -- Bitmask values for supported Packet Types
-  -- Used for HCI_Create_Connection and HCI_Change_Connection_Packet_Type
-  -- All other bits reserved for future use.
-  1  [+1]  Flag  disable_2_dh1
-  2  [+1]  Flag  disable_3_dh1
-  3  [+1]  Flag  enable_dm1     # Note: always on in >= v1.2
-  4  [+1]  Flag  enable_dh1
-  8  [+1]  Flag  disable_2_dh3
-  9  [+1]  Flag  disable_3_dh3
-  10 [+1]  Flag  enable_dm3
-  11 [+1]  Flag  enable_dh3
-  12 [+1]  Flag  disable_2_dh5
-  13 [+1]  Flag  disable_3_dh5
-  14 [+1]  Flag  enable_dm5
-  15 [+1]  Flag  enable_dh5
-
-
-enum PageScanRepetitionMode:
-  -- The page scan repetition mode, representing a maximum time between Page Scans.
-  -- (See Core Spec v5.0, Volume 2, Part B, Section 8.3.1)
-  [maximum_bits: 8]
-  R0_ = 0x00  # Continuous Scan
-  R1_ = 0x01  # <= 1.28s
-  R2_ = 0x02  # <= 2.56s
-
-
-bits ClockOffset:
-  -- Clock Offset. The lower 15 bits are set to the clock offset as retrieved
-  -- by an Inquiry. The highest bit is set to 1 if the rest of this parameter
-  -- is valid.
-  15 [+1]     Flag  valid
-  if valid:
-    0  [+15]  UInt  clock_offset
-
-
-struct BdAddr:
-  -- Bluetooth Device Address
-  0 [+6]  UInt  bd_addr
-
-
-enum IoCapability:
-  -- All other values reserved for future use.
-  [maximum_bits: 8]
-  DISPLAY_ONLY       = 0x00
-  DISPLAY_YES_NO     = 0x01
-  KEYBOARD_ONLY      = 0x02
-  NO_INPUT_NO_OUTPUT = 0x03
-
-
-enum OobDataPresent:
-  -- Whether there is out-of-band data present, and what type.
-  -- All other values reserved for future use.
-  [maximum_bits: 8]
-  NOT_PRESENT   = 0x00
-  P192_         = 0x01
-  P256_         = 0x02
-  P192_AND_P256 = 0x03
-
-# inclusive-language: disable
-
-
-enum AuthenticationRequirements:
-  -- All options without MITM do not require MITM protection, and a numeric
-  -- comparison with automatic accept is allowed.
-  -- All options with MITM do require MITM protection, and IO capabilities should
-  -- be used to determine the authentication procedure.
-  [maximum_bits: 8]
-  NO_BONDING             = 0x00
-  MITM_NO_BONDING        = 0x01
-  DEDICATED_BONDING      = 0x02
-  MITM_DEDICATED_BONDING = 0x03
-  GENERAL_BONDING        = 0x04
-  MITM_GENERAL_BONDING   = 0x05
-
-# inclusive-language: enable
-
-
-bits ScanEnableBits:
-  -- Bitmask Values for the Scan_Enable parameter in a
-  -- HCI_(Read,Write)_Scan_Enable command.
-  0     [+1]  Flag  inquiry
-    -- Inquiry scan enabled
-
-  $next [+1]  Flag  page
-    -- Page scan enabled
-
-  $next [+6]  UInt  padding
-
-
-enum InquiryScanType:
-  [maximum_bits: 8]
-  STANDARD   = 0x00
-    -- Standard scan (Default) (Mandatory)
-
-  INTERLACED = 0x01
-
-
-struct LocalName:
-  0 [+248]  UInt:8[248]  local_name
-
-
-struct ExtendedInquiryResponse:
-  0 [+240]  UInt:8[240]  extended_inquiry_response
-
-
-enum LEExtendedDuplicateFilteringOption:
-  -- Possible values that can be used for the |filter_duplicates| parameter in a
-  -- HCI_LE_Set_Extended_Scan_Enable command.
-  [maximum_bits: 8]
-  DISABLED                           = 0x00
-  ENABLED                            = 0x01
-  ENABLED_RESET_FOR_EACH_SCAN_PERIOD = 0x02
-    -- Duplicate advertisements in a single scan period should not be sent to the
-    -- Host in advertising report events; this setting shall only be used if the
-    -- Period parameter is non-zero.
-
-
-enum MajorDeviceClass:
-  [maximum_bits: 5]
-  MISCELLANEOUS = 0x00
-  COMPUTER      = 0x01
-  PHONE         = 0x02
-  LAN           = 0x03
-  AUDIO_VIDEO   = 0x04
-  PERIPHERAL    = 0x05
-  IMAGING       = 0x06
-  WEARABLE      = 0x07
-  TOY           = 0x08
-  HEALTH        = 0x09
-  UNCATEGORIZED = 0x1F
-
-
-bits MajorServiceClasses:
-  0     [+1]  Flag  limited_discoverable_mode
-  $next [+1]  Flag  le_audio
-  $next [+1]  Flag  reserved
-  $next [+1]  Flag  positioning
-  $next [+1]  Flag  networking
-  $next [+1]  Flag  rendering
-  $next [+1]  Flag  capturing
-  $next [+1]  Flag  object_transfer
-  $next [+1]  Flag  audio
-  $next [+1]  Flag  telephony
-  $next [+1]  Flag  information
-
-
-enum ComputerMinorDeviceClass:
-  [maximum_bits: 6]
-  UNCATEGORIZED       = 0x00
-  DESKTOP_WORKSTATION = 0x01
-  SERVER_CLASS        = 0x02
-  LAPTOP              = 0x03
-  HANDHELD_PC         = 0x04
-  PALM_SIZE_PC        = 0x05
-  WEARABLE            = 0x06
-  TABLET              = 0x07
-
-
-enum PhoneMinorDeviceClass:
-  [maximum_bits: 6]
-  UNCATEGORIZED               = 0x00
-  CELLULAR                    = 0x01
-  CORDLESS                    = 0x02
-  SMARTPHONE                  = 0x03
-  WIRED_MODEM_OR_VOID_GATEWAY = 0x04
-  COMMON_ISDN_ACCESS          = 0x05
-
-
-enum LANMinorDeviceClass:
-  [maximum_bits: 6]
-  FULLY_AVAILABLE      = 0x00
-  UTILIZED_1_TO_17     = 0x08
-  UTILIZED_17_TO_33    = 0x10
-  UTILIZED_33_TO_50    = 0x18
-  UTILIZED_50_TO_67    = 0x20
-  UTILIZED_67_TO_83    = 0x28
-  UTILIZED_83_TO_99    = 0x30
-  NO_SERVICE_AVAILABLE = 0x38
-
-
-enum AudioVideoMinorDeviceClass:
-  [maximum_bits: 6]
-  UNCATEGORIZED                 = 0x00
-  WEARABLE_HEADSET_DEVICE       = 0x01
-  HANDS_FREE_DEVICE             = 0x02
-  RESERVED_0                    = 0x03
-  MICROPHONE                    = 0x04
-  LOUDSPEAKER                   = 0x05
-  HEADPHONES                    = 0x06
-  PORTABLE_AUDIO                = 0x07
-  CAR_AUDIO                     = 0x08
-  SET_TOP_BOX                   = 0x09
-  HIFI_AUDIO_DEVICE             = 0x0A
-  VCR                           = 0x0B
-  VIDEO_CAMERA                  = 0x0C
-  CAMCORDER                     = 0x0D
-  VIDEO_MONITOR                 = 0x0E
-  VIDEO_DISPLAY_AND_LOUDSPEAKER = 0x0F
-  VIDEO_CONFERENCING            = 0x10
-  RESERVED_1                    = 0x11
-  GAMING_TOY                    = 0x12
-
-
-enum PeripheralMinorDeviceClass0:
-  [maximum_bits: 4]
-  UNCATEGORIZED                  = 0x00
-  JOYSTICK                       = 0x01
-  GAMEPAD                        = 0x02
-  REMOTE_CONTROL                 = 0x03
-  SENSING_DEVICE                 = 0x04
-  DIGITIZER_TABLET               = 0x05
-  CARD_READER                    = 0x06
-  DIGITAL_PEN                    = 0x07
-  HANDHELD_SCANNER               = 0x08
-  HANDHELD_GESTURAL_INPUT_DEVICE = 0x09
-
-
-enum PeripheralMinorDeviceClass1:
-  [maximum_bits: 2]
-  UNCATEGORIZED                  = 0x00
-  KEYBOARD                       = 0x01
-  POINTING_DEVICE                = 0x02
-  COMBO_KEYBOARD_POINTING_DEVICE = 0x03
-
-
-bits PeripheralMinorDeviceClass:
-  0     [+4]  PeripheralMinorDeviceClass0  device_class_0
-  $next [+2]  PeripheralMinorDeviceClass1  device_class_1
-
-
-enum ImagingMinorDeviceClass:
-  [maximum_bits: 2]
-  UNCATEGORIZED = 0x00
-
-
-bits ImagingMinorDeviceClassBits:
-  0     [+2]  ImagingMinorDeviceClass  device_class
-  $next [+1]  Flag                     display
-  $next [+1]  Flag                     camera
-  $next [+1]  Flag                     scanner
-  $next [+1]  Flag                     printer
-
-
-enum WearableMinorDeviceClass:
-  [maximum_bits: 6]
-  WRISTWATCH = 0x01
-  PAGER      = 0x02
-  JACKET     = 0x03
-  HELMET     = 0x04
-  GLASSES    = 0x05
-
-
-enum ToyMinorDeviceClass:
-  [maximum_bits: 6]
-  ROBOT      = 0x01
-  VEHICLE    = 0x02
-  DOLL       = 0x03
-  CONTROLLER = 0x04
-  GAME       = 0x05
-
-
-enum HealthMinorDeviceClass:
-  [maximum_bits: 6]
-  UNDEFINED                 = 0x00
-  BLOOD_PRESSURE_MONITOR    = 0x01
-  THERMOMETER               = 0x02
-  WEIGHING_SCALE            = 0x03
-  GLUCOSE_METER             = 0x04
-  PULSE_OXIMETER            = 0x05
-  HEART_PULSE_RATE_MONITOR  = 0x06
-  HEALTH_DATA_DISPLAY       = 0x07
-  STEP_COUNTER              = 0x08
-  BODY_COMPOSITION_ANALYZER = 0x09
-  PEAK_FLOW_MONITOR         = 0x0A
-  MEDICATION_MONITOR        = 0x0B
-  KNEE_PROSTHESIS           = 0x0C
-  ANKLE_PROSTHESIS          = 0x0D
-  GENERIC_HEALTH_MANAGER    = 0x0E
-  PERSONAL_MOBILITY_DEVICE  = 0x0F
-
-
-bits ClassOfDevice:
-  -- Defined in Assigned Numbers for the Baseband
-  -- https://www.bluetooth.com/specifications/assigned-numbers/baseband
-  0     [+2]    UInt                         zero
-    [requires: this == 0]
-
-  if major_device_class == MajorDeviceClass.COMPUTER:
-    2     [+6]  ComputerMinorDeviceClass     computer_minor_device_class
-
-  if major_device_class == MajorDeviceClass.PHONE:
-    2     [+6]  PhoneMinorDeviceClass        phone_minor_device_class
-
-  if major_device_class == MajorDeviceClass.LAN:
-    2     [+6]  LANMinorDeviceClass          lan_minor_device_class
-
-  if major_device_class == MajorDeviceClass.AUDIO_VIDEO:
-    2     [+6]  AudioVideoMinorDeviceClass   audio_video_minor_device_class
-
-  if major_device_class == MajorDeviceClass.PERIPHERAL:
-    2     [+6]  PeripheralMinorDeviceClass   peripheral_minor_device_class
-
-  if major_device_class == MajorDeviceClass.IMAGING:
-    2     [+6]  ImagingMinorDeviceClassBits  imaging_minor_device_class
-
-  if major_device_class == MajorDeviceClass.WEARABLE:
-    2     [+6]  WearableMinorDeviceClass     wearable_minor_device_class
-
-  if major_device_class == MajorDeviceClass.TOY:
-    2     [+6]  ToyMinorDeviceClass          toy_minor_device_class
-
-  if major_device_class == MajorDeviceClass.HEALTH:
-    2     [+6]  HealthMinorDeviceClass       health_minor_device_class
-
-  8     [+5]    MajorDeviceClass             major_device_class
-  $next [+11]   MajorServiceClasses          major_service_classes
-
-
-enum LEPeriodicAdvertisingCreateSyncUseParams:
-  [maximum_bits: 1]
-
-  USE_PARAMS                   = 0x00
-    -- Use the Advertising_SID, Advertiser_Address_Type, and Adertiser_Address parameters to
-    -- determine which advertiser to listen to.
-
-  USE_PERIODIC_ADVERTISER_LIST = 0x01
-    -- Use the Periodic Advertiser List to determine which advertiser to listen to.
-
-
-bits LEPeriodicAdvertisingCreateSyncOptions:
-  -- First parameter to the LE Periodic Advertising Create Sync command
-
-  0     [+1]  LEPeriodicAdvertisingCreateSyncUseParams  advertiser_source
-
-  $next [+1]  Flag                                      enable_reporting
-    -- 0: Reporting initially enabled
-    -- 1: Reporting initially disabled
-
-  $next [+1]  Flag                                      enable_duplicate_filtering
-    -- 0: Duplicate filtering initially disabled
-    -- 1: Duplicate filtering initially enabled
-
-  $next [+5]  UInt                                      padding
-    -- Reserved for future use
-
-
-enum LEPeriodicAdvertisingAddressType:
-  -- Possible values that can be specified for the |advertiser_address_type| in an LE Periodic
-  -- Advertising Create Sync command.
-  [maximum_bits: 8]
-  PUBLIC = 0x00
-    -- Public Device Address or Public Identity Address
-
-  RANDOM = 0x01
-    -- Random Device Address or Random (static) Identity Address
-
-
-bits LEPeriodicAdvertisingSyncCTEType:
-  -- Bit definitions for a |sync_cte_type| field in an LE Periodic Advertising Create Sync command
-
-  0     [+1]  Flag  dont_sync_aoa
-    -- Do not sync to packets with an AoA Constant Tone Extension
-
-  $next [+1]  Flag  dont_sync_aod_1us
-    -- Do not sync to packets with an AoD Constant Tone Extension with 1 microsecond slots
-
-  $next [+1]  Flag  dont_sync_aod_2us
-    -- Do not sync to packets with an AoD Constant Tone Extension with 2 microsecond slots
-
-  $next [+1]  Flag  dont_sync_type_3
-    -- Do not sync to packets with a typoe 3 Constant Tone Extension (currently reserved for future
-    -- use)
-
-  $next [+1]  Flag  dont_sync_without_cte
-    -- Do not sync to packets without a Constant Tone Extension
-
-  $next [+3]  UInt  padding
-    -- Reserved for future use
-
-
-enum LEAddressType:
-  -- Possible values that can be reported for various |*_address_type| parameters in various LE packets.
-  [maximum_bits: 8]
-  PUBLIC          = 0x00
-    -- Public Device Address (default)
-
-  RANDOM          = 0x01
-    -- Random Device Address
-
-  PUBLIC_IDENTITY = 0x02
-    -- Public Identity Address (corresponds to Resolved Private Address)
-
-  RANDOM_IDENTITY = 0x03
-    -- Random (static) Identity Address (corresponds to Resolved Private Address)
-
-  ANONYMOUS       = 0xFF
-    -- No address provided (anonymous advertisement)
-    -- This is a special value that is only used in LE Advertising Report events.
-
-
-enum LEOwnAddressType:
-  -- Possible values that can be used for the |own_address_type| parameter in various LE packets.
-
-  [maximum_bits: 8]
-
-  PUBLIC                    = 0x00
-    -- Public Device Address
-
-  RANDOM                    = 0x01
-    -- Random Device Address
-
-  PRIVATE_DEFAULT_TO_PUBLIC = 0x02
-    -- Controller generates the Resolvable Private Address based on the local IRK from the resolving
-    -- list. If the resolving list contains no matching entry, then use the public address.
-
-  PRIVATE_DEFAULT_TO_RANDOM = 0x03
-    -- Controller generates the Resolvable Private Address based on the local IRK from the resolving
-    -- list. If the resolving list contains no matching entry, then use the random address from
-    -- LE_Set_Random_Address.
-
-
-enum LEDirectAddressType:
-  -- Possible values that can be reported for the |direct_address_type| parameter in LE Advertising Report events.
-  [maximum_bits: 8]
-  PUBLIC                                = 0x00
-    -- Public Device Address
-
-  PRIVATE                               = 0x01
-    -- Non-resolvable Private Address or Static Device Address
-
-  RESOLVABLE_PRIVATE_OWN_ADDRESS_PUBLIC = 0x02
-    -- Resolvable Private Address (resolved by Controller; Own_Address_Type was 0x00 or 0x02)
-
-  RESOLVABLE_PRIVATE_OWN_ADDRESS_RANDOM = 0x03
-    -- Resolvable Private Address (resolved by Controller; Own_Address_Type was 0x01 or 0x03)
-
-  RESOLVABLE_PRIVATE                    = 0xFE
-    -- Resolvable Private Address (Controller unable to resolve)
-
-
-enum LEPeerAddressType:
-  -- Possible values that can be used for the address_type parameters in various
-  -- HCI commands
-  [maximum_bits: 8]
-  PUBLIC    = 0x00
-  RANDOM    = 0x01
-  ANONYMOUS = 0xFF
-
-
-enum LEPeerAddressTypeNoAnon:
-  -- Possible values that can be used for the address_type parameters in various
-  -- HCI commands
-  [maximum_bits: 8]
-  PUBLIC    = 0x00
-  RANDOM    = 0x01
-  ANONYMOUS = 0xFF
-
-
-enum LEScanType:
-  -- Possible values that can be used for the |scan_type| parameter in various LE HCI commands.
-  [maximum_bits: 8]
-  PASSIVE = 0x00
-    -- Passive Scanning. No scanning PDUs shall be sent (default)
-
-  ACTIVE  = 0x01
-    -- Active scanning. Scanning PDUs may be sent.
-
-
-enum LEScanFilterPolicy:
-  -- Possible values that can be used for the |filter_policy| parameter in various LE HCI commands
-  [maximum_bits: 8]
-  BASIC_UNFILTERED    = 0x00
-  BASIC_FILTERED      = 0x01
-  EXTENDED_UNFILTERED = 0x02
-  EXTENDED_FILTERED   = 0x03
-
-
-bits LEScanPHYBits:
-  0     [+1]  Flag  le_1m
-    -- Scan advertisements on the LE 1M PHY
-
-  $next [+1]  Flag  padding1
-    -- Reserved for future use
-
-  $next [+1]  Flag  le_coded
-    -- Scan advertisements on the LE Coded PHY
-
-  $next [+5]  UInt  padding2
-    -- Reserved for future use
-
-
-enum LEPrivacyMode:
-  -- Possible values for the |privacy_mode| parameter in an LE Set Privacy Mode
-  -- command
-  [maximum_bits: 8]
-  NETWORK = 0x00
-    -- Use Network Privacy Mode for this peer device (default).
-
-  DEVICE  = 0x01
-    -- Use Device Privacy Mode for this peer device.
-
-
-enum InquiryMode:
-  [maximum_bits: 8]
-  STANDARD = 0x00
-    -- Standard Inquiry Result format (default)
-
-  RSSI     = 0x01
-    -- Inquiry Result format with RSSI
-
-  EXTENDED = 0x02
-    -- Inquiry Result format with RSSI or EIR format
-
-
-enum PageScanType:
-  [maximum_bits: 8]
-  STANDARD_SCAN   = 0x00
-    -- Standard scan (default) (mandatory)
-
-  INTERLACED_SCAN = 0x01
-    -- Interlaced scan (optional)
-
-
-bits LEEventMask:
-  0     [+1]  Flag  le_connection_complete
-  $next [+1]  Flag  le_advertising_report
-  $next [+1]  Flag  le_connection_update_complete
-  $next [+1]  Flag  le_read_remote_features_complete
-  $next [+1]  Flag  le_long_term_key_request
-  $next [+1]  Flag  le_remote_connection_parameter_request
-  $next [+1]  Flag  le_data_length_change
-  $next [+1]  Flag  le_read_local_p256_public_key_complete
-  $next [+1]  Flag  le_generate_dhkey_complete
-  $next [+1]  Flag  le_enhanced_connection_complete
-  $next [+1]  Flag  le_directed_advertising_report
-  $next [+1]  Flag  le_phy_update_complete
-  $next [+1]  Flag  le_extended_advertising_report
-  $next [+1]  Flag  le_periodic_advertising_sync_established
-  $next [+1]  Flag  le_periodic_advertising_report
-  $next [+1]  Flag  le_periodic_advertising_sync_lost
-  $next [+1]  Flag  le_extended_scan_timeout
-  $next [+1]  Flag  le_extended_advertising_set_terminated
-  $next [+1]  Flag  le_scan_request_received
-  $next [+1]  Flag  le_channel_selection_algorithm
-  $next [+1]  Flag  le_connectionless_iq_report
-  $next [+1]  Flag  le_connection_iq_report
-  $next [+1]  Flag  le_cte_request_failed
-  $next [+1]  Flag  le_periodic_advertising_sync_transfer_received_event
-  $next [+1]  Flag  le_cis_established_event
-  $next [+1]  Flag  le_cis_request_event
-  $next [+1]  Flag  le_create_big_complete_event
-  $next [+1]  Flag  le_terminate_big_complete_event
-  $next [+1]  Flag  le_big_sync_established_event
-  $next [+1]  Flag  le_big_sync_lost_event
-  $next [+1]  Flag  le_request_peer_sca_complete_event
-  $next [+1]  Flag  le_path_loss_threshold_event
-  $next [+1]  Flag  le_transmit_power_reporting_event
-  $next [+1]  Flag  le_biginfo_advertising_report_event
-  $next [+1]  Flag  le_subrate_change_event
-
-
-enum LEAdvertisingType:
-  [maximum_bits: 8]
-  CONNECTABLE_AND_SCANNABLE_UNDIRECTED = 0x00
-    -- ADV_IND
-
-  CONNECTABLE_HIGH_DUTY_CYCLE_DIRECTED = 0x01
-    -- ADV_DIRECT_IND
-
-  SCANNABLE_UNDIRECTED                 = 0x02
-    -- ADV_SCAN_IND
-
-  NOT_CONNECTABLE_UNDIRECTED           = 0x03
-    -- ADV_NONCONN_IND
-
-  CONNECTABLE_LOW_DUTY_CYCLE_DIRECTED  = 0x04
-    -- ADV_DIRECT_IND
-
-
-bits LEAdvertisingChannels:
-  0     [+1]  Flag  channel_37
-  $next [+1]  Flag  channel_38
-  $next [+1]  Flag  channel_39
-
-
-enum LEAdvertisingFilterPolicy:
-  [maximum_bits: 8]
-
-  ALLOW_ALL                                                  = 0x00
-    -- Process scan and connection requests from all devices (i.e., the Filter
-    -- Accept List is not in use) (default).
-
-  ALLOW_ALL_CONNECTIONS_AND_USE_FILTER_ACCEPT_LIST_FOR_SCANS = 0x01
-    -- Process connection requests from all devices and scan requests only from
-    -- devices that are in the Filter Accept List.
-
-  ALLOW_ALL_SCANS_AND_USE_FILTER_ACCEPT_LIST_FOR_CONNECTIONS = 0x02
-    -- Process scan requests from all devices and connection requests only from
-    -- devices that are in the Filter Accept List.
-
-  ALLOW_FILTER_ACCEPT_LIST_ONLY                              = 0x03
-    -- Process scan and connection requests only from devices in the Filter
-    -- Accept List.
-
-
-enum LEPrimaryAdvertisingPHY:
-  [maximum_bits: 8]
-  LE_1M       = 0x01
-  LE_CODED    = 0x03
-  LE_CODED_S2 = 0x04
-
-
-enum LESecondaryAdvertisingPHY:
-  [maximum_bits: 8]
-  NONE        = 0x00
-  LE_1M       = 0x01
-  LE_2M       = 0x02
-  LE_CODED    = 0x03
-  LE_CODED_S2 = 0x04
-
-
-enum LEAdvertisingDataStatus:
-  [maximum_bits: 2]
-  COMPLETE             = 0b00
-  INCOMPLETE           = 0b01
-  INCOMPLETE_TRUNCATED = 0b10
-
-
-bits LEExtendedAdvertisingEventType:
-  0     [+1]  Flag                     connectable
-  $next [+1]  Flag                     scannable
-  $next [+1]  Flag                     directed
-  $next [+1]  Flag                     scan_response
-  $next [+1]  Flag                     legacy
-  $next [+2]  LEAdvertisingDataStatus  data_status
-
-
-enum ScanRequestNotifications:
-  [maximum_bits: 8]
-  DISABLED = 0x00
-  ENABLED  = 0x01
-
-
-enum LESetExtendedAdvDataOp:
-  -- Potential values for the Operation parameter in a HCI_LE_Set_Extended_Advertising_Data command.
-  [maximum_bits: 8]
-  INTERMEDIATE_FRAGMENT = 0x00
-    -- Intermediate fragment of fragmented extended advertising data.
-
-  FIRST_FRAGMENT        = 0x01
-    -- First fragment of fragmented extended advertising data.
-
-  LAST_FRAGMENT         = 0x02
-    -- Last fragment of fragmented extended advertising data.
-
-  COMPLETE              = 0x03
-    -- Complete extended advertising data.
-
-  UNCHANGED_DATA        = 0x04
-    -- Unchanged data (just update the Advertising DID)
-
-
-enum LEExtendedAdvFragmentPreference:
-  -- Potential values for the Fragment_Preference parameter in a
-  -- HCI_LE_Set_Extended_Advertising_Data command.
-  [maximum_bits: 8]
-  MAY_FRAGMENT        = 0x00
-    -- The Controller may fragment all Host advertising data
-
-  SHOULD_NOT_FRAGMENT = 0x01
-    -- The Controller should not fragment or should minimize fragmentation of Host advertising data
-
-
-bits LEAdvertisingEventProperties:
-  -- The Advertising_Event_Properties bitfield values used in a HCI LE Set Extended Advertising
-  -- Parameters command.
-  0     [+1]  Flag  connectable
-  $next [+1]  Flag  scannable
-  $next [+1]  Flag  directed
-  $next [+1]  Flag  high_duty_cycle_directed_connectable
-  $next [+1]  Flag  use_legacy_pdus
-  $next [+1]  Flag  anonymous_advertising
-  $next [+1]  Flag  include_tx_power
-
-
-enum FlowControlMode:
-  [maximum_bits: 8]
-  PACKET_BASED     = 0x00
-  DATA_BLOCK_BASED = 0x01
-
-
-bits EventMaskPage2:
-  8  [+1]  Flag  number_of_completed_data_blocks_event
-  14 [+1]  Flag  triggered_clock_capture_event
-  15 [+1]  Flag  synchronization_train_complete_event
-  16 [+1]  Flag  synchronization_train_received_event
-  17 [+1]  Flag  connectionless_peripheral_broadcast_receive_event
-  18 [+1]  Flag  connectionless_peripheral_broadcast_timeout_event
-  19 [+1]  Flag  truncated_page_complete_event
-  20 [+1]  Flag  peripheral_page_response_timeout_event
-  21 [+1]  Flag  connectionless_peripheral_broadcast_channel_map_event
-  22 [+1]  Flag  inquiry_response_notification_event
-  23 [+1]  Flag  authenticated_payload_timeout_expired_event
-  24 [+1]  Flag  sam_status_change_event
-  25 [+1]  Flag  encryption_change_event_v2
-
-
-enum LinkType:
-  [maximum_bits: 8]
-  SCO  = 0x00
-  ACL  = 0x01
-  ESCO = 0x02
-
-
-enum EncryptionStatus:
-  OFF                                = 0x00
-  ON_WITH_E0_FOR_BREDR_OR_AES_FOR_LE = 0x01
-  ON_WITH_AES_FOR_BREDR              = 0x02
-
-
-bits LmpFeatures(page: UInt:8):
-  -- Bit mask of Link Manager Protocol features.
-  [requires: page <= 2]
-  if page == 0:
-    0  [+1]  Flag  three_slot_packets
-    1  [+1]  Flag  five_slot_packets
-    2  [+1]  Flag  encryption
-    3  [+1]  Flag  slot_offset
-    4  [+1]  Flag  timing_accuracy
-    5  [+1]  Flag  role_switch
-    6  [+1]  Flag  hold_mode
-    7  [+1]  Flag  sniff_mode
-    # 8: previously used
-    9  [+1]  Flag  power_control_requests
-    10 [+1]  Flag  channel_quality_driven_data_rate
-    11 [+1]  Flag  sco_link
-    12 [+1]  Flag  hv2_packets
-    13 [+1]  Flag  hv3_packets
-    14 [+1]  Flag  mu_law_log_synchronous_data
-    15 [+1]  Flag  a_law_log_synchronous_data
-    16 [+1]  Flag  cvsd_synchronous_data
-    17 [+1]  Flag  paging_parameter_negotiation
-    18 [+1]  Flag  power_control
-    19 [+1]  Flag  transparent_synchronous_data
-    20 [+3]  UInt  flow_control_lag
-    23 [+1]  Flag  broadcast_encryption
-    # 24: reserved for future use
-    25 [+1]  Flag  enhanced_data_rate_acl_2_mbs_mode
-    26 [+1]  Flag  enhanced_data_rate_acl_3_mbs_mode
-    27 [+1]  Flag  enhanced_inquiry_scan
-    28 [+1]  Flag  interlaced_inquiry_scan
-    29 [+1]  Flag  interlaced_page_scan
-    30 [+1]  Flag  rssi_with_inquiry_results
-    31 [+1]  Flag  extended_sco_link_ev3_packets
-    32 [+1]  Flag  ev4_packets
-    33 [+1]  Flag  ev5_packets
-    # 34: reserved for future use
-    35 [+1]  Flag  afh_capable_peripheral
-    36 [+1]  Flag  afh_classification_peripheral
-    37 [+1]  Flag  bredr_not_supported
-    38 [+1]  Flag  le_supported_controller
-    39 [+1]  Flag  three_slot_enhanced_data_rate_acl_packets
-    40 [+1]  Flag  five_slot_enhanced_data_rate_acl_packets
-    41 [+1]  Flag  sniff_subrating
-    42 [+1]  Flag  pause_encryption
-    43 [+1]  Flag  afh_capable_central
-    44 [+1]  Flag  afh_classification_central
-    45 [+1]  Flag  enhanced_data_rate_esco_2_mbs_mode
-    46 [+1]  Flag  enhanced_data_rate_esco_3_mbs_mode
-    47 [+1]  Flag  three_slot_enhanced_data_rate_esco_packets
-    48 [+1]  Flag  extended_inquiry_response
-    49 [+1]  Flag  simultaneous_le_and_bredr_to_same_device_capable_controller
-    # 50: reserved for future use
-    51 [+1]  Flag  secure_simple_pairing_controller_support
-    52 [+1]  Flag  encapsulated_pdu
-    53 [+1]  Flag  erroneous_data_reporting
-    54 [+1]  Flag  non_flushable_packet_boundary_flag
-    # 55: reserved for future use
-    56 [+1]  Flag  hci_link_supervision_timeout_changed_event
-    57 [+1]  Flag  variable_inquiry_tx_power_level
-    58 [+1]  Flag  enhanced_power_control
-    # 59-62: reserved for future use
-    63 [+1]  Flag  extended_features
-
-  if page == 1:
-    0  [+1]  Flag  secure_simple_pairing_host_support
-    1  [+1]  Flag  le_supported_host
-    # 2: previously used
-    3  [+1]  Flag  secure_connection_host_support
-
-  if page == 2:
-    0  [+1]  Flag  connectionless_peripheral_broadcast_transmitter_operation
-    1  [+1]  Flag  connectionless_peripheral_broadcast_receiver_operation
-    2  [+1]  Flag  synchronization_train
-    3  [+1]  Flag  synchronization_scan
-    4  [+1]  Flag  hci_inquiry_response_notification_event
-    5  [+1]  Flag  generalized_interlaced_scan
-    6  [+1]  Flag  coarse_clock_adjustment
-    # 7: reserved for future use
-    8  [+1]  Flag  secure_connections_controller_support
-    9  [+1]  Flag  ping
-    10 [+1]  Flag  slot_availability_mask
-    11 [+1]  Flag  train_nudging
-
-
-bits LEFeatureSet:
-  0     [+1]  Flag  le_encryption
-  $next [+1]  Flag  connection_parameters_request_procedure
-  $next [+1]  Flag  extended_reject_indication
-  $next [+1]  Flag  peripheral_initiated_features_exchange
-  $next [+1]  Flag  le_ping
-  $next [+1]  Flag  le_data_packet_length_extension
-  $next [+1]  Flag  ll_privacy
-  $next [+1]  Flag  extended_scanning_filter_policies
-  $next [+1]  Flag  le_2m_phy
-  $next [+1]  Flag  stable_modulation_index_transmitter
-  $next [+1]  Flag  stable_modulation_index_receiver
-  $next [+1]  Flag  le_coded_phy
-  $next [+1]  Flag  le_extended_advertising
-  $next [+1]  Flag  le_periodic_advertising
-  $next [+1]  Flag  channel_selection_algorithm_2
-  $next [+1]  Flag  le_power_class_1
-  $next [+1]  Flag  minimum_number_of_used_channels_procedure
-  $next [+1]  Flag  connection_cte_request
-  $next [+1]  Flag  connection_cte_response
-  $next [+1]  Flag  connectionless_cte_transmitter
-  $next [+1]  Flag  connectionless_cte_receiver
-  $next [+1]  Flag  antenna_switching_during_cte_transmission
-  $next [+1]  Flag  antenna_switching_during_cte_reception
-  $next [+1]  Flag  receiving_constant_tone_extensions
-  $next [+1]  Flag  periodic_advertising_sync_transfer_sender
-  $next [+1]  Flag  periodic_advertising_sync_transfer_recipient
-  $next [+1]  Flag  sleep_clock_accuracy_updates
-  $next [+1]  Flag  remote_public_key_validation
-  $next [+1]  Flag  connected_isochronous_stream_central
-  $next [+1]  Flag  connected_isochronous_stream_peripheral
-  $next [+1]  Flag  isochronous_broadcaster
-  $next [+1]  Flag  synchronized_receiver
-  $next [+1]  Flag  connected_isochronous_stream_host_support
-  $next [+1]  Flag  le_power_control_request_1
-  $next [+1]  Flag  le_power_control_request_2
-    -- Previous two bits shall always have the same value.
-
-  $next [+1]  Flag  le_path_loss_monitoring
-  $next [+1]  Flag  periodic_advertising_adi_support
-  $next [+1]  Flag  connection_subrating
-  $next [+1]  Flag  connection_subrating_host_support
-  $next [+1]  Flag  channel_classification
-  $next [+1]  Flag  advertising_coding_selection
-  $next [+1]  Flag  advertising_coding_selection_host_support
-  $next [+1]  Flag  reserved  # Bit 42 is skipped
-  $next [+1]  Flag  periodic_advertising_with_responses_advertiser
-  $next [+1]  Flag  periodic_advertising_with_responses_scanner
-
-
-enum LEClockAccuracy:
-  -- Possible values that can be reported for the |central_clock_accuracy| and
-  -- |advertiser_clock_accuracy| parameters.
-  [maximum_bits: 8]
-  PPM_500 = 0x00
-  PPM_250 = 0x01
-  PPM_150 = 0x02
-  PPM_100 = 0x03
-  PPM_75  = 0x04
-  PPM_50  = 0x05
-  PPM_30  = 0x06
-  PPM_20  = 0x07
-
-
-bits LECISPHYOptions:
-  -- Identifies PHYs that can be used for transmission
-  0     [+1]  Flag  le_1m
-  $next [+1]  Flag  le_2m
-  $next [+1]  Flag  le_coded
-  $next [+5]  UInt  padding
-
-
-struct LESetCIGParametersCISOptions:
-  -- Parameters for the CISes defined in a LESetCIGParametersCommand
-
-  0     [+1]  UInt             cis_id
-    -- Used to identify a CIS
-    [requires: 0x00 <= this <= 0xEF]
-
-  $next [+2]  UInt             max_sdu_c_to_p
-    -- Maximum size, in octets, of the payload from the Central's Host
-    [requires: 0x0000 <= this <= 0x0FFF]
-
-  $next [+2]  UInt             max_sdu_p_to_c
-    -- Maximum size, in octets, of the payload from the Peripheral's Host
-    [requires: 0x0000 <= this <= 0x0FFF]
-
-  $next [+1]  LECISPHYOptions  phy_c_to_p
-    -- Identifies which PHY to use for transmission from the Central to the Peripheral
-
-  $next [+1]  LECISPHYOptions  phy_p_to_c
-    -- Identifies which PHY to use for transmission from the Peripheral to the Central
-
-  $next [+1]  UInt             rtn_c_to_p
-    -- Number of times every CIS Data PDU should be retransmitted from the Central to the
-    -- Peripheral
-
-  $next [+1]  UInt             rtn_p_to_c
-    -- Number of times every CIS Data PDU should be retransmitted from the Peripheral to the
-    -- Central
-
-
-enum LESleepClockAccuracyRange:
-  -- Accuracy of the sleep clock, provided as a range
-  [maximum_bits: 8]
-  PPM_251_TO_500 = 0x00
-  PPM_151_TO_250 = 0x01
-  PPM_101_TO_150 = 0x02
-  PPM_76_TO_100  = 0x03
-  PPM_51_TO_75   = 0x04
-  PPM_31_TO_50   = 0x05
-  PPM_21_TO_30   = 0x06
-  PPM_0_TO_20    = 0x07
-
-
-enum LECISPacking:
-  -- Preferred method of arranging subevents of multiple CISes
-  [maximum_bits: 8]
-  SEQUENTIAL  = 0x00
-  INTERLEAVED = 0x01
-
-
-enum LECISFraming:
-  -- Format of CIS Data PDUs
-  [maximum_bits: 8]
-  UNFRAMED = 0x00
-  FRAMED   = 0x01
-
-
-enum DataPathDirection:
-  [maximum_bits: 8]
-  INPUT  = 0x00
-    -- Host to Controller
-
-  OUTPUT = 0x01
-    -- Controller to Host
-
-# ========================= HCI packet headers ==========================
-
-
-bits OpCodeBits:
-  # Emboss currently lacks support for default field values and cross-type integral equality.
-  # (https://github.com/google/emboss/issues/21)
-  # (https://github.com/google/emboss/issues/23)
-  # Upon the addition of these features, we will transition OpCodeBits to be a parameterized
-  # field which defaults for each HCI packet type to its corresponding OpCode.
-  0     [+10]  UInt  ocf
-  $next [+6]   UInt  ogf
-
-
-struct CommandHeader:
-  -- HCI Command packet header.
-  0     [+2]  OpCodeBits  opcode
-  $next [+1]  UInt        parameter_total_size
-
-
-struct EventHeader:
-  -- HCI Event packet header.
-  0     [+1]  UInt  event_code
-  $next [+1]  UInt  parameter_total_size
-
-# ========================= HCI Command packets =========================
-# Core Spec v5.3 Vol 4, Part E, Section 7
-
-# ========== 7.1 Link Control Commands ==========
-
-
-struct InquiryCommand:
-  -- 7.1.1 Inquiry command (v1.1) (BR/EDR)
-  -- HCI_Inquiry
-  --
-  -- Note: NO Command Complete; Sends Inquiry Complete at the end of the
-  -- inquiry to indicate it's completion. No Inquiry Complete event is sent if
-  -- Inquiry is cancelled.
-
-  let hdr_size = CommandHeader.$size_in_bytes
-
-  0     [+hdr_size]  CommandHeader      header
-
-  $next [+3]         InquiryAccessCode  lap
-    -- LAP (Lower Address Part)
-    -- In the range 0x9E8B00 - 0x9E8B3F, defined by the Bluetooth SIG in
-    -- Baseband Assigned Numbers.
-
-  $next [+1]         UInt               inquiry_length
-    -- Time before the inquiry is halted. Defined in 1.28s units.
-    -- Range: 0x01 to kInquiryLengthMax in hci_constants.h
-
-  $next [+1]         UInt               num_responses
-    -- Maximum number of responses before inquiry is halted.
-    -- Set to 0x00 for unlimited.
-
-
-struct InquiryCancelCommand:
-  -- 7.1.2 Inquiry Cancel command (v1.1) (BR/EDR)
-  -- HCI_Inquiry_Cancel
-  --
-  -- No command parameters
-  let hdr_size = CommandHeader.$size_in_bytes
-  0 [+hdr_size]  CommandHeader  header
-
-# 7.1.3 Periodic Inquiry Mode command
-# HCI_Periodic_Inquiry_Mode
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.1.4 Exit Periodic Inquiry Mode command
-# HCI_Exit_Periodic_Inquiry_Mode
-# TODO: b/265052417 - Definition needs to be added
-
-
-struct CreateConnectionCommand:
-  -- 7.1.5 Create Connection (v1.1) (BR/EDR)
-  -- HCI_Create_Connection
-  --
-  -- NOTE on ReturnParams: No Command Complete event will be sent by the
-  -- Controller to indicate that this command has been completed. Instead, the
-  -- Connection Complete event will indicate that this command has been
-  -- completed.
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]               CommandHeader           header
-  $next [+BdAddr.$size_in_bytes]  BdAddr                  bd_addr
-    -- BD_ADDR of the device to be connected
-
-  $next [+2]                      PacketType              packet_type
-    -- Mask of allowable packet types.
-
-  $next [+1]                      PageScanRepetitionMode  page_scan_repetition_mode
-    -- The Page Scan Repetition Mode of the remote device as retrieved by Inquiry.
-
-  $next [+1]                      UInt                    reserved
-    [requires: this == 0]
-
-  $next [+2]                      ClockOffset             clock_offset
-    -- Clock Offset. The lower 15 bits are set to the clock offset as retrieved
-    -- by an Inquiry. The highest bit is set to 1 if the rest of this parameter
-    -- is valid.
-
-  $next [+1]                      GenericEnableParam      allow_role_switch
-    -- Allow Role Switch.
-    -- Allowed values:
-    --  0x00 - No role switch allowed, this device will be the central
-    --  0x01 - Role switch allowed, this device may become peripheral during
-    --  connection setup
-
-
-struct DisconnectCommand:
-  -- 7.1.6 Disconnect command (v1.1) (BR/EDR & LE)
-  -- HCI_Disconnect
-  --
-  -- NOTE on ReturnParams: No Command Complete event will be sent by the
-  -- Controller to indicate that this command has been completed. Instead, the
-  -- Disconnection Complete event will indicate that this command has been
-  -- completed.
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader  header
-  $next [+2]         UInt           connection_handle
-    -- Connection_Handle (only the lower 12-bits are meaningful).
-    --   Range: 0x0000 to 0x0EFF
-
-  $next [+1]         StatusCode     reason
-    -- Reason for the disconnect.
-
-
-struct CreateConnectionCancelCommand:
-  -- 7.1.7 Create Connection Cancel command (v1.1) (BR/EDR)
-  -- HCI_Create_Connection_Cancel
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]               CommandHeader  header
-  $next [+BdAddr.$size_in_bytes]  BdAddr         bd_addr
-    -- BD_ADDR of the Create Connection Command request
-
-
-struct AcceptConnectionRequestCommand:
-  -- 7.1.8 Accept Connection Request command (v1.1) (BR/EDR)
-  -- HCI_Accept_Connection_Request
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]               CommandHeader   header
-  $next [+BdAddr.$size_in_bytes]  BdAddr          bd_addr
-    -- The 48-bit BD_ADDR of the remote device requesting the connection.
-
-  $next [+1]                      ConnectionRole  role
-
-
-struct RejectConnectionRequestCommand:
-  -- 7.1.9 Reject Connection Request command (v1.1) (BR/EDR)
-  -- HCI_Reject_Connection_Request
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]               CommandHeader  header
-  $next [+BdAddr.$size_in_bytes]  BdAddr         bd_addr
-    -- The 48-bit BD_ADDR of the remote device requesting the connection.
-
-  $next [+1]                      StatusCode     reason
-    -- Must be one of CONNECTION_REJECTED* from StatusCode in this file
-
-
-struct LinkKey:
-  0 [+16]  UInt:8[16]  value
-
-
-struct LinkKeyRequestReplyCommand:
-  -- 7.1.10 Link Key Request Reply command (v1.1) (BR/EDR)
-  -- HCI_Link_Key_Request_Reply
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]               CommandHeader  header
-  $next [+BdAddr.$size_in_bytes]  BdAddr         bd_addr
-    -- The 48-bit BD_ADDR of the remote device requesting the connection.
-
-  let bredr_link_key_size = LinkKey.$size_in_bytes
-  $next [+bredr_link_key_size]    LinkKey        link_key
-    -- Link key to use for the connection with the peer device.
-
-
-struct LinkKeyRequestNegativeReplyCommand:
-  -- 7.1.11 Link Key Request Negative Reply command (v1.1) (BR/EDR)
-  -- HCI_Link_Key_Request_Negative_Reply
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]               CommandHeader  header
-  $next [+BdAddr.$size_in_bytes]  BdAddr         bd_addr
-    -- BD_ADDR of the peer device that the host does not have a link key for.
-
-# 7.1.12 PIN Code Request Reply command
-# HCI_PIN_Code_Request_Reply
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.1.13 PIN Code Request Negative Reply command
-# HCI_PIN_Code_Request_Negative_Reply
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.1.14 Change Connection Packet Type command
-# HCI_Change_Connection_Packet_Type
-# TODO: b/265052417 - Definition needs to be added
-
-
-struct AuthenticationRequestedCommand:
-  -- 7.1.15 Authentication Requested command (v1.1) (BR/EDR)
-  -- HCI_Authentication_Requested
-  --
-  -- NOTE on ReturnParams: No Command Complete event will be sent by the
-  -- Controller to indicate that this command has been completed. Instead, the
-  -- Authentication Complete event will indicate that this command has been
-  -- completed.
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader  header
-  $next [+2]         UInt           connection_handle
-    -- Connection_Handle (only the lower 12-bits are meaningful).
-    --   Range: 0x0000 to 0x0EFF
-    -- Must be the handle of a connected ACL-U logical link.
-
-
-struct SetConnectionEncryptionCommand:
-  -- 7.1.16 Set Connection Encryption command (v1.1) (BR/EDR)
-  -- HCI_Set_Connection_Encryption
-  --
-  -- NOTE on ReturnParams: No Command Complete event will be sent by the
-  -- Controller to indicate that this command has been completed. Instead, the
-  -- Encryption Change event will indicate that this command has been completed.
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader       header
-  $next [+2]         UInt                connection_handle
-    -- Connection_Handle (only the lower 12-bits are meaningful).
-    --   Range: 0x0000 to 0x0EFF
-    -- Must be the handle of a connected ACL-U logical link.
-
-  $next [+1]         GenericEnableParam  encryption_enable
-    -- Whether link level encryption should be turned on or off.
-
-# 7.1.17 Change Connection Link Key command
-# HCI_Change_Connection_Link_Key
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.1.18 Link Key Selection command
-# HCI_Link_Key_Selection
-# TODO: b/265052417 - Definition needs to be added
-
-
-struct RemoteNameRequestCommand:
-  -- 7.1.19 Remote Name Request command (v1.1) (BR/EDR)
-  -- HCI_Remote_Name_Request
-  --
-  -- NOTE on ReturnParams: No Command Complete event will be sent by the
-  -- Controller to indicate that this command has been completed. Instead, the
-  -- Remote Name Request Complete event will indicate that this command has been
-  -- completed.
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]               CommandHeader           header
-  $next [+BdAddr.$size_in_bytes]  BdAddr                  bd_addr
-    -- Address of the device whose name is to be requested.
-
-  $next [+1]                      PageScanRepetitionMode  page_scan_repetition_mode
-    -- Page Scan Repetition Mode of the device, obtained by Inquiry.
-
-  $next [+1]                      UInt                    reserved
-    [requires: this == 0]
-
-  $next [+2]                      ClockOffset             clock_offset
-    -- Clock offset.  The lower 15 bits of this represent bits 16-2
-    -- of CLKNPeripheral-CLK, and the highest bit is set when the other
-    -- bits are valid.
-
-# 7.1.20 Remote Name Request Cancel command
-# HCI_Remote_Name_Request_Cancel
-
-
-struct ReadRemoteSupportedFeaturesCommand:
-  -- 7.1.21 Read Remote Supported Features command (v1.1) (BR/EDR)
-  -- HCI_Read_Remote_Supported_Features
-  --
-  -- NOTE on ReturnParams: No Command Complete event will be sent by the
-  -- Controller to indicate that this command has been completed. Instead, the
-  -- Read Remote Supported Features Complete event will indicate that this
-  -- command has been completed.
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader  header
-  $next [+2]         UInt           connection_handle
-    -- Connection_Handle (only the lower 12-bits are meaningful).
-    --   Range: 0x0000 to 0x0EFF
-    -- Must be the handle of a connected ACL-U logical link.
-
-
-struct ReadRemoteExtendedFeaturesCommand:
-  -- 7.1.22 Read Remote Extended Features command (v1.2) (BR/EDR)
-  -- HCI_Read_Remote_Extended_Features
-  --
-  -- NOTE on ReturnParams: No Command Complete event will be sent by the
-  -- Controller to indicate that this command has been completed. Instead, the
-  -- Read Remote Extended Features Complete event will indicate that this
-  -- command has been completed.
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader  header
-  $next [+2]         UInt           connection_handle
-    -- Connection_Handle (only the lower 12-bits are meaningful).
-    --   Range: 0x0000 to 0x0EFF
-    -- Must be the handle of a connected ACL-U logical link.
-
-  $next [+1]         UInt           page_number
-    -- Page of features to read.
-    -- Values:
-    --  - 0x00 standard features as if requested by Read Remote Supported Features
-    --  - 0x01-0xFF the corresponding features page (see Vol 2, Part C, Sec 3.3).
-
-
-struct ReadRemoteVersionInfoCommand:
-  -- 7.1.23 Read Remote Version Information command (v1.1) (BR/EDR & LE)
-  -- HCI_Read_Remote_Version_Information
-  --
-  -- NOTE on ReturnParams: No Command Complete event will be sent by the
-  -- Controller to indicate that this command has been completed. Instead, the
-  -- Read Remote Version Information Complete event will indicate that this
-  -- command has been completed.
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader  header
-  $next [+2]         UInt           connection_handle
-    -- Connection_Handle (only the lower 12-bits are meaningful).
-    --   Range: 0x0000 to 0x0EFF
-
-# 7.1.24 Read Clock Offset command
-# HCI_Read_Clock_Offset
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.1.25 Read LMP Handle command
-# HCI_Read_LMP_Handle
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.1.26 Setup Synchronous Connection command
-# HCI_Setup_Synchronous_Connection
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.1.27 Accept Synchronous Connection Request command
-# HCI_Accept_Synchronous_Connection_Request
-# TODO: b/265052417 - Definition needs to be added
-
-
-struct RejectSynchronousConnectionRequestCommand:
-  -- 7.1.28 Reject Synchronous Connection command (BR/EDR)
-  -- HCI_Reject_Synchronous_Connection_Request
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]               CommandHeader  header
-  $next [+BdAddr.$size_in_bytes]  BdAddr         bd_addr
-    -- Address of the remote device that sent the request.
-
-  $next [+1]                      StatusCode     reason
-    -- Reason the connection request was rejected.
-
-
-struct IoCapabilityRequestReplyCommand:
-  -- 7.1.29 IO Capability Request Reply command (v2.1 + EDR) (BR/EDR)
-  -- HCI_IO_Capability_Request_Reply
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]               CommandHeader               header
-  $next [+BdAddr.$size_in_bytes]  BdAddr                      bd_addr
-    -- The BD_ADDR of the remote device involved in simple pairing process
-
-  $next [+1]                      IoCapability                io_capability
-    -- The IO capabilities of this device.
-
-  $next [+1]                      OobDataPresent              oob_data_present
-    -- Whether there is out-of-band data present, and what type.
-
-  $next [+1]                      AuthenticationRequirements  authentication_requirements
-    -- Authentication requirements of the host.
-
-
-struct UserConfirmationRequestReplyCommand:
-  -- 7.1.30 User Confirmation Request Reply command (v2.1 + EDR) (BR/EDR)
-  -- HCI_User_Confirmation_Request_Reply
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]               CommandHeader  header
-  $next [+BdAddr.$size_in_bytes]  BdAddr         bd_addr
-    -- The BD_ADDR of the remote device involved in simple pairing process
-
-
-struct UserConfirmationRequestNegativeReplyCommand:
-  -- 7.1.31 User Confirmation Request Negative Reply command (v2.1 + EDR) (BR/EDR)
-  -- HCI_User_Confirmation_Request_Negative_Reply
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]               CommandHeader  header
-  $next [+BdAddr.$size_in_bytes]  BdAddr         bd_addr
-    -- The BD_ADDR of the remote device involved in simple pairing process
-
-
-struct UserPasskeyRequestReplyCommand:
-  -- 7.1.32 User Passkey Request Reply command (v2.1 + EDR) (BR/EDR)
-  -- HCI_User_Passkey_Request_Reply
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]               CommandHeader  header
-  $next [+BdAddr.$size_in_bytes]  BdAddr         bd_addr
-    -- The BD_ADDR of the remote device involved in simple pairing process
-
-  $next [+4]                      UInt           numeric_value
-    -- Numeric value (passkey) entered by user.
-    [requires: 0 <= this <= 999999]
-
-
-struct UserPasskeyRequestNegativeReplyCommand:
-  -- 7.1.33 User Passkey Request Negative Reply command (v2.1 + EDR) (BR/EDR)
-  -- HCI_User_Passkey_Request_Negative_Reply
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]               CommandHeader  header
-  $next [+BdAddr.$size_in_bytes]  BdAddr         bd_addr
-    -- The BD_ADDR of the remote device involved in the simple pairing process.
-
-# 7.1.34 Remote OOB Data Request Reply command
-# HCI_Remote_OOB_Data_Request_Reply
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.1.35 Remote OOB Data Request Negative Reply command
-# HCI_Remote_OOB_Data_Request_Negative_Reply
-# TODO: b/265052417 - Definition needs to be added
-
-
-struct IoCapabilityRequestNegativeReplyCommand:
-  -- 7.1.36 IO Capability Request Negative Reply command (v2.1 + EDR) (BR/EDR)
-  -- HCI_IO_Capability_Request_Negative_Reply
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]               CommandHeader  header
-  $next [+BdAddr.$size_in_bytes]  BdAddr         bd_addr
-    -- The BD_ADDR of the remote device involved in the simple pairing process.
-
-  $next [+1]                      StatusCode     reason
-    -- Reason that Simple Pairing was rejected. See 7.1.36 for valid error codes.
-
-
-struct CodecId:
-  0     [+1]  CodingFormat  coding_format
-  $next [+2]  UInt          company_id
-    -- See assigned numbers.
-
-  $next [+2]  UInt          vendor_codec_id
-    -- Shall be ignored if |coding_format| is not VENDOR_SPECIFIC.
-
-
-struct SynchronousConnectionParameters:
-  -- Enhanced Setup Synchronous Connection Command (CSA2) (BR/EDR)
-
-  struct VendorCodingFormat:
-    0     [+1]  CodingFormat  coding_format
-    $next [+2]  UInt          company_id
-      -- See assigned numbers.
-
-    $next [+2]  UInt          vendor_codec_id
-      -- Shall be ignored if |coding_format| is not VENDOR_SPECIFIC.
-
-  enum ScoRetransmissionEffort:
-    [maximum_bits: 8]
-    NONE              = 0x00
-      -- SCO or eSCO
-
-    POWER_OPTIMIZED   = 0x01
-      -- eSCO only
-
-    QUALITY_OPTIMIZED = 0x02
-      -- eSCO only
-
-    DONT_CARE         = 0xFF
-      -- SCO or eSCO
-
-  0     [+4]         UInt                     transmit_bandwidth
-    -- Transmit bandwidth in octets per second.
-
-  $next [+4]         UInt                     receive_bandwidth
-    -- Receive bandwidth in octets per second.
-
-  let vcf_size = VendorCodingFormat.$size_in_bytes
-
-  $next [+vcf_size]  VendorCodingFormat       transmit_coding_format
-    -- Local Controller -> Remote Controller coding format.
-
-  $next [+vcf_size]  VendorCodingFormat       receive_coding_format
-    -- Remote Controller -> Local Controller coding format.
-
-  $next [+2]         UInt                     transmit_codec_frame_size_bytes
-
-  $next [+2]         UInt                     receive_codec_frame_size_bytes
-
-  $next [+4]         UInt                     input_bandwidth
-    -- Host->Controller data rate in octets per second.
-
-  $next [+4]         UInt                     output_bandwidth
-    -- Controller->Host data rate in octets per second.
-
-  $next [+vcf_size]  VendorCodingFormat       input_coding_format
-    -- Host->Controller coding format.
-
-  $next [+vcf_size]  VendorCodingFormat       output_coding_format
-    -- Controller->Host coding format.
-
-  $next [+2]         UInt                     input_coded_data_size_bits
-    -- Size, in bits, of the sample or framed data.
-
-  $next [+2]         UInt                     output_coded_data_size_bits
-    -- Size, in bits, of the sample or framed data.
-
-  $next [+1]         PcmDataFormat            input_pcm_data_format
-
-  $next [+1]         PcmDataFormat            output_pcm_data_format
-
-  $next [+1]         UInt                     input_pcm_sample_payload_msb_position
-    -- The number of bit positions within an audio sample that the MSB of
-    -- the sample is away from starting at the MSB of the data.
-
-  $next [+1]         UInt                     output_pcm_sample_payload_msb_position
-    -- The number of bit positions within an audio sample that the MSB of
-    -- the sample is away from starting at the MSB of the data.
-
-  $next [+1]         ScoDataPath              input_data_path
-
-  $next [+1]         ScoDataPath              output_data_path
-
-  $next [+1]         UInt                     input_transport_unit_size_bits
-    -- The number of bits in each unit of data received from the Host over the audio data transport.
-    -- 0 indicates "not applicable"  (implied by the choice of audio data transport).
-
-  $next [+1]         UInt                     output_transport_unit_size_bits
-    -- The number of bits in each unit of data sent to the Host over the audio data transport.
-    -- 0 indicates "not applicable"  (implied by the choice of audio data transport).
-
-  $next [+2]         UInt                     max_latency_ms
-    -- The value in milliseconds representing the upper limit of the sum of
-    -- the synchronous interval, and the size of the eSCO window, where the
-    -- eSCO window is the reserved slots plus the retransmission window.
-    -- Minimum: 0x0004
-    -- Don't care: 0xFFFF
-
-  $next [+2]         ScoPacketType            packet_types
-    -- Bitmask of allowed packet types.
-
-  $next [+1]         ScoRetransmissionEffort  retransmission_effort
-
-
-struct EnhancedSetupSynchronousConnectionCommand:
-  -- 7.1.45 Enhanced Setup Synchronous Connection command
-  -- HCI_Enhanced_Setup_Synchronous_Connection
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader                    header
-  $next [+2]         UInt                             connection_handle
-    -- The connection handle of the associated ACL link if creating a new (e)SCO connection, or the
-    -- handle of an existing eSCO link if updating connection parameters.
-
-  let scp_size = SynchronousConnectionParameters.$size_in_bytes
-  $next [+scp_size]  SynchronousConnectionParameters  connection_parameters
-
-
-struct EnhancedAcceptSynchronousConnectionRequestCommand:
-  -- 7.1.46 Enhanced Accept Synchronous Connection Request command (CSA2) (BR/EDR)
-  -- HCI_Enhanced_Accept_Synchronous_Connection_Request
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]               CommandHeader                    header
-  $next [+BdAddr.$size_in_bytes]  BdAddr                           bd_addr
-    -- The 48-bit BD_ADDR of the remote device requesting the connection.
-
-  let scp_size = SynchronousConnectionParameters.$size_in_bytes
-  $next [+scp_size]               SynchronousConnectionParameters  connection_parameters
-
-# 7.1.47 Truncated Page command
-# HCI_Truncated_Page
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.1.48 Truncated Page Cancel command
-# HCI_Truncated_Page_Cancel
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.1.49 Set Connectionless Peripheral Broadcast command
-# HCI_Set_Connectionless_Peripheral_Broadcast
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.1.50 Set Connectionless Peripheral Broadcast Receive command
-# HCI_Set_Connectionless_Peripheral_Broadcast_Receive
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.1.51 Start Synchronization Train command
-# HCI_Start_Synchronization_Train
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.1.52 Receive Synchronization Train command
-# HCI_Receive_Synchronization_Train
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.1.53 Remote OOB Extended Data Request Reply command
-# HCI_Remote_OOB_Extended_Data_Request_Reply
-# TODO: b/265052417 - Definition needs to be added
-
-
-# ========== 7.3 Controller & Baseband Commands ==========
-
-
-struct SetEventMaskCommand:
-  -- 7.3.1 Set Event Mask command (v1.1)
-  -- HCI_Set_Event_Mask
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader  header
-  $next [+8]         UInt           event_mask
-    -- 64-bit Bit mask used to control which HCI events are generated by the HCI for the
-    -- Host. See enum class EventMask in hci_constants.h
-
-
-struct ResetCommand:
-  -- 7.3.2 Reset command (v1.1)
-  -- HCI_Reset
-  let hdr_size = CommandHeader.$size_in_bytes
-  0 [+hdr_size]  CommandHeader  header
-
-# 7.3.3 Set Event Filter command
-# HCI_Set_Event_Filter
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.4 Flush command
-# HCI_Flush
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.5 Read PIN Type command
-# HCI_Read_PIN_Type
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.6 Write PIN Type command
-# HCI_Write_PIN_Type
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.8 Read Stored Link Key command
-# HCI_Read_Stored_Link_Key
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.9 Write Stored Link Key command
-# HCI_Write_Stored_Link_Key
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.10 Delete Stored Link Key command
-# HCI_Delete_Stored_Link_Key
-# TODO: b/265052417 - Definition needs to be added
-
-
-struct WriteLocalNameCommand:
-  -- 7.3.11 Write Local Name command (v1.1) (BR/EDR)
-  -- HCI_Write_Local_Name
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]         CommandHeader  header
-  let local_name_size = LocalName.$size_in_bytes
-  $next [+local_name_size]  LocalName      local_name
-    -- A UTF-8 encoded User Friendly Descriptive Name for the device.
-    -- If the name contained in the parameter is shorter than 248 octets, the end
-    -- of the name is indicated by a NULL octet (0x00), and the following octets
-    -- (to fill up 248 octets, which is the length of the parameter) do not have
-    -- valid values.
-
-
-struct ReadLocalNameCommand:
-  -- 7.3.12 Read Local Name command (v1.1) (BR/EDR)
-  -- HCI_Read_Local_Name
-  let hdr_size = CommandHeader.$size_in_bytes
-  0 [+hdr_size]  CommandHeader  header
-
-# 7.3.13 Read Connection Accept Timeout command
-# HCI_Read_Connection_Accept_Timeout
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.14 Write Connection Accept Timeout command
-# HCI_Write_Connection_Accept_Timeout
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.15 Read Page Timeout command
-# HCI_Read_Page_Timeout
-# TODO: b/265052417 - Definition needs to be added
-
-
-struct WritePageTimeoutCommand:
-  -- 7.3.16 Write Page Timeout command (v1.1) (BR/EDR)
-  -- HCI_Write_Page_Timeout
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader  header
-  $next [+2]         UInt           page_timeout
-    -- Page_Timeout, in time slices (0.625 ms)
-    -- Range: From MIN to MAX in PageTimeout in this file
-    [requires: 0x0001 <= this <= 0xFFFF]
-
-
-struct ReadScanEnableCommand:
-  -- 7.3.17 Read Scan Enable command (v1.1) (BR/EDR)
-  -- HCI_Read_Scan_Enable
-  let hdr_size = CommandHeader.$size_in_bytes
-  0 [+hdr_size]  CommandHeader  header
-
-
-struct WriteScanEnableCommand:
-  -- 7.3.18 Write Scan Enable command (v1.1) (BR/EDR)
-  -- HCI_Write_Scan_Enable
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader   header
-  $next [+1]         ScanEnableBits  scan_enable
-    -- Bit Mask of enabled scans. See enum class ScanEnableBits in this file
-    -- for how to construct this bitfield.
-
-
-struct ReadPageScanActivityCommand:
-  -- 7.3.19 Read Page Scan Activity command (v1.1) (BR/EDR)
-  -- HCI_Read_Page_Scan_Activity
-  let hdr_size = CommandHeader.$size_in_bytes
-  0 [+hdr_size]  CommandHeader  header
-
-
-struct WritePageScanActivityCommand:
-  -- 7.3.20 Write Page Scan Activity command (v1.1) (BR/EDR)
-  -- HCI_Write_Page_Scan_Activity
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader  header
-  $next [+2]         UInt           page_scan_interval
-    -- Page_Scan_Interval, in time slices (0.625ms)
-    -- Valid Range: MIN - MAX in ScanInterval in this file
-    [requires: 0x0012 <= this <= 0x1000]
-
-  $next [+2]         UInt           page_scan_window
-    -- Page_Scan_Window, in time slices
-    -- Valid Range: MIN - MAX in ScanWindow in this file
-    [requires: 0x0011 <= this <= 0x1000]
-
-
-struct ReadInquiryScanActivityCommand:
-  -- 7.3.21 Read Inquiry Scan Activity command (v1.1) (BR/EDR)
-  -- HCI_Read_Inquiry_Scan_Activity
-  let hdr_size = CommandHeader.$size_in_bytes
-  0 [+hdr_size]  CommandHeader  header
-
-
-struct WriteInquiryScanActivityCommand:
-  -- 7.3.22 Write Inquiry Scan Activity command (v1.1) (BR/EDR)
-  -- HCI_Write_Inquiry_Scan_Activity
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader  header
-  $next [+2]         UInt           inquiry_scan_interval
-    -- Inquiry_Scan_Interval, in time slices (0.625ms)
-    -- Valid Range: MIN - MAX in ScanInterval in this file
-    [requires: 0x0012 <= this <= 0x1000]
-
-  $next [+2]         UInt           inquiry_scan_window
-    -- Inquiry_Scan_Window, in time slices
-    -- Valid Range: MIN - MAX in ScanWindow in this file
-    [requires: 0x0011 <= this <= 0x1000]
-
-# 7.3.23 Read Authentication Enable command
-# HCI_Read_Authentication_Enable
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.24 Write Authentication Enable command
-# HCI_Write_Authentication_Enable
-# TODO: b/265052417 - Definition needs to be added
-
-
-struct ReadClassOfDeviceCommand:
-  -- 7.3.25 Read Class of Device command (v1.1) (BR/EDR)
-  -- HCI_Read_Class_Of_Device
-  let hdr_size = CommandHeader.$size_in_bytes
-  0 [+hdr_size]  CommandHeader  header
-
-
-struct WriteClassOfDeviceCommand:
-  -- 7.3.26 Write Class Of Device command (v1.1) (BR/EDR)
-  -- HCI_Write_Class_Of_Device
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader  header
-  $next [+3]         ClassOfDevice  class_of_device
-
-# 7.3.27 Read Voice Setting command
-# HCI_Read_Voice_Setting
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.28 Write Voice Setting command
-# HCI_Write_Voice_Setting
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.29 Read Automatic Flush Timeout command
-# HCI_Read_Automatic_Flush_Timeout
-# TODO: b/265052417 - Definition needs to be added
-
-
-struct WriteAutomaticFlushTimeoutCommand:
-  -- 7.3.30 Write Automatic Flush Timeout command (v1.1) (BR/EDR)
-  -- HCI_Write_Automatic_Flush_Timeout
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader  header
-  $next [+2]         UInt           connection_handle
-    -- Connection_Handle (only the lower 12-bits are meaningful).
-    --   Range: 0x0000 to 0x0EFF
-    [requires: 0x0000 <= this <= 0x0EFF]
-
-  $next [+2]         UInt           flush_timeout
-    -- The value for the Flush_Timeout configuration parameter (Core Spec v5.2, Vol 4, Part E, Sec 6.19).
-    -- Range: 0x0000 to 0x07FF. 0x0000 indicates infinite flush timeout (no automatic flush).
-    -- Time = flush_timeout * 0.625ms.
-    -- Time Range: 0.625ms to 1279.375ms.
-    [requires: 0x0000 <= this <= 0x07FF]
-
-# 7.3.31 Read Num Broadcast Retransmissions command
-# HCI_Read_Num_Broadcast_Retransmissions
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.32 Write Num Broadcast Retransmissions command
-# HCI_Write_Num_Broadcast_Retransmissions
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.33 Read Hold Mode Activity command
-# HCI_Read_Hold_Mode_Activity
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.34 Write Hold Mode Activity command
-# HCI_Write_Hold_Mode_Activity
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.35 Read Transmit Power Level command
-# HCI_Read_Transmit_Power_Level
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.36 Read Synchronous Flow Control Enable command
-# HCI_Read_Synchronous_Flow_Control_Enable
-# TODO: b/265052417 - Definition needs to be added
-
-
-struct WriteSynchronousFlowControlEnableCommand:
-  -- 7.3.37 Write Synchonous Flow Control Enable command (BR/EDR)
-  -- HCI_Write_Synchronous_Flow_Control_Enable
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader       header
-  $next [+1]         GenericEnableParam  synchronous_flow_control_enable
-    -- If enabled, HCI_Number_Of_Completed_Packets events shall be sent from the controller
-    -- for synchronous connection handles.
-
-# 7.3.38 Set Controller To Host Flow Control command
-# HCI_Set_Controller_To_Host_Flow_Control
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.39 Host Buffer Size command
-# HCI_Host_Buffer_Size
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.40 Host Number Of Completed Packets command
-# HCI_Host_Number_Of_Completed_Packets
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.41 Read Link Supervision Timeout command
-# HCI_Read_Link_Supervision_Timeout
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.42 Write Link Supervision Timeout command
-# HCI_Write_Link_Supervision_Timeout
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.43 Read Number Of Supported IAC command
-# HCI_Read_Number_Of_Supported_IAC
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.44 Read Current IAC LAP command
-# HCI_Read_Current_IAC_LAP
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.45 Write Current IAC LAP command
-# HCI_Write_Current_IAC_LAP
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.46 Set AFH Host Channel Classification command
-# HCI_Set_AFH_Host_Channel_Classification
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.47 Read Inquiry Scan Type command
-# HCI_Read_Inquiry_Scan_Type
-# TODO: b/265052417 - Definition needs to be added
-
-
-struct WriteInquiryScanTypeCommand:
-  -- 7.3.48 Write Inquiry Scan Type (v1.2) (BR/EDR)
-  -- HCI_Write_Inquiry_Scan_Type
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader    header
-  $next [+1]         InquiryScanType  inquiry_scan_type
-    -- See enum class InquiryScanType in this file for possible values
-
-
-struct ReadInquiryModeCommand:
-  -- 7.3.49 Read Inquiry Mode (v1.2) (BR/EDR)
-  -- HCI_Read_Inquiry_Mode
-  let hdr_size = CommandHeader.$size_in_bytes
-  0 [+hdr_size]  CommandHeader  header
-
-
-struct WriteInquiryModeCommand:
-  -- 7.3.50 Write Inquiry Mode (v1.2) (BR/EDR)
-  -- HCI_Write_Inquiry_Mode
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader  header
-  $next [+1]         InquiryMode    inquiry_mode
-
-
-struct ReadPageScanTypeCommand:
-  -- 7.3.51 Read Page Scan Type (v1.2) (BR/EDR)
-  -- HCI_Read_Page_Scan_Type
-  let hdr_size = CommandHeader.$size_in_bytes
-  0 [+hdr_size]  CommandHeader  header
-
-
-struct WritePageScanTypeCommand:
-  -- 7.3.52 Write Page Scan Type (v1.2) (BR/EDR)
-  -- HCI_Write_Page_Scan_Type
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader  header
-  $next [+1]         PageScanType   page_scan_type
-
-# 7.3.53 Read AFH Channel Assessment Mode command
-# HCI_Read_AFH_Channel_Assessment_Mode
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.54 Write AFH Channel Assessment Mode command
-# HCI_Write_AFH_Channel_Assessment_Mode
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.55 Read Extended Inquiry Response command
-# HCI_Read_Extended_Inquiry_Response
-# TODO: b/265052417 - Definition needs to be added
-
-
-struct WriteExtendedInquiryResponseCommand:
-  -- 7.3.56 Write Extended Inquiry Response (v1.2) (BR/EDR)
-  -- HCI_Write_Extended_Inquiry_Response
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader            header
-  $next [+1]         UInt                     fec_required
-    -- If FEC Encoding is required. (v1.2) (7.3.56)
-
-  let eir_size = ExtendedInquiryResponse.$size_in_bytes
-  $next [+eir_size]  ExtendedInquiryResponse  extended_inquiry_response
-    -- Extended inquiry response data as defined in Vol 3, Part C, Sec 8
-
-# 7.3.57 Refresh Encryption Key command
-# HCI_Refresh_Encryption_Key
-# TODO: b/265052417 - Definition needs to be added
-
-
-struct ReadSimplePairingModeCommand:
-  -- 7.3.58 Read Simple Pairing Mode (v2.1 + EDR) (BR/EDR)
-  -- HCI_Read_Simple_Pairing_Mode
-  let hdr_size = CommandHeader.$size_in_bytes
-  0 [+hdr_size]  CommandHeader  header
-
-
-struct WriteSimplePairingModeCommand:
-  -- 7.3.59 Write Simple Pairing Mode (v2.1 + EDR) (BR/EDR)
-  -- HCI_Write_Simple_Pairing_Mode
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader       header
-  $next [+1]         GenericEnableParam  simple_pairing_mode
-
-# 7.3.60 Read Local OOB Data command
-# HCI_Read_Local_OOB_Data
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.61 Read Inquiry Response Transmit Power Level command
-# HCI_Read_Inquiry_Response_Transmit_Power_Level
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.62 Write Inquiry Transmit Power Level command
-# HCI_Write_Inquiry_Transmit_Power_Level
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.63 Send Keypress Notification command
-# HCI_Send_Keypress_Notification
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.64 Read Default Erroneous Data Reporting command
-# HCI_Read_Default_Erroneous_Data_Reporting
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.65 Write Default Erroneous Data Reporting command
-# HCI_Write_Default_Erroneous_Data_Reporting
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.66 Enhanced Flush command
-# HCI_Enhanced_Flush
-# TODO: b/265052417 - Definition needs to be added
-
-
-struct SetEventMaskPage2Command:
-  -- 7.3.69 Set Event Mask Page 2 command (v3.0 + HS)
-  -- HCI_Set_Event_Mask_Page_2
-  0     [+CommandHeader.$size_in_bytes]  CommandHeader   header
-  $next [+8]  bits:
-    0     [+26]                          EventMaskPage2  event_mask_page_2
-      -- Bit mask used to control which HCI events are generated by the HCI for the Host.
-
-
-struct ReadFlowControlModeCommand:
-  -- 7.3.72 Read Flow Control Mode command (v3.0 + HS) (BR/EDR)
-  -- HCI_Read_Flow_Control_Mode
-  0 [+CommandHeader.$size_in_bytes]  CommandHeader  header
-
-
-struct WriteFlowControlModeCommand:
-  -- 7.3.73 Write Flow Control Mode command (v3.0 + HS) (BR/EDR)
-  -- HCI_Write_Flow_Control_Mode
-  0     [+CommandHeader.$size_in_bytes]  CommandHeader    header
-  $next [+1]                             FlowControlMode  flow_control_mode
-
-# 7.3.74 Read Enhanced Transmit Power Level command
-# HCI_Read_Enhanced_Transmit_Power_Level
-# TODO: b/265052417 - Definition needs to be added
-
-
-struct ReadLEHostSupportCommand:
-  -- 7.3.78 Read LE Host Support command (v4.0) (BR/EDR)
-  -- HCI_Read_LE_Host_Support
-  0 [+CommandHeader.$size_in_bytes]  CommandHeader  header
-
-
-struct WriteLEHostSupportCommand:
-  -- 7.3.79 Write LE Host Support command (v4.0) (BR/EDR)
-  -- HCI_Write_LE_Host_Support
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader       header
-  $next [+1]         GenericEnableParam  le_supported_host
-    -- Sets the LE Supported (Host) Link Manager Protocol feature bit.
-
-  $next [+1]         UInt                unused
-    -- Core Spec v5.0, Vol 2, Part E, Section 6.35: This parameter was named
-    -- "Simultaneous_LE_Host" and the value is set to "disabled(0x00)" and
-    -- "shall be ignored".
-    -- Core Spec v5.3, Vol 4, Part E, Section 7.3.79: This parameter was renamed
-    -- to "Unused" and "shall be ignored by the controller".
-
-# 7.3.80 Set MWS Channel Parameters command
-# HCI_Set_MWS_Channel_Parameters
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.81 Set External Frame Configuration command
-# HCI_Set_External_Frame_Configuration
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.82 Set MWS Signaling command
-# HCI_Set_MWS_Signaling
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.83 Set MWS Transport Layer command
-# HCI_Set_MWS_Transport_Layer
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.84 Set MWS Scan Frequency Table command
-# HCI_Set_MWS_Scan_Frequency_Table
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.85 Set MWS_PATTERN Configuration command
-# HCI_Set_MWS_PATTERN_Configuration
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.86 Set Reserved LT_ADDR command
-# HCI_Set_Reserved_LT_ADDR
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.87 Delete Reserved LT_ADDR command
-# HCI_Delete_Reserved_LT_ADDR
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.88 Set Connectionless Peripheral Broadcast Data command
-# HCI_Set_Connectionless_Peripheral_Broadcast_Data
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.89 Read Synchronization Train Parameters command
-# HCI_Read_Synchronization_Train_Parameters
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.90 Write Synchronization Train Parameters command
-# HCI_Write_Synchronization_Train_Parameters
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.91 Read Secure Connections Host Support command
-# HCI_Read_Secure_Connections_Host_Support
-# TODO: b/265052417 - Definition needs to be added
-
-
-struct WriteSecureConnectionsHostSupportCommand:
-  -- 7.3.92 Write Secure Connections Host Support command
-  -- HCI_Write_Secure_Connections_Host_Support
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader       header
-  $next [+1]         GenericEnableParam  secure_connections_host_support
-
-
-struct ReadAuthenticatedPayloadTimeoutCommand:
-  -- 7.3.93 Read Authenticated Payload Timeout command (v4.1) (BR/EDR & LE)
-  -- HCI_Read_Authenticated_Payload_Timeout
-  0     [+CommandHeader.$size_in_bytes]  CommandHeader  header
-  $next [+2]                             UInt           connection_handle
-    [requires: 0x0000 <= this <= 0x0EFF]
-
-
-struct WriteAuthenticatedPayloadTimeoutCommand:
-  -- 7.3.94 Write Authenticated Payload Timeout command (v4.1) (BR/EDR & LE)
-  -- HCI_Write_Authenticated_Payload_Timeout
-  0     [+CommandHeader.$size_in_bytes]  CommandHeader  header
-  $next [+2]                             UInt           connection_handle
-    [requires: 0x0000 <= this <= 0x0EFF]
-
-  $next [+2]                             UInt           authenticated_payload_timeout
-    -- Default = 0x0BB8 (30 s)
-    -- Time = N * 10 ms
-    -- Time Range: 10 ms to 655,350 ms
-    [requires: 0x0001 <= this <= 0xFFFF]
-
-# 7.3.95 Read Local OOB Extended Data command
-# HCI_Read_Local_OOB_Extended_Data
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.96 Read Extended Page Timeout command
-# HCI_Read_Extended_Page_Timeout
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.97 Write Extended Page Timeout command
-# HCI_Write_Extended_Page_Timeout
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.98 Read Extended Inquiry Length command
-# HCI_Read_Extended_Inquiry_Length
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.99 Write Extended Inquiry Length command
-# HCI_Write_Extended_Inquiry_Length
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.100 Set Ecosystem Base Interval command
-# HCI_Set_Ecosystem_Base_Interval
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.101 Configure Data Path command
-# HCI_Configure_Data_Path
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.3.102 Set Min Encryption Key Size command
-# HCI_Set_Min_Encryption_Key_size
-# TODO: b/265052417 - Definition needs to be added
-
-
-# ========== 7.4 Informational Parameters ==========
-
-
-struct ReadLocalVersionInformationCommand:
-  -- 7.4.1 Read Local Version Information command (v1.1)
-  -- HCI_Read_Local_Version_Information
-  let hdr_size = CommandHeader.$size_in_bytes
-  0 [+hdr_size]  CommandHeader  header
-
-
-struct ReadLocalSupportedCommandsCommand:
-  -- 7.4.2 Read Local Supported Commands command (v1.2)
-  -- HCI_Read_Local_Supported_Commands
-  let hdr_size = CommandHeader.$size_in_bytes
-  0 [+hdr_size]  CommandHeader  header
-
-
-struct ReadLocalSupportedFeaturesCommand:
-  -- 7.4.3 Read Local Supported Features command (v1.1)
-  -- HCI_Read_Local_Supported_Features
-  let hdr_size = CommandHeader.$size_in_bytes
-  0 [+hdr_size]  CommandHeader  header
-
-
-struct ReadLocalExtendedFeaturesCommand:
-  -- 7.4.4 Read Local Extended Features command (v1.2) (BR/EDR)
-  -- HCI_Read_Local_Extended_Features
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader  header
-  $next [+1]         UInt           page_number
-    -- 0x00: Requests the normal LMP features as returned by
-    -- Read_Local_Supported_Features.
-    -- 0x01-0xFF: Return the corresponding page of features.
-
-
-struct ReadBufferSizeCommand:
-  -- 7.4.5 Read Buffer Size command (v1.1)
-  -- HCI_Read_Buffer_Size
-  let hdr_size = CommandHeader.$size_in_bytes
-  0 [+hdr_size]  CommandHeader  header
-
-
-struct ReadBdAddrCommand:
-  -- 7.4.6 Read BD_ADDR command (v1.1) (BR/EDR, LE)
-  -- HCI_Read_BD_ADDR
-  let hdr_size = CommandHeader.$size_in_bytes
-  0 [+hdr_size]  CommandHeader  header
-
-# 7.4.7 Read Data Block Size command
-# HCI_Read_Data_Block_Size
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.4.8 Read Local Supported Codecs command
-# HCI_Read_Local_Supported_Codecs [v1][v2]
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.4.9 Read Local Simple Pairing Options command
-# HCI_Read_Local_Simple_Pairing_Options
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.4.10 Read Local Supported Codec Capabilities command
-# HCI_Read_Local_Supported_Codec_Capabilities
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.4.11 Read Local Supported Controller Delay command
-# HCI_Read_Local_Supported_Controller_Delay
-# TODO: b/265052417 - Definition needs to be added
-
-
-# ========== 7.5 Status Parameters ==========
-
-
-struct ReadEncryptionKeySizeCommand:
-  -- 7.5.6 Read Encryption Key Size (v1.1) (BR/EDR)
-  -- HCI_Read_Encryption_Key_Size
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader  header
-  $next [+2]         UInt           connection_handle
-    -- Identifies an active ACL link (only the lower 12 bits are meaningful).
-    [requires: 0x0000 <= this <= 0x0EFF]
-
-# ========== 7.8 LE Controller Commands ==========
-
-
-struct LESetEventMaskCommand:
-  -- 7.8.1 LE Set Event Mask command (v4.0) (LE)
-  -- HCI_LE_Set_Event_Mask
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader  header
-  $next [+8]  bits:
-    0     [+35]      LEEventMask    le_event_mask
-      -- Bitmask that indicates which LE events are generated by the HCI for the Host.
-
-
-struct LEReadBufferSizeCommandV1:
-  -- 7.8.2 LE Read Buffer Size command [v1] (v4.0) (LE)
-  -- HCI_LE_Read_Buffer_Size [v1]
-  let hdr_size = CommandHeader.$size_in_bytes
-  0 [+hdr_size]  CommandHeader  header
-
-
-struct LEReadBufferSizeCommandV2:
-  -- 7.8.2 LE Read Buffer Size command [v2] (v5.2) (LE)
-  -- HCI_LE_Read_Buffer_Size [v2]
-  -- Version 2 of this command changed the opcode and added ISO return
-  -- parameters.
-  let hdr_size = CommandHeader.$size_in_bytes
-  0 [+hdr_size]  CommandHeader  header
-
-
-struct LEReadLocalSupportedFeaturesCommand:
-  -- 7.8.3 LE Read Local Supported Features command (v4.0) (LE)
-  -- HCI_LE_Read_Local_Supported_Features
-  let hdr_size = CommandHeader.$size_in_bytes
-  0 [+hdr_size]  CommandHeader  header
-
-
-struct LESetRandomAddressCommand:
-  -- 7.8.4 LE Set Random Address command (v4.0) (LE)
-  -- HCI_LE_Set_Random_Address
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]               CommandHeader  header
-  $next [+BdAddr.$size_in_bytes]  BdAddr         random_address
-
-
-struct LESetAdvertisingParametersCommand:
-  -- 7.8.5 LE Set Advertising Parameters command (v4.0) (LE)
-  -- HCI_LE_Set_Advertising_Parameters
-
-  [requires: advertising_interval_min <= advertising_interval_max]
-
-  let hdr_size = CommandHeader.$size_in_bytes
-
-  0     [+hdr_size]               CommandHeader              header
-
-  $next [+2]                      UInt                       advertising_interval_min
-    -- Default: 0x0800 (1.28 s)
-    -- Time: N * 0.625 ms
-    -- Time Range: 20 ms to 10.24 s
-    [requires: 0x0020 <= this <= 0x4000]
-
-  $next [+2]                      UInt                       advertising_interval_max
-    -- Default: 0x0800 (1.28 s)
-    -- Time: N * 0.625 ms
-    -- Time Range: 20 ms to 10.24 s
-    [requires: 0x0020 <= this <= 0x4000]
-
-  $next [+1]                      LEAdvertisingType          adv_type
-    -- Used to determine the packet type that is used for advertising when
-    -- advertising is enabled.
-
-  $next [+1]                      LEOwnAddressType           own_address_type
-
-  $next [+1]                      LEPeerAddressType          peer_address_type
-    -- ANONYMOUS address type not allowed.
-
-  $next [+BdAddr.$size_in_bytes]  BdAddr                     peer_address
-    -- Public Device Address, Random Device Address, Public Identity Address, or
-    -- Random (static) Identity Address of the device to be connected.
-
-  $next [+1]  bits:
-
-    0     [+3]                    LEAdvertisingChannels      advertising_channel_map
-      -- Indicates the advertising channels that shall be used when transmitting
-      -- advertising packets. At least 1 channel must be enabled.
-      -- Default: all channels enabled
-
-  $next [+1]                      LEAdvertisingFilterPolicy  advertising_filter_policy
-    -- This parameter shall be ignored when directed advertising is enabled.
-
-
-struct LEReadAdvertisingChannelTxPowerCommand:
-  -- 7.8.6 LE Read Advertising Channel Tx Power command (v4.0) (LE)
-  -- HCI_LE_Read_Advertising_Channel_Tx_Power
-  let hdr_size = CommandHeader.$size_in_bytes
-  0 [+hdr_size]  CommandHeader  header
-
-
-struct LESetAdvertisingDataCommand:
-  -- 7.8.7 LE Set Advertising Data command (v4.0) (LE)
-  -- HCI_LE_Set_Advertising_Data
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader  header
-  $next [+1]         UInt           advertising_data_length
-    -- The number of significant octets in `advertising_data`.
-    [requires: 0x00 <= this <= 0x1F]
-
-  $next [+31]        UInt:8[31]     advertising_data
-    -- 31 octets of advertising data formatted as defined in Core Spec
-    -- v5.3, Vol 3, Part C, Section 11.
-    -- Default: All octets zero
-
-
-struct LESetScanResponseDataCommand:
-  -- 7.8.8 LE Set Scan Response Data command (v4.0) (LE)
-  -- HCI_LE_Set_Scan_Response_Data
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader  header
-  $next [+1]         UInt           scan_response_data_length
-    -- The number of significant octets in `scan_response_data`.
-    [requires: 0x00 <= this <= 0x1F]
-
-  $next [+31]        UInt:8[31]     scan_response_data
-    -- 31 octets of scan response data formatted as defined in Core Spec
-    -- v5.3, Vol 3, Part C, Section 11.
-    -- Default: All octets zero
-
-
-struct LESetAdvertisingEnableCommand:
-  -- 7.8.9 LE Set Advertising Enable command (v4.0) (LE)
-  -- HCI_LE_Set_Advertising_Enable
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader       header
-  $next [+1]         GenericEnableParam  advertising_enable
-
-
-struct LESetScanParametersCommand:
-  -- 7.8.10 LE Set Scan Parameters command (v4.0) (LE)
-  -- HCI_LE_Set_Scan_Parameters
-
-  [requires: le_scan_window <= le_scan_interval]
-
-  let hdr_size = CommandHeader.$size_in_bytes
-
-  0     [+hdr_size]  CommandHeader       header
-
-  $next [+1]         LEScanType          le_scan_type
-    -- Controls the type of scan to perform.
-
-  $next [+2]         UInt                le_scan_interval
-    -- Default: 0x0010 (10ms)
-    -- Time: N * 0.625 ms
-    -- Time Range: 2.5 ms to 10.24 s
-    [requires: 0x0004 <= this <= 0x4000]
-
-  $next [+2]         UInt                le_scan_window
-    -- Default: 0x0010 (10ms)
-    -- Time: N * 0.625 ms
-    -- Time Range: 2.5ms to 10.24 s
-    [requires: 0x0004 <= this <= 0x4000]
-
-  $next [+1]         LEOwnAddressType    own_address_type
-    -- The type of address being used in the scan request packets.
-
-  $next [+1]         LEScanFilterPolicy  scanning_filter_policy
-
-
-struct LESetScanEnableCommand:
-  -- 7.8.11 LE Set Scan Enable command (v4.0) (LE)
-  -- HCI_LE_Set_Scan_Enable
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader       header
-  $next [+1]         GenericEnableParam  le_scan_enable
-  $next [+1]         GenericEnableParam  filter_duplicates
-    -- Controls whether the Link Layer should filter out duplicate advertising
-    -- reports to the Host, or if the Link Layer should generate advertising
-    -- reports for each packet received. Ignored if le_scan_enable is set to
-    -- disabled.
-    -- See Core Spec v5.3, Vol 6, Part B, Section 4.4.3.5
-
-
-struct LECreateConnectionCommand:
-  -- 7.8.12 LE Create Connection command (v4.0) (LE)
-  -- HCI_LE_Create_Connection
-
-  [requires: le_scan_window <= le_scan_interval && connection_interval_min <= connection_interval_max]
-
-  let hdr_size = CommandHeader.$size_in_bytes
-
-  0     [+hdr_size]               CommandHeader       header
-
-  $next [+2]                      UInt                le_scan_interval
-    -- The time interval from when the Controller started the last LE scan until
-    -- it begins the subsequent LE scan.
-    -- Time: N * 0.625 ms
-    -- Time Range: 2.5 ms to 10.24 s
-    [requires: 0x0004 <= this <= 0x4000]
-
-  $next [+2]                      UInt                le_scan_window
-    -- Amount of time for the duration of the LE scan.
-    -- Time: N * 0.625 ms
-    -- Time Range: 2.5 ms to 10.24 s
-    [requires: 0x0004 <= this <= 0x4000]
-
-  $next [+1]                      GenericEnableParam  initiator_filter_policy
-
-  $next [+1]                      LEAddressType       peer_address_type
-
-  $next [+BdAddr.$size_in_bytes]  BdAddr              peer_address
-
-  $next [+1]                      LEOwnAddressType    own_address_type
-
-  $next [+2]                      UInt                connection_interval_min
-    -- Time: N * 1.25 ms
-    -- Time Range: 7.5 ms to 4 s.
-    [requires: 0x0006 <= this <= 0x0C80]
-
-  $next [+2]                      UInt                connection_interval_max
-    -- Time: N * 1.25 ms
-    -- Time Range: 7.5 ms to 4 s.
-    [requires: 0x0006 <= this <= 0x0C80]
-
-  $next [+2]                      UInt                max_latency
-    -- Maximum Peripheral latency for the connection in number of connection
-    -- events.
-    [requires: 0x0000 <= this <= 0x01F3]
-
-  $next [+2]                      UInt                supervision_timeout
-    -- See Core Spec v5.3, Vol 6, Part B, Section 4.5.2.
-    -- Time: N * 10 ms
-    -- Time Range: 100 ms to 32 s
-    [requires: 0x000A <= this <= 0x0C80]
-
-  $next [+2]                      UInt                min_connection_event_length
-    -- Time: N * 0.625 ms
-
-  $next [+2]                      UInt                max_connection_event_length
-    -- Time: N * 0.625 ms
-
-
-struct LECreateConnectionCancelCommand:
-  -- 7.8.13 LE Create Connection Cancel command (v4.0) (LE)
-  -- HCI_LE_Create_Connection_Cancel
-  let hdr_size = CommandHeader.$size_in_bytes
-  0 [+hdr_size]  CommandHeader  header
-
-# 7.8.14 LE Read Filter Accept List Size command
-# HCI_LE_Read_Filter_Accept_List_Size
-# TODO: b/265052417 - Definition needs to be added
-
-
-struct LEClearFilterAcceptListCommand:
-  -- 7.8.15 LE Clear Filter Accept List command (v4.0) (LE)
-  -- HCI_LE_Clear_Filter_Accept_List
-  let hdr_size = CommandHeader.$size_in_bytes
-  0 [+hdr_size]  CommandHeader  header
-
-
-struct LEAddDeviceToFilterAcceptListCommand:
-  -- 7.8.16 LE Add Device To Filter Accept List command (v4.0) (LE)
-  -- HCI_LE_Add_Device_To_Filter_Accept_List
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]               CommandHeader      header
-  $next [+1]                      LEPeerAddressType  address_type
-    -- The address type of the peer.
-
-  $next [+BdAddr.$size_in_bytes]  BdAddr             address
-    -- Public Device Address or Random Device Address of the device to be added
-    -- to the Filter Accept List. Ignored if `address_type` is ANONYMOUS.
-
-
-struct LERemoveDeviceFromFilterAcceptListCommand:
-  -- 7.8.17 LE Remove Device From Filter Accept List command (v4.0) (LE)
-  -- HCI_LE_Remove_Device_From_Filter_Accept_List
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]               CommandHeader      header
-  $next [+1]                      LEPeerAddressType  address_type
-    -- The address type of the peer.
-
-  $next [+BdAddr.$size_in_bytes]  BdAddr             address
-    -- Public Device Address or Random Device Address of the device to be added
-    -- to the Filter Accept List. Ignored if `address_type` is ANONYMOUS.
-
-
-struct LEConnectionUpdateCommand:
-  -- 7.8.18 LE Connection Update command (v4.0) (LE)
-  -- HCI_LE_Connection_Update
-
-  [requires: connection_interval_min <= connection_interval_max && min_connection_event_length <= max_connection_event_length]
-
-  let hdr_size = CommandHeader.$size_in_bytes
-
-  0     [+hdr_size]  CommandHeader  header
-
-  $next [+2]         UInt           connection_handle
-    [requires: 0x0000 <= this <= 0x0EFF]
-
-  $next [+2]         UInt           connection_interval_min
-    -- Time: N * 1.25 ms
-    -- Time Range: 7.5 ms to 4 s.
-    [requires: 0x0006 <= this <= 0x0C80]
-
-  $next [+2]         UInt           connection_interval_max
-    -- Time: N * 1.25 ms
-    -- Time Range: 7.5 ms to 4 s.
-    [requires: 0x0006 <= this <= 0x0C80]
-
-  $next [+2]         UInt           max_latency
-    -- Maximum Peripheral latency for the connection in number of subrated
-    -- connection events.
-    [requires: 0x0000 <= this <= 0x01F3]
-
-  $next [+2]         UInt           supervision_timeout
-    -- See Core Spec v5.3, Vol 6, Part B, Section 4.5.2.
-    -- Time: N * 10 ms
-    -- Time Range: 100 ms to 32 s
-    [requires: 0x000A <= this <= 0x0C80]
-
-  $next [+2]         UInt           min_connection_event_length
-    -- Time: N * 0.625 ms
-
-  $next [+2]         UInt           max_connection_event_length
-    -- Time: N * 0.625 ms
-
-# 7.8.19 LE Set Host Channel Classification command
-# HCI_LE_Set_Host_Channel_Classification
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.8.20 LE Read Channel Map command
-# HCI_LE_Read_Channel_Map
-# TODO: b/265052417 - Definition needs to be added
-
-
-struct LEReadRemoteFeaturesCommand:
-  -- 7.8.21 LE Read Remote Features command (v4.0) (LE)
-  -- HCI_LE_Read_Remote_Features
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader  header
-  $next [+2]         UInt           connection_handle
-    [requires: 0x0000 <= this <= 0x0EFF]
-
-# 7.8.22 LE Encrypt command
-# HCI_LE_Encrypt
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.8.23 LE Rand command
-# HCI_LE_Rand
-# TODO: b/265052417 - Definition needs to be added
-
-
-struct LEEnableEncryptionCommand:
-  -- 7.8.24 LE Enable Encryption command (v4.0) (LE)
-  -- HCI_LE_Enable_Encryption
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]                CommandHeader  header
-  $next [+2]                       UInt           connection_handle
-    [requires: 0x0000 <= this <= 0x0EFF]
-
-  $next [+8]                       UInt           random_number
-  $next [+2]                       UInt           encrypted_diversifier
-  $next [+LinkKey.$size_in_bytes]  LinkKey        long_term_key
-
-
-struct LELongTermKeyRequestReplyCommand:
-  -- 7.8.25 LE Long Term Key Request Reply command (v4.0) (LE)
-  -- HCI_LE_Long_Term_Key_Request_Reply
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]                CommandHeader  header
-  $next [+2]                       UInt           connection_handle
-    [requires: 0x0000 <= this <= 0x0EFF]
-
-  $next [+LinkKey.$size_in_bytes]  LinkKey        long_term_key
-
-
-struct LELongTermKeyRequestNegativeReplyCommand:
-  -- 7.8.26 LE Long Term Key Request Negative Reply command (v4.0) (LE)
-  -- HCI_LE_Long_Term_Key_Request_Negative_Reply
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader  header
-  $next [+2]         UInt           connection_handle
-    [requires: 0x0000 <= this <= 0x0EFF]
-
-
-struct LEReadSupportedStatesCommand:
-  -- 7.8.27 LE Read Supported States command (v4.0) (LE)
-  -- HCI_LE_Read_Supported_States
-  let hdr_size = CommandHeader.$size_in_bytes
-  0 [+hdr_size]  CommandHeader  header
-
-# 7.8.28 LE Receiver Test command
-# HCI_LE_Receiver_Test [v1] [v2] [v3]
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.8.29 LE Transmitter Test command
-# HCI_LE_Transmitter_Test [v1] [v2] [v3] [v4]
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.8.30 LE Test End command
-# HCI_LE_Test_End
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.8.31 LE Remote Connection Parameter Request Reply command
-# HCI_LE_Remote_Connection_Parameter_Request_Reply
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.8.32 LE Remote Connection Parameter Request Negative Reply command
-# HCI_LE_Remote_Connection_Parameter_Request_Negative_Reply
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.8.33 LE Set Data Length command
-# HCI_LE_Set_Data_Length
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.8.34 LE Read Suggested Default Data Length command
-# HCI_LE_Read_Suggested_Default_Data_Length
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.8.35 LE Write Suggested Default Data Length command
-# HCI_LE_Write_Suggested_Default_Data_Length
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.8.36 LE Read Local P-256 Public Key command
-# HCI_LE_Read_Local_P-256_Public_Key
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.8.37 LE Generate DHKey command
-# HCI_LE_Generate_DHKey [v1] [v2]
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.8.38 LE Add Device To Resolving List command
-# HCI_LE_Add_Device_To_Resolving_List
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.8.39 LE Remove Device From Resolving List command
-# HCI_LE_Remove_Device_From_Resolving_List
-# TODO: b/265052417 - Definition needs to be added
-
-
-struct LEClearResolvingListCommand:
-  -- 7.8.40 LE Clear Resolving List command (v4.2) (LE)
-  -- HCI_LE_Clear_Resolving_List
-  let hdr_size = CommandHeader.$size_in_bytes
-  0 [+hdr_size]  CommandHeader  header
-
-# 7.8.41 LE Read Resolving List Size command
-# HCI_LE_Read_Resolving_List_Size
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.8.42 LE Read Peer Resolvable Address command
-# HCI_LE_Read_Peer_Resolvable_Address
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.8.43 LE Read Local Resolvable Address command
-# HCI_LE_Read_Local_Resolvable_Address
-# TODO: b/265052417 - Definition needs to be added
-
-
-struct LESetAddressResolutionEnableCommand:
-  -- 7.8.44 LE Set Address Resolution Enable command (v4.2) (LE)
-  -- HCI_LE_Set_Address_Resolution_Enable
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader       header
-  $next [+1]         GenericEnableParam  address_resolution_enable
-
-# 7.8.45 LE Set Resolvable Private Address Timeout command
-# HCI_LE_Set_Resolvable_Private_Address_Timeout
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.8.46 LE Read Maximum Data Length command
-# HCI_LE_Read_Maximum_Data_Length
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.8.47 LE Read PHY command
-# HCI_LE_Read_PHY
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.8.48 LE Set Default PHY command
-# HCI_LE_Set_Default_PHY
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.8.49 LE Set PHY command
-# HCI_LE_Set_PHY
-# TODO: b/265052417 - Definition needs to be added
-
-
-struct LESetAdvertisingSetRandomAddressCommand:
-  -- 7.8.52 LE Set Advertising Set Random Address command (v5.0) (LE)
-  -- HCI_LE_Set_Advertising_Set_Random_Address
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]               CommandHeader  header
-  $next [+1]                      UInt           advertising_handle
-    -- Handle used to identify an advertising set.
-
-  $next [+BdAddr.$size_in_bytes]  BdAddr         random_address
-    -- The random address to use in the advertising PDUs.
-
-
-struct LESetExtendedAdvertisingParametersV1Command:
-  -- 7.8.53 LE Set Extended Advertising Parameters [v1] command (v5.0) (LE)
-  -- HCI_LE_Set_Extended_Advertising_Parameters [v1]
-
-  let hdr_size = CommandHeader.$size_in_bytes
-
-  0     [+hdr_size]               CommandHeader                 header
-
-  $next [+1]                      UInt                          advertising_handle
-    -- Handle used to identify an advertising set.
-
-  $next [+2]  bits:
-
-    0     [+7]                    LEAdvertisingEventProperties  advertising_event_properties
-
-  $next [+3]                      UInt                          primary_advertising_interval_min
-    -- Time = N * 0.625 s
-    -- Time Range: 20 ms to 10,485.759375 s
-    [requires: 0x000020 <= this]
-
-  $next [+3]                      UInt                          primary_advertising_interval_max
-    -- Time = N * 0.625 s
-    -- Time Range: 20 ms to 10,485.759375 s
-    [requires: 0x000020 <= this]
-
-  $next [+1]  bits:
-
-    0     [+3]                    LEAdvertisingChannels         primary_advertising_channel_map
-
-  $next [+1]                      LEOwnAddressType              own_address_type
-
-  $next [+1]                      LEPeerAddressTypeNoAnon       peer_address_type
-
-  $next [+BdAddr.$size_in_bytes]  BdAddr                        peer_address
-    -- Public Device Address, Random Device Address, Public Identity Address, or Random (static)
-    -- Identity Address of the device to be connected.
-
-  $next [+1]                      LEAdvertisingFilterPolicy     advertising_filter_policy
-
-  $next [+1]                      Int                           advertising_tx_power
-    -- Range: -127 <= N <= +126
-    -- Units: dBm
-    -- If N = 127: Host has no preference.
-    [requires: -127 <= this]
-
-  $next [+1]                      LEPrimaryAdvertisingPHY       primary_advertising_phy
-    -- LEPHY::kLE2M and LEPHY::kLECodedS2 are excluded.
-
-  $next [+1]                      UInt                          secondary_advertising_max_skip
-    -- Maximum advertising events the controller can skip before sending the AUX_ADV_IND packets on
-    -- the secondary advertising physical channel. If this value is zero, AUX_ADV_IND shall be sent
-    -- prior to the next advertising event.
-
-  $next [+1]                      LESecondaryAdvertisingPHY     secondary_advertising_phy
-
-  $next [+1]                      UInt                          advertising_sid
-    -- Value of the Advertising SID subfield in the ADI field of the PDU
-    [requires: 0x00 <= this <= 0x0F]
-
-  $next [+1]                      GenericEnableParam            scan_request_notification_enable
-
-# TODO: b/265052417 - LE Set Extended Advertising Parameters [v2] definition needs to be added
-
-
-struct LESetExtendedAdvertisingDataCommand:
-  -- 7.8.54 LE Set Extended Advertising Data command (v5.0) (LE)
-  -- HCI_LE_Set_Extended_Advertising_Data
-
-  let hdr_size = CommandHeader.$size_in_bytes
-
-  0     [+hdr_size]  CommandHeader                    header
-
-  $next [+1]         UInt                             advertising_handle
-    -- Handle used to identify an advertising set.
-
-  $next [+1]         LESetExtendedAdvDataOp           operation
-
-  $next [+1]         LEExtendedAdvFragmentPreference  fragment_preference
-    -- Provides a hint to the Controller as to whether advertising data should be fragmented.
-
-  $next [+1]         UInt                             advertising_data_length (sz)
-    -- Length of the advertising data included in this command packet, up to
-    -- kMaxLEExtendedAdvertisingDataLength bytes. If the advertising set uses legacy advertising
-    -- PDUs that support advertising data then this shall not exceed kMaxLEAdvertisingDataLength
-    -- bytes.
-    [requires: 0 <= this <= 251]
-
-  $next [+sz]        UInt:8[sz]                       advertising_data
-    -- Variable length advertising data.
-
-
-struct LESetExtendedScanResponseDataCommand:
-  -- 7.8.55 LE Set Extended Scan Response Data command (v5.0) (LE)
-  -- HCI_LE_Set_Extended_Scan_Response_Data
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader                    header
-  $next [+1]         UInt                             advertising_handle
-    -- Used to identify an advertising set
-    [requires: 0x00 <= this <= 0xEF]
-
-  $next [+1]         LESetExtendedAdvDataOp           operation
-  $next [+1]         LEExtendedAdvFragmentPreference  fragment_preference
-    -- Provides a hint to the controller as to whether advertising data should be fragmented
-
-  $next [+1]         UInt                             scan_response_data_length (sz)
-    -- The number of octets in the scan_response_data parameter
-    [requires: 0 <= this <= 251]
-
-  $next [+sz]        UInt:8[sz]                       scan_response_data
-    -- Scan response data formatted as defined in Core Spec v5.4, Vol 3, Part C, Section 11
-
-
-struct LESetExtendedAdvertisingEnableData:
-  -- Data fields for variable-length portion of an LE Set Extended Advertising Enable command
-  0     [+1]  UInt  advertising_handle
-  $next [+2]  UInt  duration
-  $next [+1]  UInt  max_extended_advertising_events
-
-
-struct LESetExtendedAdvertisingEnableCommand:
-  -- 7.8.56 LE Set Extended Advertising Enable command (v5.0) (LE)
-  -- HCI_LE_Set_Extended_Advertising_Enable
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]                   CommandHeader                         header
-  $next [+1]                          GenericEnableParam                    enable
-  $next [+1]                          UInt                                  num_sets
-  let single_data_size = LESetExtendedAdvertisingEnableData.$size_in_bytes
-  $next [+single_data_size*num_sets]  LESetExtendedAdvertisingEnableData[]  data
-
-
-struct LEReadMaxAdvertisingDataLengthCommand:
-  -- 7.8.57 LE Read Maximum Advertising Data Length command (v5.0) (LE)
-  -- HCI_LE_Read_Maximum_Advertising_Data_Length
-  -- This command has no parameters
-  let hdr_size = CommandHeader.$size_in_bytes
-  0 [+hdr_size]  CommandHeader  header
-
-
-struct LEReadNumSupportedAdvertisingSetsCommand:
-  -- 7.8.58 LE Read Number of Supported Advertising Sets command (v5.0) (LE)
-  -- HCI_LE_Read_Number_of_Supported_Advertising_Sets
-  -- This command has no parameters
-  let hdr_size = CommandHeader.$size_in_bytes
-  0 [+hdr_size]  CommandHeader  header
-
-
-struct LERemoveAdvertisingSetCommand:
-  -- 7.8.59 LE Remove Advertising Set command (v5.0) (LE)
-  -- HCI_LE_Remove_Advertising_Set
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader  header
-  $next [+1]         UInt           advertising_handle
-
-
-struct LEClearAdvertisingSetsCommand:
-  -- 7.8.60 LE Clear Advertising Sets command (v5.0) (LE)
-  -- HCI_LE_Clear_Advertising_Sets
-  -- This command has no parameters
-  let hdr_size = CommandHeader.$size_in_bytes
-  0 [+hdr_size]  CommandHeader  header
-
-# 7.8.61 LE Set Periodic Advertising Parameters command
-# HCI_LE_Set_Periodic_Advertising_Parameters [v1] [v2]
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.8.62 LE Set Periodic Advertising Data command
-# HCI_LE_Set_Periodic_Advertising_Data
-# TODO: b/265052417 - Definition needs to be added
-
-
-# 7.8.63 LE Set Periodic Advertising Enable command
-# HCI_LE_Set_Periodic_Advertising_Enable
-# TODO: b/265052417 - Definition needs to be added
-
-
-struct LESetExtendedScanParametersData:
-  -- Data fields for variable-length portion of an LE Set Extneded Scan Parameters command
-
-  0     [+1]  LEScanType  scan_type
-
-  $next [+2]  UInt        scan_interval
-    -- Time interval from when the Controller started its last scan until it begins the subsequent
-    -- scan on the primary advertising physical channel.
-    -- Time = N × 0.625 ms
-    -- Time Range: 2.5 ms to 40.959375 s
-    [requires: 0x0004 <= this]
-
-  $next [+2]  UInt        scan_window
-    -- Duration of the scan on the primary advertising physical channel.
-    -- Time = N × 0.625 ms
-    -- Time Range: 2.5 ms to 40.959375 s
-    [requires: 0x0004 <= this]
-
-
-struct LESetExtendedScanParametersCommand(num_entries: UInt:8):
-  -- 7.8.64 LE Set Extended Scan Parameters command (v5.0) (LE)
-  -- HCI_LE_Set_Extended_Scan_Parameters
-  -- num_entries corresponds to the number of bits set in the |scanning_phys| field
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]            CommandHeader                                 header
-  $next [+1]                   LEOwnAddressType                              own_address_type
-  $next [+1]                   LEScanFilterPolicy                            scanning_filter_policy
-  $next [+1]                   LEScanPHYBits                                 scanning_phys
-  let single_entry_size = LESetExtendedScanParametersData.$size_in_bytes
-  let total_entries_size = num_entries*single_entry_size
-  $next [+total_entries_size]  LESetExtendedScanParametersData[num_entries]  data
-    -- Indicates the type of address being used in the scan request packets (for active scanning).
-
-
-struct LESetExtendedScanEnableCommand:
-  -- 7.8.65 LE Set Extended Scan Enable command (v5.0) (LE)
-  -- HCI_LE_Set_Extended_Scan_Enable
-
-  let hdr_size = CommandHeader.$size_in_bytes
-
-  0     [+hdr_size]  CommandHeader                       header
-
-  $next [+1]         GenericEnableParam                  scanning_enabled
-
-  $next [+1]         LEExtendedDuplicateFilteringOption  filter_duplicates
-    -- See enum class LEExtendedDuplicateFilteringOption in this file for possible values
-
-  $next [+2]         UInt                                duration
-    -- Possible values:
-    --   0x0000: Scan continuously until explicitly disabled
-    --   0x0001-0xFFFF: Scan duration, where:
-    --     Time = N * 10 ms
-    --     Time Range: 10 ms to 655.35 s
-
-  $next [+2]         UInt                                period
-    -- Possible values:
-    --   0x0000: Periodic scanning disabled (scan continuously)
-    --   0x0001-0xFFFF: Time interval from when the Controller started its last
-    --   Scan_Duration until it begins the subsequent Scan_Duration, where:
-    --     Time = N * 1.28 sec
-    --     Time Range: 1.28 s to 83,884.8 s
-
-# 7.8.66 LE Extended Create Connection command
-# HCI_LE_Extended_Create_Connection [v1] [v2]
-# TODO: b/265052417 - Definition needs to be added
-
-
-struct LEPeriodicAdvertisingCreateSyncCommand:
-  -- 7.8.67 LE Periodic Advertising Create Sync command (v5.0) (LE)
-  -- HCI_LE_Periodic_Advertising_Create_Sync
-
-  let hdr_size = CommandHeader.$size_in_bytes
-
-  0     [+hdr_size]               CommandHeader                           header
-
-  $next [+1]                      LEPeriodicAdvertisingCreateSyncOptions  options
-
-  $next [+1]                      UInt                                    advertising_sid
-    -- Advertising SID subfield in the ADI field used to identify the Periodic Advertising
-    [requires: 0x00 <= this <= 0x0F]
-
-  $next [+1]                      LEPeriodicAdvertisingAddressType        advertiser_address_type
-
-  $next [+BdAddr.$size_in_bytes]  BdAddr                                  advertiser_address
-    -- Public Device Address, Random Device Address, Public Identity Address, or Random (static)
-    -- Identity Address of the advertiser
-
-  $next [+2]                      UInt                                    skip
-    -- The maximum number of periodic advertising events that can be skipped after a successful
-    -- receive
-    [requires: 0x0000 <= this <= 0x01F3]
-
-  $next [+2]                      UInt                                    sync_timeout
-    -- Synchronization timeout for the periodic advertising.
-    -- Time = N * 10 ms
-    -- Time Range: 100 ms to 163.84 s
-    [requires: 0x000A <= this <= 0x4000]
-
-  $next [+1]                      LEPeriodicAdvertisingSyncCTEType        sync_cte_type
-    -- Constant Tone Extension sync options
-
-
-struct LEPeriodicAdvertisingCreateSyncCancel:
-  -- 7.8.68 LE Periodic Advertising Create Sync Cancel command (v5.0) (LE)
-  -- HCI_LE_Periodic_Advertising_Create_Sync_Cancel
-  -- Note that this command has no arguments
-  let hdr_size = CommandHeader.$size_in_bytes
-  0 [+hdr_size]  CommandHeader  header
-
-
-struct LEPeriodicAdvertisingTerminateSyncCommand:
-  -- 7.8.69 LE Periodic Advertising Terminate Sync command (v5.0) (LE)
-  -- HCI_LE_Periodic_Advertising_Terminate_Sync
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader  header
-  $next [+2]         UInt           sync_handle
-    -- Identifies the periodic advertising train
-    [requires: 0x0000 <= this <= 0x0EFF]
-
-
-struct LEAddDeviceToPeriodicAdvertiserListCommand:
-  -- 7.8.70 LE Add Device To Periodic Advertiser List command (v5.0) (LE)
-  -- HCI_LE_Add_Device_To_Periodic_Advertiser_List
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]               CommandHeader  header
-  $next [+1]                      LEAddressType  advertiser_address_type
-    -- Address type of the advertiser. The LEAddressType::kPublicIdentity and
-    -- LEAddressType::kRandomIdentity values are excluded for this command.
-
-  $next [+BdAddr.$size_in_bytes]  BdAddr         advertiser_address
-    -- Public Device Address, Random Device Address, Public Identity Address, or
-    -- Random (static) Identity Address of the advertiser.
-
-  $next [+1]                      UInt           advertising_sid
-    -- Advertising SID subfield in the ADI field used to identify the Periodic
-    -- Advertising.
-
-
-struct LERemoveDeviceFromPeriodicAdvertiserListCommand:
-  -- 7.8.71 LE Remove Device From Periodic Advertiser List command (v5.0) (LE)
-  -- HCI_LE_Remove_Device_From_Periodic_Advertiser_List
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]               CommandHeader  header
-  $next [+1]                      UInt           advertiser_address_type
-    -- Address type of the advertiser. The LEAddressType::kPublicIdentity and
-    -- LEAddressType::kRandomIdentity values are excluded for this command.
-
-  $next [+BdAddr.$size_in_bytes]  BdAddr         advertiser_address
-    -- Public Device Address, Random Device Address, Public Identity Address, or
-    -- Random (static) Identity Address of the advertiser.
-
-  $next [+1]                      UInt           advertising_sid
-    -- Advertising SID subfield in the ADI field used to identify the Periodic
-    -- Advertising.
-
-
-struct LEClearPeriodicAdvertiserListCommand:
-  -- 7.8.72 LE Clear Periodic Advertiser List command (v5.0) (LE)
-  -- HCI_LE_Clear_Periodic_Advertiser_List
-  -- Note that this command has no arguments
-  let hdr_size = CommandHeader.$size_in_bytes
-  0 [+hdr_size]  CommandHeader  header
-
-
-struct LEReadPeriodicAdvertiserListSizeCommand:
-  -- 7.8.73 LE Read Periodic Advertiser List Size command (v5.0) (LE)
-  -- HCI_LE_Read_Periodic_Advertiser_List_Size
-  -- Note that this command has no arguments
-  let hdr_size = CommandHeader.$size_in_bytes
-  0 [+hdr_size]  CommandHeader  header
-
-
-struct LEReadTransmitPowerCommand:
-  -- 7.8.74 LE Read Transmit Power command (v5.0) (LE)
-  -- HCI_LE_Read_Transmit_Power
-  -- Note that this command has no arguments
-  let hdr_size = CommandHeader.$size_in_bytes
-  0 [+hdr_size]  CommandHeader  header
-
-
-struct LEReadRFPathCompensationCommand:
-  -- 7.8.75 LE Read RF Path Compensation command (v5.0) (LE)
-  -- HCI_LE_Read_RF_Path_Compensation
-  -- Note that this command has no arguments
-  let hdr_size = CommandHeader.$size_in_bytes
-  0 [+hdr_size]  CommandHeader  header
-
-
-struct LEWriteRFPathCompensationCommand:
-  -- 7.8.76 LE Write RF Path Compensation command (v5.0) (LE)
-  -- HCI_LE_Write_RF_Path_Compensation
-  -- Values provided are used in the Tx Power Level and RSSI calculation.
-  --   Range: -128.0 dB (0xFB00) ≤ N ≤ 128.0 dB (0x0500)
-  --   Units: 0.1 dB
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader  header
-  $next [+2]         Int            rf_tx_path_compensation_value
-    [requires: -1280 <= this <= 1280]
-
-  $next [+2]         Int            rf_rx_path_compensation_value
-    [requires: -1280 <= this <= 1280]
-
-
-struct LESetPrivacyModeCommand:
-  -- 7.8.77 LE Set Privacy Mode command (v5.0) (LE)
-  -- HCI_LE_Set_Privacy_Mode
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]               CommandHeader            header
-  $next [+1]                      LEPeerAddressTypeNoAnon  peer_identity_address_type
-    -- The peer identity address type (either Public Identity or Private
-    -- Identity).
-
-  $next [+BdAddr.$size_in_bytes]  BdAddr                   peer_identity_address
-    -- Public Identity Address or Random (static) Identity Address of the
-    -- advertiser.
-
-  $next [+1]                      LEPrivacyMode            privacy_mode
-    -- The privacy mode to be used for the given entry on the resolving list.
-
-# 7.8.93 [No longer used]
-# 7.8.94 LE Modify Sleep Clock Accuracy command
-# 7.8.95 [No longer used]
-
-
-struct LEReadISOTXSyncCommand:
-  -- 7.8.96 LE Read ISO TX Sync command (v5.2) (LE)
-  -- HCI_LE_Read_ISO_TX_Sync
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader  header
-  $next [+2]         UInt           connection_handle
-    -- Connection handle of the CIS or BIS
-    [requires: 0x0000 <= this <= 0x0EFF]
-
-
-struct LESetCIGParametersCommand:
-  -- 7.8.97 LE Set CIG Parameters command (v5.2) (LE)
-  -- HCI_LE_Set_CIG_Parameters
-
-  let hdr_size = CommandHeader.$size_in_bytes
-
-  0     [+hdr_size]                CommandHeader                            header
-
-  $next [+1]                       UInt                                     cig_id
-    -- Used to identify the CIG
-    [requires: 0x00 <= this <= 0xEF]
-
-  $next [+3]                       UInt                                     sdu_interval_c_to_p
-    -- The interval, in microseconds, of periodic SDUs (Central => Peripheral)
-    [requires: 0x0000FF <= this <= 0x0FFFFF]
-
-  $next [+3]                       UInt                                     sdu_interval_p_to_c
-    -- The interval, in microseconds, of periodic SDUs (Peripheral => Central)
-    [requires: 0x0000FF <= this <= 0x0FFFFF]
-
-  $next [+1]                       LESleepClockAccuracyRange                worst_case_sca
-    -- Worst-case sleep clock accuracy of all Peripherals that will participate in the CIG
-
-  $next [+1]                       LECISPacking                             packing
-    -- Preferred method of arranging subevents of multiple CISes
-
-  $next [+1]                       LECISFraming                             framing
-    -- Format of the CIS Data PDUs
-
-  $next [+2]                       UInt                                     max_transport_latency_c_to_p
-    -- Maximum transport latency, in milliseconds, from the Central's Controller to the
-    -- Peripheral's Controller
-    [requires: 0x0005 <= this <= 0x0FA0]
-
-  $next [+2]                       UInt                                     max_transport_latency_p_to_c
-    -- Maximum transport latency, in milliseconds, from the Peripheral's Controller to the
-    -- Central's Controller
-    [requires: 0x0005 <= this <= 0x0FA0]
-
-  $next [+1]                       UInt                                     cis_count
-    -- Total number of CIS configurations in the CIG being added or modified
-    [requires: 0x00 <= this <= 0x1F]
-
-  let single_cis_options_size = LESetCIGParametersCISOptions.$size_in_bytes
-
-  let total_cis_options_size = cis_count*single_cis_options_size
-
-  $next [+total_cis_options_size]  LESetCIGParametersCISOptions[cis_count]  cis_options
-    -- Array of parameters, one for each of the CISes in this CIG
-
-# 7.8.98 LE Set CIG Parameters Test command
-
-
-struct LECreateCISCommand:
-  -- 7.8.99 LE Create CIS command (v5.2) (LE)
-  -- HCI_LE_Create_CIS
-  struct ConnectionInfo:
-    -- Handles for each stream being created
-
-    0     [+2]  UInt  cis_connection_handle
-      -- Connection handle of a CIS
-      [requires: 0x0000 <= this <= 0xEFFF]
-
-    $next [+2]  UInt  acl_connection_handle
-      -- Connection handle of an ACL connection
-      [requires: 0x0000 <= this <= 0xEFFF]
-
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]               CommandHeader              header
-  $next [+1]                      UInt                       cis_count
-    -- Total number of CISes to be created
-    [requires: 0x01 <= this <= 0x1F]
-
-  let single_cis_params_size = ConnectionInfo.$size_in_bytes
-  let total_cis_params_size = cis_count*single_cis_params_size
-  $next [+total_cis_params_size]  ConnectionInfo[cis_count]  cis_connection_info
-    -- Connection handle information for the CIS(es) being created
-
-
-struct LERemoveCIGCommand:
-  -- 7.8.100 LE Remove CIG command (v5.2) (LE)
-  -- HCI_LE_Remove_CIG
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader  header
-  $next [+1]         UInt           cig_id
-    -- Identifier of a CIG
-    [requires: 0x00 <= this <= 0xEF]
-
-
-struct LEAcceptCISRequestCommand:
-  -- 7.8.101 LE Accept CIS Request command (v5.2) (LE)
-  -- HCI_LE_Accept_CIS_Request
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader  header
-  $next [+2]         UInt           connection_handle
-    -- Connection handle of the CIS
-    [requires: 0x0000 <= this <= 0x0EFF]
-
-
-struct LERejectCISRequestCommand:
-  -- 7.8.102 LE Reject CIS Request command (v5.2) (LE)
-  -- HCI_LE_Reject_CIS_Request
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader  header
-  $next [+2]         UInt           connection_handle
-    -- Connection handle of the CIS
-    [requires: 0x0000 <= this <= 0x0EFF]
-
-  $next [+1]         StatusCode     reason
-    -- Reason the CIS request was rejected
-
-# 7.8.103 LE Create BIG command
-# 7.8.104 LE Create BIG Test command
-# 7.8.105 LE Terminate BIG command
-# 7.8.106 LE BIG Create Sync command
-# 7.8.107 LE BIG Terminate Sync command
-
-
-struct LERequestPeerSCACommand:
-  -- 7.8.108 LE Request Peer SCA command
-  -- HCI_LE_Request_Peer_SCA
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader  header
-  $next [+2]         UInt           connection_handle
-    -- Connection handle of the ACL
-    [requires: 0x0000 <= this <= 0xEFF]
-
-
-struct LESetupISODataPathCommand:
-  -- 7.8.109 LE Setup ISO Data Path command
-  -- HCI_LE_Setup_ISO_Data_Path
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]                    CommandHeader                       header
-  $next [+2]                           UInt                                connection_handle
-    -- Connection handle of the CIS or BIS
-    [requires: 0x0000 <= this <= 0x0EFF]
-
-  $next [+1]                           DataPathDirection                   data_path_direction
-    -- Specifies the direction for which the data path is being configured
-
-  $next [+1]                           UInt                                data_path_id
-    -- Data transport path used (0x00 for HCI).
-    [requires: 0x00 <= this <= 0xFE]
-
-  let vcf_size = CodecId.$size_in_bytes
-  $next [+vcf_size]                    CodecId                             codec_id
-    -- Codec to be used
-
-  $next [+3]                           UInt                                controller_delay
-    -- Controller delay in microseconds (0s to 4s)
-    [requires: 0x000000 <= this <= 0x3D0900]
-
-  $next [+1]                           UInt                                codec_configuration_length
-    -- Length of codec configuration
-
-  $next [+codec_configuration_length]  UInt:8[codec_configuration_length]  codec_configuration
-    -- Codec-specific configuration data
-
-
-struct LERemoveISODataPathCommand:
-  -- 7.8.110 LE Remove ISO Data Path command
-  -- HCI_LE_Remove_ISO_Data_Path
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader  header
-  $next [+2]         UInt           connection_handle
-    -- Connection handle of the CIS or BIS
-    [requires: 0x0000 <= this <= 0x0EFFF]
-
-  $next [+1]  bits:
-    0     [+1]       Flag           remove_input_data_path
-    $next [+1]       Flag           remove_output_data_path
-    $next [+6]       UInt           padding
-
-# 7.8.111 LE ISO Transmit Test command
-# 7.8.112 LE ISO Receive Test command
-# 7.8.113 LE ISO Read Test Counters command
-# 7.8.114 LE ISO Test End command
-
-
-# ========================= HCI Event packets ===========================
-# Core Spec v5.3 Vol 4, Part E, Section 7.7
-
-
-struct VendorDebugEvent:
-  -- This opcode is reserved for vendor-specific debugging events.
-  -- See Core Spec v5.3 Vol 4, Part E, Section 5.4.4.
-  let hdr_size = EventHeader.$size_in_bytes
-  0     [+hdr_size]  EventHeader  header
-  $next [+1]         UInt         subevent_code
-    -- The event code for the vendor subevent.
-
-
-struct InquiryCompleteEvent:
-  -- Inquiry Complete Event (v1.1) (BR/EDR)
-  let hdr_size = EventHeader.$size_in_bytes
-  0     [+hdr_size]  EventHeader  header
-  $next [+1]         StatusCode   status
-
-
-struct InquiryResult:
-  0     [+BdAddr.$size_in_bytes]  BdAddr                  bd_addr
-    -- BD_ADDR for a device which responded.
-
-  $next [+1]                      PageScanRepetitionMode  page_scan_repetition_mode
-  $next [+2]                      UInt                    reserved
-    -- Reserved for future use.
-
-  $next [+3]                      ClassOfDevice           class_of_device
-    -- Class of Device for the device.
-
-  $next [+2]                      ClockOffset             clock_offset
-    -- The lower 15 bits represent bits 16-2 of CLKNPeripheral-CLK.
-
-
-struct InquiryResultEvent:
-  -- Inquiry Result Event (v1.1) (BR/EDR)
-  let hdr_size = EventHeader.$size_in_bytes
-  0     [+hdr_size]                     EventHeader      header
-  $next [+1]                            UInt             num_responses
-    -- Number of responses from the Inquiry.
-
-  let response_size = InquiryResult.$size_in_bytes
-  $next [+num_responses*response_size]  InquiryResult[]  responses
-
-
-struct CommandCompleteEvent:
-  -- Core Spec v5.3 Vol 4, Part E, Section 7.7.14
-  -- EventHeader.opcode == 0xe
-  let hdr_size = EventHeader.$size_in_bytes
-  0     [+hdr_size]  EventHeader  header
-  $next [+1]         UInt         num_hci_command_packets
-  $next [+2]         OpCodeBits   command_opcode
-  let event_fixed_size = $size_in_bytes-hdr_size
-  let return_parameters_size = header.parameter_total_size-event_fixed_size
-
-
-struct SimpleCommandCompleteEvent:
-  -- A Command Complete event where a StatusCode is the only return parameter.
-  -- Also useful for generically getting to status of a larger command complete
-  -- event.
-  let hdr_size = CommandCompleteEvent.$size_in_bytes
-  0     [+hdr_size]  CommandCompleteEvent  command_complete
-  $next [+1]         StatusCode            status
-
-
-struct CommandStatusEvent:
-  let hdr_size = EventHeader.$size_in_bytes
-  0     [+hdr_size]  EventHeader  header
-  $next [+1]         StatusCode   status
-  $next [+1]         UInt         num_hci_command_packets
-  $next [+2]         OpCodeBits   command_opcode
-
-
-struct ConnectionCompleteEvent:
-  -- Connection Complete Event (v1.1) (BR/EDR)
-  let hdr_size = EventHeader.$size_in_bytes
-  0     [+hdr_size]               EventHeader         header
-  $next [+1]                      StatusCode          status
-  $next [+2]                      UInt                connection_handle
-    [requires: 0x0000 <= this <= 0x0EFF]
-
-  $next [+BdAddr.$size_in_bytes]  BdAddr              bd_addr
-    -- The address of the connected device
-
-  $next [+1]                      LinkType            link_type
-  $next [+1]                      GenericEnableParam  encryption_enabled
-
-
-struct ConnectionRequestEvent:
-  -- Connection Request Event (v1.1) (BR/EDR)
-  let hdr_size = EventHeader.$size_in_bytes
-  0     [+hdr_size]               EventHeader    header
-  $next [+BdAddr.$size_in_bytes]  BdAddr         bd_addr
-    -- The address of the device that's requesting the connection.
-
-  $next [+3]                      ClassOfDevice  class_of_device
-    -- The Class of Device of the device which requests the connection.
-
-  $next [+1]                      LinkType       link_type
-
-
-struct DisconnectionCompleteEvent:
-  -- Disconnection Complete Event (v1.1) (BR/EDR & LE)
-  let hdr_size = EventHeader.$size_in_bytes
-  0     [+hdr_size]  EventHeader  header
-  $next [+1]         StatusCode   status
-  $next [+2]         UInt         connection_handle
-    [requires: 0x0000 <= this <= 0x0EFF]
-
-  $next [+1]         StatusCode   reason
-
-
-struct AuthenticationCompleteEvent:
-  -- Authentication Complete Event (v1.1) (BR/EDR)
-  let hdr_size = EventHeader.$size_in_bytes
-  0     [+hdr_size]  EventHeader  header
-  $next [+1]         StatusCode   status
-  $next [+2]         UInt         connection_handle
-    [requires: 0x0000 <= this <= 0x0EFF]
-
-
-struct RemoteNameRequestCompleteEvent:
-  -- Remote Name Request Complete Event (v1.1) (BR/EDR)
-  let hdr_size = EventHeader.$size_in_bytes
-  0     [+hdr_size]               EventHeader  header
-  $next [+1]                      StatusCode   status
-  $next [+BdAddr.$size_in_bytes]  BdAddr       bd_addr
-  $next [+248]                    UInt:8[248]  remote_name
-    -- UTF-8 encoded friendly name. If the name is less than 248 characters, it
-    -- is null terminated and the remaining bytes are not valid.
-
-
-struct EncryptionChangeEventV1:
-  -- Encryption Change Event (v1.1) (BR/EDR & LE)
-  let hdr_size = EventHeader.$size_in_bytes
-  0     [+hdr_size]  EventHeader       header
-  $next [+1]         StatusCode        status
-  $next [+2]         UInt              connection_handle
-    [requires: 0x0000 <= this <= 0x0EFF]
-
-  $next [+1]         EncryptionStatus  encryption_enabled
-
-
-struct ChangeConnectionLinkKeyCompleteEvent:
-  -- Change Connection Link Key Complete Event (v1.1) (BR/EDR)
-  let hdr_size = EventHeader.$size_in_bytes
-  0     [+hdr_size]  EventHeader  header
-  $next [+1]         StatusCode   status
-  $next [+2]         UInt         connection_handle
-    [requires: 0x0000 <= this <= 0x0EFF]
-
-
-struct ReadRemoteSupportedFeaturesCompleteEvent:
-  -- Read Remote Supported Features Complete Event (v1.1) (BR/EDR)
-  let hdr_size = EventHeader.$size_in_bytes
-  0     [+hdr_size]  EventHeader     header
-  $next [+1]         StatusCode      status
-  $next [+2]         UInt            connection_handle
-    [requires: 0x0000 <= this <= 0x0EFF]
-
-  $next [+8]         LmpFeatures(0)  lmp_features
-    -- Page 0 of the LMP features.
-
-
-struct ReadRemoteVersionInfoCompleteEvent:
-  -- Read Remote Version Information Complete Event (v1.1) (BR/EDR & LE)
-  let hdr_size = EventHeader.$size_in_bytes
-  0     [+hdr_size]  EventHeader               header
-  $next [+1]         StatusCode                status
-  $next [+2]         UInt                      connection_handle
-    [requires: 0x0000 <= this <= 0x0EFF]
-
-  $next [+1]         CoreSpecificationVersion  version
-    -- Version of the Current LMP or Link Layer supported by the remote Controller.
-
-  $next [+2]         UInt                      company_identifier
-    -- Company identifier for the manufacturer of the remote Controller. Assigned by Bluetooth SIG.
-
-  $next [+2]         UInt                      subversion
-    -- Revision of the LMP or Link Layer implementation in the remote Controller. This value is vendor-specific.
-
-
-struct ReadRemoteExtendedFeaturesCompleteEvent:
-  -- Read Remote Extended Features Complete Event (v1.1) (BR/EDR)
-  let hdr_size = EventHeader.$size_in_bytes
-  0     [+hdr_size]  EventHeader               header
-  $next [+1]         StatusCode                status
-  $next [+2]         UInt                      connection_handle
-    -- Only the lower 12-bits are meaningful.
-    [requires: 0x0000 <= this <= 0x0EFF]
-
-  $next [+1]         UInt                      page_number
-    -- 0x00: The normal LMP features as returned by HCI_Read_Remote_Supported_Features command.
-    -- 0x01 to 0xFF: The page number of the features returned.
-
-  $next [+1]         UInt                      max_page_number
-    -- The highest features page number which contains non-zero bits for the remote device.
-
-  $next [+8]         LmpFeatures(page_number)  lmp_features
-    -- Bit map of requested page of LMP features.
-
-
-struct LEMetaEvent:
-  let hdr_size = EventHeader.$size_in_bytes
-  0     [+hdr_size]  EventHeader  header
-  $next [+1]         UInt         subevent_code
-    -- The event code for the LE subevent.
-
-
-struct LEConnectionCompleteSubevent:
-  0     [+LEMetaEvent.$size_in_bytes]  LEMetaEvent        le_meta_event
-
-  $next [+1]                           StatusCode         status
-
-  $next [+2]                           UInt               connection_handle
-    -- Only the lower 12-bits are meaningful.
-    [requires: 0x0000 <= this <= 0x0EFF]
-
-  $next [+1]                           ConnectionRole     role
-
-  $next [+1]                           LEPeerAddressType  peer_address_type
-
-  $next [+BdAddr.$size_in_bytes]       BdAddr             peer_address
-    -- Public Device Address or Random Device Address of the peer device.
-
-  $next [+2]                           UInt               connection_interval
-    -- Time: N * 1.25 ms
-    -- Range: 7.5 ms to 4 s
-    [requires: 0x0006 <= this <= 0x0C80]
-
-  $next [+2]                           UInt               peripheral_latency
-    [requires: 0x0000 <= this <= 0x01F3]
-
-  $next [+2]                           UInt               supervision_timeout
-    -- Time: N * 10 ms
-    -- Range: 100 ms to 32 s
-    [requires: 0x000A <= this <= 0x0C80]
-
-  $next [+1]                           LEClockAccuracy    central_clock_accuracy
-    -- Only valid for a peripheral. On a central, this parameter shall be set to 0x00.
-
-
-struct LEConnectionUpdateCompleteSubevent:
-  0     [+LEMetaEvent.$size_in_bytes]  LEMetaEvent  le_meta_event
-
-  $next [+1]                           StatusCode   status
-
-  $next [+2]                           UInt         connection_handle
-    -- Only the lower 12-bits are meaningful.
-    [requires: 0x0000 <= this <= 0x0EFF]
-
-  $next [+2]                           UInt         connection_interval
-    -- Time: N * 1.25 ms
-    -- Range: 7.5 ms to 4 s
-    [requires: 0x0006 <= this <= 0x0C80]
-
-  $next [+2]                           UInt         peripheral_latency
-    [requires: 0x0000 <= this <= 0x01F3]
-
-  $next [+2]                           UInt         supervision_timeout
-    -- Time: N * 10 ms
-    -- Range: 100 ms to 32 s
-    [requires: 0x000A <= this <= 0x0C80]
-
-
-struct LEReadRemoteFeaturesCompleteSubevent:
-  0     [+LEMetaEvent.$size_in_bytes]    LEMetaEvent   le_meta_event
-  $next [+1]                             StatusCode    status
-  $next [+2]                             UInt          connection_handle
-    -- Only the lower 12-bits are meaningful.
-    [requires: 0x0000 <= this <= 0x0EFF]
-
-  $next [+8]  bits:
-    0     [+LEFeatureSet.$size_in_bits]  LEFeatureSet  le_features
-
-
-struct LEExtendedAdvertisingReportData:
-  0     [+2]  bits:
-
-    0     [+7]                    LEExtendedAdvertisingEventType  event_type
-
-  $next [+1]                      LEAddressType                   address_type
-    -- Address type of the advertiser.
-
-  $next [+BdAddr.$size_in_bytes]  BdAddr                          address
-    -- Public Device Address, Random Device Address, Public Identity Address or
-    -- Random (static) Identity Address of the advertising device.
-
-  $next [+1]                      LEPrimaryAdvertisingPHY         primary_phy
-    -- Indicates the PHY used to send the advertising PDU on the primary advertising
-    -- channel. Legacy PDUs always use LE_1M. NONE, LE_2M, and LE_CODED_S2 are excluded.
-
-  $next [+1]                      LESecondaryAdvertisingPHY       secondary_phy
-    -- Indicates the PHY used to send the advertising PDU(s), if any, on the secondary
-    -- advertising channel. A value of NONE means that no packets were received on the
-    -- secondary advertising channel.
-
-  $next [+1]                      UInt                            advertising_sid
-    -- Value of the Advertising SID subfield in the ADI field of the PDU. A value of
-    -- 0xFF means no ADI field provided.
-    [requires: 0x00 <= this <= 0x0F || this == 0xFF]
-
-  $next [+1]                      UInt                            tx_power
-    -- Units: dBm. A value of 0x7F means Tx Power information is not available.
-    [requires: -127 <= this <= 20 || this == 0x7F]
-
-  $next [+1]                      UInt                            rssi
-    -- Units: dBm. A value of 0x7F means RSSI is not available.
-    [requires: -127 <= this <= 20 || this == 0x7F]
-
-  $next [+2]                      UInt                            periodic_advertising_interval
-    -- 0x0000: No periodic advertising.
-    -- 0xXXXX:
-    --   Time = N * 1.25 ms
-    --   Time Range: 7.5 ms to 81,918.75 s
-    [requires: 0x0006 <= this <= 0xFFFF || this == 0x0000]
-
-  $next [+1]                      LEDirectAddressType             direct_address_type
-
-  $next [+BdAddr.$size_in_bytes]  BdAddr                          direct_address
-    -- TargetA field in the advertisement or either Public Identity Address or Random (static)
-    -- Identity Address of the target device.
-
-  $next [+1]                      UInt                            data_length
-    -- Length of the |data| field.
-
-  $next [+data_length]            UInt:8[data_length]             data
-    -- |data_length| octets of advertising or scan response data formatted as defined in
-    -- [Vol 3] Part C, Section 11. Note: Each element of this array has a variable length.
-
-
-struct LEExtendedAdvertisingReportSubevent(reports_size: UInt:8):
-  -- LE Extended Advertising Report Event (v5.0) (LE)
-  0     [+LEMetaEvent.$size_in_bytes]  LEMetaEvent           le_meta_event
-  $next [+1]                           UInt                  num_reports
-    -- Number of separate reports in the event.
-    [requires: 0x01 <= this <= 0x0A]
-
-  $next [+reports_size]                UInt:8[reports_size]  reports
-    -- Since each report has a variable length, they are stored in a UInt:8 array.
-
-# ============================ Test packets =============================
-
-
-struct TestCommandPacket:
-  -- Test HCI Command packet with single byte payload.
-  let hdr_size = CommandHeader.$size_in_bytes
-  0     [+hdr_size]  CommandHeader  header
-  $next [+1]         UInt           payload
-
-
-struct TestEventPacket:
-  -- Test HCI Event packet with single byte payload.
-  let hdr_size = EventHeader.$size_in_bytes
-  0     [+hdr_size]  EventHeader  header
-  $next [+1]         UInt         payload
diff --git a/pw_bluetooth/public/pw_bluetooth/hci_commands.emb b/pw_bluetooth/public/pw_bluetooth/hci_commands.emb
new file mode 100644
index 0000000..c5a8119
--- /dev/null
+++ b/pw_bluetooth/public/pw_bluetooth/hci_commands.emb
@@ -0,0 +1,2898 @@
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+# This file contains Emboss definitions for Host Controller Interface packets
+# and types found in the Bluetooth Core Specification. The Emboss compiler is
+# used to generate a C++ header from this file.
+
+import "hci_common.emb" as hci
+
+[$default byte_order: "LittleEndian"]
+[(cpp) namespace: "pw::bluetooth::emboss"]
+# =========================== Constants =================================
+
+
+enum InquiryAccessCode:
+  -- General- and Device-specific Inquiry Access Codes (DIACs) for use in Inquiry
+  -- command LAP fields.
+  -- (https://www.bluetooth.com/specifications/assigned-numbers/baseband)
+  [maximum_bits: 24]
+  GIAC = 0x9E8B33
+    -- General Inquiry Access Code
+
+  LIAC = 0x9E8B00
+    -- Limited Dedicated Inquiry Access Code
+
+
+enum PcmDataFormat:
+  -- PCM data formats from assigned numbers.
+  -- (https://www.bluetooth.com/specifications/assigned-numbers/host-controller-interface)
+  [maximum_bits: 8]
+  NOT_APPLICABLE  = 0x00
+  ONES_COMPLEMENT = 0x01
+  TWOS_COMPLEMENT = 0x02
+  SIGN_MAGNITUDE  = 0x03
+  UNSIGNED        = 0x04
+
+
+enum ScoDataPath:
+  [maximum_bits: 8]
+  HCI             = 0x00
+  AUDIO_TEST_MODE = 0xFF
+    -- 0x01 - 0xFE specify the logical channel number (vendor specific)
+
+
+enum PageTimeout:
+  [maximum_bits: 16]
+  MIN     = 0x0001
+  MAX     = 0xFFFF
+  DEFAULT = 0x2000
+
+
+enum ScanInterval:
+  -- The minimum and maximum range values for Page and Inquiry Scan Interval (in time slices)
+  -- Page Scan Interval: (see Core Spec v5.0, Vol 2, Part E, Section 7.3.19)
+  -- Inquiry Scan Interval: (see Core Spec v5.0, Vol 2, Part E, Section 7.3.21)
+  [maximum_bits: 16]
+  MIN = 0x0012
+  MAX = 0x1000
+
+
+enum ScanWindow:
+  -- The minimum and maximum range values for Page and Inquiry Scan Window (in time slices)
+  -- Page Scan Window: (see Core Spec v5.0, Vol 2, Part E, Section 7.3.19)
+  -- Inquiry Scan Window: (see Core Spec v5.0, Vol 2, Part E, Section 7.3.21)
+  [maximum_bits: 16]
+  MIN = 0x0011
+  MAX = 0x1000
+
+
+bits ScoPacketType:
+  -- Bitmask of SCO packet types.
+  # SCO packet types
+  0     [+1]  Flag  hv1
+  $next [+1]  Flag  hv2
+  $next [+1]  Flag  hv3
+  # eSCO packet types
+  $next [+1]  Flag  ev3
+  $next [+1]  Flag  ev4
+  $next [+1]  Flag  ev5
+  $next [+1]  Flag  not_2_ev3
+  $next [+1]  Flag  not_3_ev3
+  $next [+1]  Flag  not_2_ev5
+  $next [+1]  Flag  not_3_ev5
+  $next [+6]  UInt  padding
+
+
+bits PacketType:
+  -- Bitmask values for supported Packet Types
+  -- Used for HCI_Create_Connection and HCI_Change_Connection_Packet_Type
+  -- All other bits reserved for future use.
+  1  [+1]  Flag  disable_2_dh1
+  2  [+1]  Flag  disable_3_dh1
+  3  [+1]  Flag  enable_dm1     # Note: always on in >= v1.2
+  4  [+1]  Flag  enable_dh1
+  8  [+1]  Flag  disable_2_dh3
+  9  [+1]  Flag  disable_3_dh3
+  10 [+1]  Flag  enable_dm3
+  11 [+1]  Flag  enable_dh3
+  12 [+1]  Flag  disable_2_dh5
+  13 [+1]  Flag  disable_3_dh5
+  14 [+1]  Flag  enable_dm5
+  15 [+1]  Flag  enable_dh5
+
+
+enum OobDataPresent:
+  -- Whether there is out-of-band data present, and what type.
+  -- All other values reserved for future use.
+  [maximum_bits: 8]
+  NOT_PRESENT   = 0x00
+  P192_         = 0x01
+  P256_         = 0x02
+  P192_AND_P256 = 0x03
+
+
+bits ScanEnableBits:
+  -- Bitmask Values for the Scan_Enable parameter in a
+  -- HCI_(Read,Write)_Scan_Enable command.
+  0     [+1]  Flag  inquiry
+    -- Inquiry scan enabled
+
+  $next [+1]  Flag  page
+    -- Page scan enabled
+
+  $next [+6]  UInt  padding
+
+
+enum InquiryScanType:
+  [maximum_bits: 8]
+  STANDARD   = 0x00
+    -- Standard scan (Default) (Mandatory)
+
+  INTERLACED = 0x01
+
+
+struct LocalName:
+  0 [+248]  UInt:8[248]  local_name
+
+
+struct ExtendedInquiryResponse:
+  0 [+240]  UInt:8[240]  extended_inquiry_response
+
+
+enum LEExtendedDuplicateFilteringOption:
+  -- Possible values that can be used for the |filter_duplicates| parameter in a
+  -- HCI_LE_Set_Extended_Scan_Enable command.
+  [maximum_bits: 8]
+  DISABLED                           = 0x00
+  ENABLED                            = 0x01
+  ENABLED_RESET_FOR_EACH_SCAN_PERIOD = 0x02
+    -- Duplicate advertisements in a single scan period should not be sent to the
+    -- Host in advertising report events; this setting shall only be used if the
+    -- Period parameter is non-zero.
+
+
+enum LEPeriodicAdvertisingCreateSyncUseParams:
+  [maximum_bits: 1]
+
+  USE_PARAMS                   = 0x00
+    -- Use the Advertising_SID, Advertiser_Address_Type, and Advertiser_Address parameters to
+    -- determine which advertiser to listen to.
+
+  USE_PERIODIC_ADVERTISER_LIST = 0x01
+    -- Use the Periodic Advertiser List to determine which advertiser to listen to.
+
+
+bits LEPeriodicAdvertisingCreateSyncOptions:
+  -- First parameter to the LE Periodic Advertising Create Sync command
+
+  0     [+1]  LEPeriodicAdvertisingCreateSyncUseParams  advertiser_source
+
+  $next [+1]  Flag                                      enable_reporting
+    -- 0: Reporting initially enabled
+    -- 1: Reporting initially disabled
+
+  $next [+1]  Flag                                      enable_duplicate_filtering
+    -- 0: Duplicate filtering initially disabled
+    -- 1: Duplicate filtering initially enabled
+
+  $next [+5]  UInt                                      padding
+    -- Reserved for future use
+
+
+enum LEPeriodicAdvertisingAddressType:
+  -- Possible values that can be specified for the |advertiser_address_type| in an LE Periodic
+  -- Advertising Create Sync command.
+  [maximum_bits: 8]
+  PUBLIC = 0x00
+    -- Public Device Address or Public Identity Address
+
+  RANDOM = 0x01
+    -- Random Device Address or Random (static) Identity Address
+
+
+bits LEPeriodicAdvertisingSyncCTEType:
+  -- Bit definitions for a |sync_cte_type| field in an LE Periodic Advertising Create Sync command
+
+  0     [+1]  Flag  dont_sync_aoa
+    -- Do not sync to packets with an AoA Constant Tone Extension
+
+  $next [+1]  Flag  dont_sync_aod_1us
+    -- Do not sync to packets with an AoD Constant Tone Extension with 1 microsecond slots
+
+  $next [+1]  Flag  dont_sync_aod_2us
+    -- Do not sync to packets with an AoD Constant Tone Extension with 2 microsecond slots
+
+  $next [+1]  Flag  dont_sync_type_3
+    -- Do not sync to packets with a type 3 Constant Tone Extension (currently reserved for future
+    -- use)
+
+  $next [+1]  Flag  dont_sync_without_cte
+    -- Do not sync to packets without a Constant Tone Extension
+
+  $next [+3]  UInt  padding
+    -- Reserved for future use
+
+
+enum LEOwnAddressType:
+  -- Possible values that can be used for the |own_address_type| parameter in various LE packets.
+
+  [maximum_bits: 8]
+
+  PUBLIC                    = 0x00
+    -- Public Device Address
+
+  RANDOM                    = 0x01
+    -- Random Device Address
+
+  PRIVATE_DEFAULT_TO_PUBLIC = 0x02
+    -- Controller generates the Resolvable Private Address based on the local IRK from the resolving
+    -- list. If the resolving list contains no matching entry, then use the public address.
+
+  PRIVATE_DEFAULT_TO_RANDOM = 0x03
+    -- Controller generates the Resolvable Private Address based on the local IRK from the resolving
+    -- list. If the resolving list contains no matching entry, then use the random address from
+    -- LE_Set_Random_Address.
+
+
+enum LEScanType:
+  -- Possible values that can be used for the |scan_type| parameter in various LE HCI commands.
+  [maximum_bits: 8]
+  PASSIVE = 0x00
+    -- Passive Scanning. No scanning PDUs shall be sent (default)
+
+  ACTIVE  = 0x01
+    -- Active scanning. Scanning PDUs may be sent.
+
+
+enum LEScanFilterPolicy:
+  -- Possible values that can be used for the |filter_policy| parameter in various LE HCI commands
+  [maximum_bits: 8]
+  BASIC_UNFILTERED    = 0x00
+  BASIC_FILTERED      = 0x01
+  EXTENDED_UNFILTERED = 0x02
+  EXTENDED_FILTERED   = 0x03
+
+
+bits LEScanPHYBits:
+  -- Bit definitions identifying the PHY(s) on which advertisements should be scanned.
+  0     [+1]  Flag  le_1m
+    -- Scan advertisements on the LE 1M PHY
+
+  $next [+1]  Flag  padding1
+    -- Reserved for future use
+
+  $next [+1]  Flag  le_coded
+    -- Scan advertisements on the LE Coded PHY
+
+  $next [+5]  UInt  padding2
+    -- Reserved for future use
+
+
+enum LEPrivacyMode:
+  -- Possible values for the |privacy_mode| parameter in an LE Set Privacy Mode
+  -- command
+  [maximum_bits: 8]
+  NETWORK = 0x00
+    -- Use Network Privacy Mode for this peer device (default).
+
+  DEVICE  = 0x01
+    -- Use Device Privacy Mode for this peer device.
+
+
+enum InquiryMode:
+  -- Format to use for Inquiry Result events.
+  [maximum_bits: 8]
+  STANDARD = 0x00
+    -- Standard Inquiry Result format (default)
+
+  RSSI     = 0x01
+    -- Inquiry Result format with RSSI
+
+  EXTENDED = 0x02
+    -- Inquiry Result format with RSSI or EIR format
+
+
+enum PageScanType:
+  [maximum_bits: 8]
+  STANDARD_SCAN   = 0x00
+    -- Standard scan (default) (mandatory)
+
+  INTERLACED_SCAN = 0x01
+    -- Interlaced scan (optional)
+
+
+bits LEEventMask:
+  -- Bit definitions for an LE event mask (one flag per LE event).
+  0     [+1]  Flag  le_connection_complete
+  $next [+1]  Flag  le_advertising_report
+  $next [+1]  Flag  le_connection_update_complete
+  $next [+1]  Flag  le_read_remote_features_complete
+  $next [+1]  Flag  le_long_term_key_request
+  $next [+1]  Flag  le_remote_connection_parameter_request
+  $next [+1]  Flag  le_data_length_change
+  $next [+1]  Flag  le_read_local_p256_public_key_complete
+  $next [+1]  Flag  le_generate_dhkey_complete
+  $next [+1]  Flag  le_enhanced_connection_complete
+  $next [+1]  Flag  le_directed_advertising_report
+  $next [+1]  Flag  le_phy_update_complete
+  $next [+1]  Flag  le_extended_advertising_report
+  $next [+1]  Flag  le_periodic_advertising_sync_established
+  $next [+1]  Flag  le_periodic_advertising_report
+  $next [+1]  Flag  le_periodic_advertising_sync_lost
+  $next [+1]  Flag  le_extended_scan_timeout
+  $next [+1]  Flag  le_extended_advertising_set_terminated
+  $next [+1]  Flag  le_scan_request_received
+  $next [+1]  Flag  le_channel_selection_algorithm
+  $next [+1]  Flag  le_connectionless_iq_report
+  $next [+1]  Flag  le_connection_iq_report
+  $next [+1]  Flag  le_cte_request_failed
+  $next [+1]  Flag  le_periodic_advertising_sync_transfer_received_event
+  $next [+1]  Flag  le_cis_established_event
+  $next [+1]  Flag  le_cis_request_event
+  $next [+1]  Flag  le_create_big_complete_event
+  $next [+1]  Flag  le_terminate_big_complete_event
+  $next [+1]  Flag  le_big_sync_established_event
+  $next [+1]  Flag  le_big_sync_lost_event
+  $next [+1]  Flag  le_request_peer_sca_complete_event
+  $next [+1]  Flag  le_path_loss_threshold_event
+  $next [+1]  Flag  le_transmit_power_reporting_event
+  $next [+1]  Flag  le_biginfo_advertising_report_event
+  $next [+1]  Flag  le_subrate_change_event
+
+
+enum LEAdvertisingType:
+  [maximum_bits: 8]
+  CONNECTABLE_AND_SCANNABLE_UNDIRECTED = 0x00
+    -- ADV_IND
+
+  CONNECTABLE_HIGH_DUTY_CYCLE_DIRECTED = 0x01
+    -- ADV_DIRECT_IND
+
+  SCANNABLE_UNDIRECTED                 = 0x02
+    -- ADV_SCAN_IND
+
+  NOT_CONNECTABLE_UNDIRECTED           = 0x03
+    -- ADV_NONCONN_IND
+
+  CONNECTABLE_LOW_DUTY_CYCLE_DIRECTED  = 0x04
+    -- ADV_DIRECT_IND
+
+
+bits LEAdvertisingChannels:
+  -- Bit definitions selecting which advertising channels (37, 38, 39) to use.
+  0     [+1]  Flag  channel_37
+  $next [+1]  Flag  channel_38
+  $next [+1]  Flag  channel_39
+
+
+enum LEAdvertisingFilterPolicy:
+  [maximum_bits: 8]
+
+  ALLOW_ALL                                                  = 0x00
+    -- Process scan and connection requests from all devices (i.e., the Filter
+    -- Accept List is not in use) (default).
+
+  ALLOW_ALL_CONNECTIONS_AND_USE_FILTER_ACCEPT_LIST_FOR_SCANS = 0x01
+    -- Process connection requests from all devices and scan requests only from
+    -- devices that are in the Filter Accept List.
+
+  ALLOW_ALL_SCANS_AND_USE_FILTER_ACCEPT_LIST_FOR_CONNECTIONS = 0x02
+    -- Process scan requests from all devices and connection requests only from
+    -- devices that are in the Filter Accept List.
+
+  ALLOW_FILTER_ACCEPT_LIST_ONLY                              = 0x03
+    -- Process scan and connection requests only from devices in the Filter
+    -- Accept List.
+
+
+enum ScanRequestNotifications:
+  -- Whether notifications for scan requests are enabled.
+  [maximum_bits: 8]
+  DISABLED = 0x00
+  ENABLED  = 0x01
+
+
+enum LESetExtendedAdvDataOp:
+  -- Potential values for the Operation parameter in a HCI_LE_Set_Extended_Advertising_Data command.
+  [maximum_bits: 8]
+  INTERMEDIATE_FRAGMENT = 0x00
+    -- Intermediate fragment of fragmented extended advertising data.
+
+  FIRST_FRAGMENT        = 0x01
+    -- First fragment of fragmented extended advertising data.
+
+  LAST_FRAGMENT         = 0x02
+    -- Last fragment of fragmented extended advertising data.
+
+  COMPLETE              = 0x03
+    -- Complete extended advertising data.
+
+  UNCHANGED_DATA        = 0x04
+    -- Unchanged data (just update the Advertising DID)
+
+
+enum LEExtendedAdvFragmentPreference:
+  -- Potential values for the Fragment_Preference parameter in a
+  -- HCI_LE_Set_Extended_Advertising_Data command.
+  [maximum_bits: 8]
+  MAY_FRAGMENT        = 0x00
+    -- The Controller may fragment all Host advertising data
+
+  SHOULD_NOT_FRAGMENT = 0x01
+    -- The Controller should not fragment or should minimize fragmentation of Host advertising data
+
+
+bits LEAdvertisingEventProperties:
+  -- The Advertising_Event_Properties bitfield values used in a HCI LE Set Extended Advertising
+  -- Parameters command.
+  0     [+1]  Flag  connectable
+  $next [+1]  Flag  scannable
+  $next [+1]  Flag  directed
+  $next [+1]  Flag  high_duty_cycle_directed_connectable
+  $next [+1]  Flag  use_legacy_pdus
+  $next [+1]  Flag  anonymous_advertising
+  $next [+1]  Flag  include_tx_power
+
+
+enum FlowControlMode:
+  [maximum_bits: 8]
+  PACKET_BASED     = 0x00
+  DATA_BLOCK_BASED = 0x01
+
+
+bits EventMaskPage2:
+  -- Bit definitions for the second page of the event mask. No flags are defined
+  -- below bit 8 in this bitfield.
+  8  [+1]  Flag  number_of_completed_data_blocks_event
+  14 [+1]  Flag  triggered_clock_capture_event
+  15 [+1]  Flag  synchronization_train_complete_event
+  16 [+1]  Flag  synchronization_train_received_event
+  17 [+1]  Flag  connectionless_peripheral_broadcast_receive_event
+  18 [+1]  Flag  connectionless_peripheral_broadcast_timeout_event
+  19 [+1]  Flag  truncated_page_complete_event
+  20 [+1]  Flag  peripheral_page_response_timeout_event
+  21 [+1]  Flag  connectionless_peripheral_broadcast_channel_map_event
+  22 [+1]  Flag  inquiry_response_notification_event
+  23 [+1]  Flag  authenticated_payload_timeout_expired_event
+  24 [+1]  Flag  sam_status_change_event
+  25 [+1]  Flag  encryption_change_event_v2
+
+
+bits LECISPHYOptions:
+  -- Identifies PHYs that can be used for transmission
+  0     [+1]  Flag  le_1m
+  $next [+1]  Flag  le_2m
+  $next [+1]  Flag  le_coded
+  $next [+5]  UInt  padding
+
+
+struct LESetCIGParametersCISOptions:
+  -- Parameters for the CISes defined in a LESetCIGParametersCommand
+
+  0     [+1]  UInt             cis_id
+    -- Used to identify a CIS
+    [requires: 0x00 <= this <= 0xEF]
+
+  $next [+2]  UInt             max_sdu_c_to_p
+    -- Maximum size, in octets, of the payload from the Central's Host
+    [requires: 0x0000 <= this <= 0x0FFF]
+
+  $next [+2]  UInt             max_sdu_p_to_c
+    -- Maximum size, in octets, of the payload from the Peripheral's Host
+    [requires: 0x0000 <= this <= 0x0FFF]
+
+  $next [+1]  LECISPHYOptions  phy_c_to_p
+    -- Identifies which PHY to use for transmission from the Central to the Peripheral
+
+  $next [+1]  LECISPHYOptions  phy_p_to_c
+    -- Identifies which PHY to use for transmission from the Peripheral to the Central
+
+  $next [+1]  UInt             rtn_c_to_p
+    -- Number of times every CIS Data PDU should be retransmitted from the Central to the
+    -- Peripheral
+
+  $next [+1]  UInt             rtn_p_to_c
+    -- Number of times every CIS Data PDU should be retransmitted from the Peripheral to the
+    -- Central
+
+
+enum LESleepClockAccuracyRange:
+  -- Accuracy of the sleep clock, provided as a range in parts per million (ppm)
+  [maximum_bits: 8]
+  PPM_251_TO_500 = 0x00
+  PPM_151_TO_250 = 0x01
+  PPM_101_TO_150 = 0x02
+  PPM_76_TO_100  = 0x03
+  PPM_51_TO_75   = 0x04
+  PPM_31_TO_50   = 0x05
+  PPM_21_TO_30   = 0x06
+  PPM_0_TO_20    = 0x07
+
+
+enum LECISPacking:
+  -- Preferred method of arranging subevents of multiple CISes
+  [maximum_bits: 8]
+  SEQUENTIAL  = 0x00
+  INTERLEAVED = 0x01
+
+
+enum LECISFraming:
+  -- Format of CIS Data PDUs
+  [maximum_bits: 8]
+  UNFRAMED = 0x00
+  FRAMED   = 0x01
+
+
+enum DataPathDirection:
+  [maximum_bits: 8]
+  INPUT  = 0x00
+    -- Host to Controller
+
+  OUTPUT = 0x01
+    -- Controller to Host
+
+# ========================= HCI Command packets =========================
+# Core Spec v5.3 Vol 4, Part E, Section 7
+
+# ========== 7.1 Link Control Commands ==========
+
+
+struct InquiryCommand:
+  -- 7.1.1 Inquiry command (v1.1) (BR/EDR)
+  -- HCI_Inquiry
+  --
+  -- Note: NO Command Complete; Sends Inquiry Complete at the end of the
+  -- inquiry to indicate its completion. No Inquiry Complete event is sent if
+  -- Inquiry is cancelled.
+
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+
+  0     [+hdr_size]  hci.CommandHeader  header
+
+  $next [+3]         InquiryAccessCode  lap
+    -- LAP (Lower Address Part)
+    -- In the range 0x9E8B00 - 0x9E8B3F, defined by the Bluetooth SIG in
+    -- Baseband Assigned Numbers.
+
+  $next [+1]         UInt               inquiry_length
+    -- Time before the inquiry is halted. Defined in 1.28s units.
+    -- Range: 0x01 to kInquiryLengthMax in hci_constants.h
+
+  $next [+1]         UInt               num_responses
+    -- Maximum number of responses before inquiry is halted.
+    -- Set to 0x00 for unlimited.
+
+
+struct InquiryCancelCommand:
+  -- 7.1.2 Inquiry Cancel command (v1.1) (BR/EDR)
+  -- HCI_Inquiry_Cancel
+  --
+  -- No command parameters
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0 [+hdr_size]  hci.CommandHeader  header
+
+# 7.1.3 Periodic Inquiry Mode command
+# HCI_Periodic_Inquiry_Mode
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.1.4 Exit Periodic Inquiry Mode command
+# HCI_Exit_Periodic_Inquiry_Mode
+# TODO: b/265052417 - Definition needs to be added
+
+
+struct CreateConnectionCommand:
+  -- 7.1.5 Create Connection (v1.1) (BR/EDR)
+  -- HCI_Create_Connection
+  --
+  -- NOTE on ReturnParams: No Command Complete event will be sent by the
+  -- Controller to indicate that this command has been completed. Instead, the
+  -- Connection Complete event will indicate that this command has been
+  -- completed.
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]                   hci.CommandHeader           header
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr                  bd_addr
+    -- BD_ADDR of the device to be connected
+
+  $next [+2]                          PacketType                  packet_type
+    -- Mask of allowable packet types.
+
+  $next [+1]                          hci.PageScanRepetitionMode  page_scan_repetition_mode
+    -- The Page Scan Repetition Mode of the remote device as retrieved by Inquiry.
+
+  $next [+1]                          UInt                        reserved
+    -- Reserved; must be set to 0.
+    [requires: this == 0]
+
+  $next [+2]                          hci.ClockOffset             clock_offset
+    -- Clock Offset. The lower 15 bits are set to the clock offset as retrieved
+    -- by an Inquiry. The highest bit is set to 1 if the rest of this parameter
+    -- is valid.
+
+  $next [+1]                          hci.GenericEnableParam      allow_role_switch
+    -- Allow Role Switch.
+    -- Allowed values:
+    --  0x00 - No role switch allowed, this device will be the central
+    --  0x01 - Role switch allowed, this device may become peripheral during
+    --  connection setup
+
+
+struct DisconnectCommand:
+  -- 7.1.6 Disconnect command (v1.1) (BR/EDR & LE)
+  -- HCI_Disconnect
+  --
+  -- NOTE on ReturnParams: No Command Complete event will be sent by the
+  -- Controller to indicate that this command has been completed. Instead, the
+  -- Disconnection Complete event will indicate that this command has been
+  -- completed.
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader  header
+  $next [+2]         UInt               connection_handle
+    -- Connection_Handle (only the lower 12-bits are meaningful).
+    --   Range: 0x0000 to 0x0EFF
+
+  $next [+1]         hci.StatusCode     reason
+    -- Reason for the disconnect.
+
+
+struct CreateConnectionCancelCommand:
+  -- 7.1.7 Create Connection Cancel command (v1.1) (BR/EDR)
+  -- HCI_Create_Connection_Cancel
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]                   hci.CommandHeader  header
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr         bd_addr
+    -- BD_ADDR of the pending Create Connection request to cancel
+
+
+struct AcceptConnectionRequestCommand:
+  -- 7.1.8 Accept Connection Request command (v1.1) (BR/EDR)
+  -- HCI_Accept_Connection_Request
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]                   hci.CommandHeader   header
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr          bd_addr
+    -- The 48-bit BD_ADDR of the remote device requesting the connection.
+
+  $next [+1]                          hci.ConnectionRole  role
+    -- Role this device will assume for the connection.
+
+
+struct RejectConnectionRequestCommand:
+  -- 7.1.9 Reject Connection Request command (v1.1) (BR/EDR)
+  -- HCI_Reject_Connection_Request
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]                   hci.CommandHeader  header
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr         bd_addr
+    -- The 48-bit BD_ADDR of the remote device requesting the connection.
+
+  $next [+1]                          hci.StatusCode     reason
+    -- Must be one of CONNECTION_REJECTED* from hci.StatusCode in this file
+
+
+struct LinkKeyRequestReplyCommand:
+  -- 7.1.10 Link Key Request Reply command (v1.1) (BR/EDR)
+  -- HCI_Link_Key_Request_Reply
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]                   hci.CommandHeader  header
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr         bd_addr
+    -- The 48-bit BD_ADDR of the remote device requesting the connection.
+
+  let bredr_link_key_size = hci.LinkKey.$size_in_bytes
+  $next [+bredr_link_key_size]        hci.LinkKey        link_key
+    -- Link key to use for the connection with the peer device.
+
+
+struct LinkKeyRequestNegativeReplyCommand:
+  -- 7.1.11 Link Key Request Negative Reply command (v1.1) (BR/EDR)
+  -- HCI_Link_Key_Request_Negative_Reply
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]                   hci.CommandHeader  header
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr         bd_addr
+    -- BD_ADDR of the peer device that the host does not have a link key for.
+
+# 7.1.12 PIN Code Request Reply command
+# HCI_PIN_Code_Request_Reply
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.1.13 PIN Code Request Negative Reply command
+# HCI_PIN_Code_Request_Negative_Reply
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.1.14 Change Connection Packet Type command
+# HCI_Change_Connection_Packet_Type
+# TODO: b/265052417 - Definition needs to be added
+
+
+struct AuthenticationRequestedCommand:
+  -- 7.1.15 Authentication Requested command (v1.1) (BR/EDR)
+  -- HCI_Authentication_Requested
+  --
+  -- NOTE on ReturnParams: No Command Complete event will be sent by the
+  -- Controller to indicate that this command has been completed. Instead, the
+  -- Authentication Complete event will indicate that this command has been
+  -- completed.
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader  header
+  $next [+2]         UInt               connection_handle
+    -- Connection_Handle (only the lower 12-bits are meaningful).
+    --   Range: 0x0000 to 0x0EFF
+    -- Must be the handle of a connected ACL-U logical link.
+
+
+struct SetConnectionEncryptionCommand:
+  -- 7.1.16 Set Connection Encryption command (v1.1) (BR/EDR)
+  -- HCI_Set_Connection_Encryption
+  --
+  -- NOTE on ReturnParams: No Command Complete event will be sent by the
+  -- Controller to indicate that this command has been completed. Instead, the
+  -- Encryption Change event will indicate that this command has been completed.
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader       header
+  $next [+2]         UInt                    connection_handle
+    -- Connection_Handle (only the lower 12-bits are meaningful).
+    --   Range: 0x0000 to 0x0EFF
+    -- Must be the handle of a connected ACL-U logical link.
+
+  $next [+1]         hci.GenericEnableParam  encryption_enable
+    -- Whether link level encryption should be turned on or off.
+
+# 7.1.17 Change Connection Link Key command
+# HCI_Change_Connection_Link_Key
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.1.18 Link Key Selection command
+# HCI_Link_Key_Selection
+# TODO: b/265052417 - Definition needs to be added
+
+
+struct RemoteNameRequestCommand:
+  -- 7.1.19 Remote Name Request command (v1.1) (BR/EDR)
+  -- HCI_Remote_Name_Request
+  --
+  -- NOTE on ReturnParams: No Command Complete event will be sent by the
+  -- Controller to indicate that this command has been completed. Instead, the
+  -- Remote Name Request Complete event will indicate that this command has been
+  -- completed.
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]                   hci.CommandHeader           header
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr                  bd_addr
+    -- Address of the device whose name is to be requested.
+
+  $next [+1]                          hci.PageScanRepetitionMode  page_scan_repetition_mode
+    -- Page Scan Repetition Mode of the device, obtained by Inquiry.
+
+  $next [+1]                          UInt                        reserved
+    -- Reserved; must be set to 0.
+    [requires: this == 0]
+
+  $next [+2]                          hci.ClockOffset             clock_offset
+    -- Clock offset.  The lower 15 bits of this represent bits 16-2
+    -- of CLKNPeripheral-CLK, and the highest bit is set when the other
+    -- bits are valid.
+
+# 7.1.20 Remote Name Request Cancel command
+# HCI_Remote_Name_Request_Cancel
+
+
+struct ReadRemoteSupportedFeaturesCommand:
+  -- 7.1.21 Read Remote Supported Features command (v1.1) (BR/EDR)
+  -- HCI_Read_Remote_Supported_Features
+  --
+  -- NOTE on ReturnParams: No Command Complete event will be sent by the
+  -- Controller to indicate that this command has been completed. Instead, the
+  -- Read Remote Supported Features Complete event will indicate that this
+  -- command has been completed.
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader  header
+  $next [+2]         UInt               connection_handle
+    -- Connection_Handle (only the lower 12-bits are meaningful).
+    --   Range: 0x0000 to 0x0EFF
+    -- Must be the handle of a connected ACL-U logical link.
+
+
+struct ReadRemoteExtendedFeaturesCommand:
+  -- 7.1.22 Read Remote Extended Features command (v1.2) (BR/EDR)
+  -- HCI_Read_Remote_Extended_Features
+  --
+  -- NOTE on ReturnParams: No Command Complete event will be sent by the
+  -- Controller to indicate that this command has been completed. Instead, the
+  -- Read Remote Extended Features Complete event will indicate that this
+  -- command has been completed.
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader  header
+  $next [+2]         UInt               connection_handle
+    -- Connection_Handle (only the lower 12-bits are meaningful).
+    --   Range: 0x0000 to 0x0EFF
+    -- Must be the handle of a connected ACL-U logical link.
+
+  $next [+1]         UInt               page_number
+    -- Page of features to read.
+    -- Values:
+    --  - 0x00 standard features as if requested by Read Remote Supported Features
+    --  - 0x01-0xFF the corresponding features page (see Vol 2, Part C, Sec 3.3).
+
+
+struct ReadRemoteVersionInfoCommand:
+  -- 7.1.23 Read Remote Version Information command (v1.1) (BR/EDR & LE)
+  -- HCI_Read_Remote_Version_Information
+  --
+  -- NOTE on ReturnParams: No Command Complete event will be sent by the
+  -- Controller to indicate that this command has been completed. Instead, the
+  -- Read Remote Version Information Complete event will indicate that this
+  -- command has been completed.
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader  header
+  $next [+2]         UInt               connection_handle
+    -- Identifies the connection whose remote version information is requested.
+    -- Connection_Handle (only the lower 12-bits are meaningful).
+    --   Range: 0x0000 to 0x0EFF
+
+# 7.1.24 Read Clock Offset command
+# HCI_Read_Clock_Offset
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.1.25 Read LMP Handle command
+# HCI_Read_LMP_Handle
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.1.26 Setup Synchronous Connection command
+# HCI_Setup_Synchronous_Connection
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.1.27 Accept Synchronous Connection Request command
+# HCI_Accept_Synchronous_Connection_Request
+# TODO: b/265052417 - Definition needs to be added
+
+
+struct RejectSynchronousConnectionRequestCommand:
+  -- 7.1.28 Reject Synchronous Connection command (BR/EDR)
+  -- HCI_Reject_Synchronous_Connection_Request
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]                   hci.CommandHeader  header
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr         bd_addr
+    -- Address of the remote device that sent the request.
+
+  $next [+1]                          hci.StatusCode     reason
+    -- Reason the connection request was rejected.
+
+
+struct IoCapabilityRequestReplyCommand:
+  -- 7.1.29 IO Capability Request Reply command (v2.1 + EDR) (BR/EDR)
+  -- HCI_IO_Capability_Request_Reply
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]                   hci.CommandHeader               header
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr                      bd_addr
+    -- The BD_ADDR of the remote device involved in the simple pairing process.
+
+  $next [+1]                          hci.IoCapability                io_capability
+    -- The IO capabilities of this device.
+
+  $next [+1]                          OobDataPresent                  oob_data_present
+    -- Whether there is out-of-band data present, and what type.
+
+  $next [+1]                          hci.AuthenticationRequirements  authentication_requirements
+    -- Authentication requirements of the host.
+
+
+struct UserConfirmationRequestReplyCommand:
+  -- 7.1.30 User Confirmation Request Reply command (v2.1 + EDR) (BR/EDR)
+  -- HCI_User_Confirmation_Request_Reply
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]                   hci.CommandHeader  header
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr         bd_addr
+    -- The BD_ADDR of the remote device involved in the simple pairing process.
+
+
+struct UserConfirmationRequestNegativeReplyCommand:
+  -- 7.1.31 User Confirmation Request Negative Reply command (v2.1 + EDR) (BR/EDR)
+  -- HCI_User_Confirmation_Request_Negative_Reply
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]                   hci.CommandHeader  header
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr         bd_addr
+    -- The BD_ADDR of the remote device involved in the simple pairing process.
+
+
+struct UserPasskeyRequestReplyCommand:
+  -- 7.1.32 User Passkey Request Reply command (v2.1 + EDR) (BR/EDR)
+  -- HCI_User_Passkey_Request_Reply
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]                   hci.CommandHeader  header
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr         bd_addr
+    -- The BD_ADDR of the remote device involved in the simple pairing process.
+
+  $next [+4]                          UInt               numeric_value
+    -- Numeric value (passkey) entered by user.
+    [requires: 0 <= this <= 999999]
+
+
+struct UserPasskeyRequestNegativeReplyCommand:
+  -- 7.1.33 User Passkey Request Negative Reply command (v2.1 + EDR) (BR/EDR)
+  -- HCI_User_Passkey_Request_Negative_Reply
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]                   hci.CommandHeader  header
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr         bd_addr
+    -- The BD_ADDR of the remote device involved in the simple pairing process.
+
+# 7.1.34 Remote OOB Data Request Reply command
+# HCI_Remote_OOB_Data_Request_Reply
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.1.35 Remote OOB Data Request Negative Reply command
+# HCI_Remote_OOB_Data_Request_Negative_Reply
+# TODO: b/265052417 - Definition needs to be added
+
+
+struct IoCapabilityRequestNegativeReplyCommand:
+  -- 7.1.36 IO Capability Request Negative Reply command (v2.1 + EDR) (BR/EDR)
+  -- HCI_IO_Capability_Request_Negative_Reply
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]                   hci.CommandHeader  header
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr         bd_addr
+    -- The BD_ADDR of the remote device involved in the simple pairing process.
+
+  $next [+1]                          hci.StatusCode     reason
+    -- Reason that Simple Pairing was rejected. See 7.1.36 for valid error codes.
+
+
+struct CodecId:
+  -- Codec identifier: a coding format plus company/vendor-specific codec IDs.
+  0     [+1]  hci.CodingFormat  coding_format
+  $next [+2]  UInt              company_id
+    -- See assigned numbers.
+
+  $next [+2]  UInt              vendor_codec_id
+    -- Shall be ignored if |coding_format| is not VENDOR_SPECIFIC.
+
+
+struct SynchronousConnectionParameters:
+  -- Parameters used by the Enhanced Setup Synchronous Connection command
+  -- (CSA2) (BR/EDR)
+
+  struct VendorCodingFormat:
+    0     [+1]  hci.CodingFormat  coding_format
+    $next [+2]  UInt              company_id
+      -- See assigned numbers.
+
+    $next [+2]  UInt              vendor_codec_id
+      -- Shall be ignored if |coding_format| is not VENDOR_SPECIFIC.
+
+  enum ScoRetransmissionEffort:
+    [maximum_bits: 8]
+    NONE              = 0x00
+      -- SCO or eSCO
+
+    POWER_OPTIMIZED   = 0x01
+      -- eSCO only
+
+    QUALITY_OPTIMIZED = 0x02
+      -- eSCO only
+
+    DONT_CARE         = 0xFF
+      -- SCO or eSCO
+
+  0     [+4]         UInt                     transmit_bandwidth
+    -- Transmit bandwidth in octets per second.
+
+  $next [+4]         UInt                     receive_bandwidth
+    -- Receive bandwidth in octets per second.
+
+  let vcf_size = VendorCodingFormat.$size_in_bytes
+
+  $next [+vcf_size]  VendorCodingFormat       transmit_coding_format
+    -- Local Controller -> Remote Controller coding format.
+
+  $next [+vcf_size]  VendorCodingFormat       receive_coding_format
+    -- Remote Controller -> Local Controller coding format.
+
+  $next [+2]         UInt                     transmit_codec_frame_size_bytes
+
+  $next [+2]         UInt                     receive_codec_frame_size_bytes
+
+  $next [+4]         UInt                     input_bandwidth
+    -- Host->Controller data rate in octets per second.
+
+  $next [+4]         UInt                     output_bandwidth
+    -- Controller->Host data rate in octets per second.
+
+  $next [+vcf_size]  VendorCodingFormat       input_coding_format
+    -- Host->Controller coding format.
+
+  $next [+vcf_size]  VendorCodingFormat       output_coding_format
+    -- Controller->Host coding format.
+
+  $next [+2]         UInt                     input_coded_data_size_bits
+    -- Size, in bits, of the sample or framed data.
+
+  $next [+2]         UInt                     output_coded_data_size_bits
+    -- Size, in bits, of the sample or framed data.
+
+  $next [+1]         PcmDataFormat            input_pcm_data_format
+
+  $next [+1]         PcmDataFormat            output_pcm_data_format
+
+  $next [+1]         UInt                     input_pcm_sample_payload_msb_position
+    -- The number of bit positions within an audio sample that the MSB of
+    -- the sample is away from starting at the MSB of the data.
+
+  $next [+1]         UInt                     output_pcm_sample_payload_msb_position
+    -- The number of bit positions within an audio sample that the MSB of
+    -- the sample is away from starting at the MSB of the data.
+
+  $next [+1]         ScoDataPath              input_data_path
+
+  $next [+1]         ScoDataPath              output_data_path
+
+  $next [+1]         UInt                     input_transport_unit_size_bits
+    -- The number of bits in each unit of data received from the Host over the audio data transport.
+    -- 0 indicates "not applicable" (implied by the choice of audio data transport).
+
+  $next [+1]         UInt                     output_transport_unit_size_bits
+    -- The number of bits in each unit of data sent to the Host over the audio data transport.
+    -- 0 indicates "not applicable" (implied by the choice of audio data transport).
+
+  $next [+2]         UInt                     max_latency_ms
+    -- The value in milliseconds representing the upper limit of the sum of
+    -- the synchronous interval, and the size of the eSCO window, where the
+    -- eSCO window is the reserved slots plus the retransmission window.
+    -- Minimum: 0x0004
+    -- Don't care: 0xFFFF
+
+  $next [+2]         ScoPacketType            packet_types
+    -- Bitmask of allowed packet types.
+
+  $next [+1]         ScoRetransmissionEffort  retransmission_effort
+
+
+struct EnhancedSetupSynchronousConnectionCommand:
+  -- 7.1.45 Enhanced Setup Synchronous Connection command
+  -- HCI_Enhanced_Setup_Synchronous_Connection
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader                header
+  $next [+2]         UInt                             connection_handle
+    -- The connection handle of the associated ACL link if creating a new (e)SCO connection, or the
+    -- handle of an existing eSCO link if updating connection parameters.
+    -- Connection_Handle (only the lower 12-bits are meaningful).
+
+  let scp_size = SynchronousConnectionParameters.$size_in_bytes
+  $next [+scp_size]  SynchronousConnectionParameters  connection_parameters
+
+
+struct EnhancedAcceptSynchronousConnectionRequestCommand:
+  -- 7.1.46 Enhanced Accept Synchronous Connection Request command (CSA2) (BR/EDR)
+  -- HCI_Enhanced_Accept_Synchronous_Connection_Request
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]                   hci.CommandHeader                header
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr                       bd_addr
+    -- The 48-bit BD_ADDR of the remote device requesting the connection.
+
+  let scp_size = SynchronousConnectionParameters.$size_in_bytes
+  $next [+scp_size]                   SynchronousConnectionParameters  connection_parameters
+
+# 7.1.47 Truncated Page command
+# HCI_Truncated_Page
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.1.48 Truncated Page Cancel command
+# HCI_Truncated_Page_Cancel
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.1.49 Set Connectionless Peripheral Broadcast command
+# HCI_Set_Connectionless_Peripheral_Broadcast
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.1.50 Set Connectionless Peripheral Broadcast Receive command
+# HCI_Set_Connectionless_Peripheral_Broadcast_Receive
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.1.51 Start Synchronization Train command
+# HCI_Start_Synchronization_Train
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.1.52 Receive Synchronization Train command
+# HCI_Receive_Synchronization_Train
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.1.53 Remote OOB Extended Data Request Reply command
+# HCI_Remote_OOB_Extended_Data_Request_Reply
+# TODO: b/265052417 - Definition needs to be added
+
+
+# ========== 7.3 Controller & Baseband Commands ==========
+
+
+struct SetEventMaskCommand:
+  -- 7.3.1 Set Event Mask command (v1.1)
+  -- HCI_Set_Event_Mask
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader  header
+  $next [+8]         UInt               event_mask
+    -- 64-bit bit mask used to control which HCI events are generated by the HCI
+    -- for the Host. See enum class EventMask in hci_constants.h
+
+
+struct ResetCommand:
+  -- 7.3.2 Reset command (v1.1)
+  -- HCI_Reset
+  -- No command parameters beyond the command header.
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0 [+hdr_size]  hci.CommandHeader  header
+
+# 7.3.3 Set Event Filter command
+# HCI_Set_Event_Filter
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.4 Flush command
+# HCI_Flush
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.5 Read PIN Type command
+# HCI_Read_PIN_Type
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.6 Write PIN Type command
+# HCI_Write_PIN_Type
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.8 Read Stored Link Key command
+# HCI_Read_Stored_Link_Key
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.9 Write Stored Link Key command
+# HCI_Write_Stored_Link_Key
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.10 Delete Stored Link Key command
+# HCI_Delete_Stored_Link_Key
+# TODO: b/265052417 - Definition needs to be added
+
+
+struct WriteLocalNameCommand:
+  -- 7.3.11 Write Local Name command (v1.1) (BR/EDR)
+  -- HCI_Write_Local_Name
+  -- Writes the user-friendly name used to identify this device.
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]         hci.CommandHeader  header
+  let local_name_size = LocalName.$size_in_bytes
+  $next [+local_name_size]  LocalName          local_name
+    -- A UTF-8 encoded User Friendly Descriptive Name for the device.
+    -- If the name contained in the parameter is shorter than 248 octets, the end
+    -- of the name is indicated by a NULL octet (0x00), and the following octets
+    -- (to fill up 248 octets, which is the length of the parameter) do not have
+    -- valid values.
+
+
+struct ReadLocalNameCommand:
+  -- 7.3.12 Read Local Name command (v1.1) (BR/EDR)
+  -- HCI_Read_Local_Name
+  -- No command parameters beyond the command header.
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0 [+hdr_size]  hci.CommandHeader  header
+
+# 7.3.13 Read Connection Accept Timeout command
+# HCI_Read_Connection_Accept_Timeout
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.14 Write Connection Accept Timeout command
+# HCI_Write_Connection_Accept_Timeout
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.15 Read Page Timeout command
+# HCI_Read_Page_Timeout
+# TODO: b/265052417 - Definition needs to be added
+
+
+struct WritePageTimeoutCommand:
+  -- 7.3.16 Write Page Timeout command (v1.1) (BR/EDR)
+  -- HCI_Write_Page_Timeout
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader  header
+  $next [+2]         UInt               page_timeout
+    -- Page_Timeout, in time slices (0.625 ms)
+    -- Valid Range: MIN - MAX in PageTimeout in this file
+    [requires: 0x0001 <= this <= 0xFFFF]
+
+
+struct ReadScanEnableCommand:
+  -- 7.3.17 Read Scan Enable command (v1.1) (BR/EDR)
+  -- HCI_Read_Scan_Enable
+  -- No command parameters beyond the command header.
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0 [+hdr_size]  hci.CommandHeader  header
+
+
+struct WriteScanEnableCommand:
+  -- 7.3.18 Write Scan Enable command (v1.1) (BR/EDR)
+  -- HCI_Write_Scan_Enable
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader  header
+  $next [+1]         ScanEnableBits     scan_enable
+    -- Bit mask of enabled scans. See enum class ScanEnableBits in this file
+    -- for how to construct this bitfield.
+
+
+struct ReadPageScanActivityCommand:
+  -- 7.3.19 Read Page Scan Activity command (v1.1) (BR/EDR)
+  -- HCI_Read_Page_Scan_Activity
+  -- No command parameters beyond the command header.
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0 [+hdr_size]  hci.CommandHeader  header
+
+
+struct WritePageScanActivityCommand:
+  -- 7.3.20 Write Page Scan Activity command (v1.1) (BR/EDR)
+  -- HCI_Write_Page_Scan_Activity
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader  header
+  $next [+2]         UInt               page_scan_interval
+    -- Page_Scan_Interval, in time slices (0.625ms)
+    -- Valid Range: MIN - MAX in ScanInterval in this file
+    [requires: 0x0012 <= this <= 0x1000]
+
+  $next [+2]         UInt               page_scan_window
+    -- Page_Scan_Window, in time slices (0.625ms)
+    -- Valid Range: MIN - MAX in ScanWindow in this file
+    [requires: 0x0011 <= this <= 0x1000]
+
+
+struct ReadInquiryScanActivityCommand:
+  -- 7.3.21 Read Inquiry Scan Activity command (v1.1) (BR/EDR)
+  -- HCI_Read_Inquiry_Scan_Activity
+  -- No command parameters beyond the command header.
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0 [+hdr_size]  hci.CommandHeader  header
+
+
+struct WriteInquiryScanActivityCommand:
+  -- 7.3.22 Write Inquiry Scan Activity command (v1.1) (BR/EDR)
+  -- HCI_Write_Inquiry_Scan_Activity
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader  header
+  $next [+2]         UInt               inquiry_scan_interval
+    -- Inquiry_Scan_Interval, in time slices (0.625ms)
+    -- Valid Range: MIN - MAX in ScanInterval in this file
+    [requires: 0x0012 <= this <= 0x1000]
+
+  $next [+2]         UInt               inquiry_scan_window
+    -- Inquiry_Scan_Window, in time slices (0.625ms)
+    -- Valid Range: MIN - MAX in ScanWindow in this file
+    [requires: 0x0011 <= this <= 0x1000]
+
+# 7.3.23 Read Authentication Enable command
+# HCI_Read_Authentication_Enable
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.24 Write Authentication Enable command
+# HCI_Write_Authentication_Enable
+# TODO: b/265052417 - Definition needs to be added
+
+
+struct ReadClassOfDeviceCommand:
+  -- 7.3.25 Read Class of Device command (v1.1) (BR/EDR)
+  -- HCI_Read_Class_Of_Device
+  -- No command parameters beyond the command header.
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0 [+hdr_size]  hci.CommandHeader  header
+
+
+struct WriteClassOfDeviceCommand:
+  -- 7.3.26 Write Class Of Device command (v1.1) (BR/EDR)
+  -- HCI_Write_Class_Of_Device
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader  header
+  $next [+3]         hci.ClassOfDevice  class_of_device
+    -- The Class_Of_Device value to assign to this device.
+
+# 7.3.27 Read Voice Setting command
+# HCI_Read_Voice_Setting
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.28 Write Voice Setting command
+# HCI_Write_Voice_Setting
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.29 Read Automatic Flush Timeout command
+# HCI_Read_Automatic_Flush_Timeout
+# TODO: b/265052417 - Definition needs to be added
+
+
+struct WriteAutomaticFlushTimeoutCommand:
+  -- 7.3.30 Write Automatic Flush Timeout command (v1.1) (BR/EDR)
+  -- HCI_Write_Automatic_Flush_Timeout
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader  header
+  $next [+2]         UInt               connection_handle
+    -- Connection_Handle (only the lower 12-bits are meaningful).
+    --   Range: 0x0000 to 0x0EFF
+    [requires: 0x0000 <= this <= 0x0EFF]
+
+  $next [+2]         UInt               flush_timeout
+    -- The value for the Flush_Timeout configuration parameter (Core Spec v5.2, Vol 4, Part E, Sec 6.19).
+    -- Range: 0x0000 to 0x07FF. 0x0000 indicates infinite flush timeout (no automatic flush).
+    -- Time = flush_timeout * 0.625ms.
+    -- Time Range (nonzero values): 0.625ms to 1279.375ms.
+    [requires: 0x0000 <= this <= 0x07FF]
+
+# 7.3.31 Read Num Broadcast Retransmissions command
+# HCI_Read_Num_Broadcast_Retransmissions
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.32 Write Num Broadcast Retransmissions command
+# HCI_Write_Num_Broadcast_Retransmissions
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.33 Read Hold Mode Activity command
+# HCI_Read_Hold_Mode_Activity
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.34 Write Hold Mode Activity command
+# HCI_Write_Hold_Mode_Activity
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.35 Read Transmit Power Level command
+# HCI_Read_Transmit_Power_Level
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.36 Read Synchronous Flow Control Enable command
+# HCI_Read_Synchronous_Flow_Control_Enable
+# TODO: b/265052417 - Definition needs to be added
+
+
+struct WriteSynchronousFlowControlEnableCommand:
+  -- 7.3.37 Write Synchronous Flow Control Enable command (BR/EDR)
+  -- HCI_Write_Synchronous_Flow_Control_Enable
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader       header
+  $next [+1]         hci.GenericEnableParam  synchronous_flow_control_enable
+    -- If enabled, HCI_Number_Of_Completed_Packets events shall be sent from the controller
+    -- for synchronous connection handles.
+
+# 7.3.38 Set Controller To Host Flow Control command
+# HCI_Set_Controller_To_Host_Flow_Control
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.39 Host Buffer Size command
+# HCI_Host_Buffer_Size
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.40 Host Number Of Completed Packets command
+# HCI_Host_Number_Of_Completed_Packets
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.41 Read Link Supervision Timeout command
+# HCI_Read_Link_Supervision_Timeout
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.42 Write Link Supervision Timeout command
+# HCI_Write_Link_Supervision_Timeout
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.43 Read Number Of Supported IAC command
+# HCI_Read_Number_Of_Supported_IAC
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.44 Read Current IAC LAP command
+# HCI_Read_Current_IAC_LAP
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.45 Write Current IAC LAP command
+# HCI_Write_Current_IAC_LAP
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.46 Set AFH Host Channel Classification command
+# HCI_Set_AFH_Host_Channel_Classification
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.47 Read Inquiry Scan Type command
+# HCI_Read_Inquiry_Scan_Type
+# TODO: b/265052417 - Definition needs to be added
+
+
+struct WriteInquiryScanTypeCommand:
+  -- 7.3.48 Write Inquiry Scan Type (v1.2) (BR/EDR)
+  -- HCI_Write_Inquiry_Scan_Type
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader  header
+  $next [+1]         InquiryScanType    inquiry_scan_type
+    -- See enum class InquiryScanType in this file for possible values
+    -- NOTE(review): presumably selects standard vs. interlaced scan — confirm
+    -- against the InquiryScanType enum definition.
+
+
+struct ReadInquiryModeCommand:
+  -- 7.3.49 Read Inquiry Mode (v1.2) (BR/EDR)
+  -- HCI_Read_Inquiry_Mode
+  -- No command parameters beyond the command header.
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0 [+hdr_size]  hci.CommandHeader  header
+
+
+struct WriteInquiryModeCommand:
+  -- 7.3.50 Write Inquiry Mode (v1.2) (BR/EDR)
+  -- HCI_Write_Inquiry_Mode
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader  header
+  $next [+1]         InquiryMode        inquiry_mode
+    -- See enum class InquiryMode in this file for possible values
+
+
+struct ReadPageScanTypeCommand:
+  -- 7.3.51 Read Page Scan Type (v1.2) (BR/EDR)
+  -- HCI_Read_Page_Scan_Type
+  -- No command parameters beyond the command header.
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0 [+hdr_size]  hci.CommandHeader  header
+
+
+struct WritePageScanTypeCommand:
+  -- 7.3.52 Write Page Scan Type (v1.2) (BR/EDR)
+  -- HCI_Write_Page_Scan_Type
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader  header
+  $next [+1]         PageScanType       page_scan_type
+    -- See enum class PageScanType in this file for possible values
+
+# 7.3.53 Read AFH Channel Assessment Mode command
+# HCI_Read_AFH_Channel_Assessment_Mode
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.54 Write AFH Channel Assessment Mode command
+# HCI_Write_AFH_Channel_Assessment_Mode
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.55 Read Extended Inquiry Response command
+# HCI_Read_Extended_Inquiry_Response
+# TODO: b/265052417 - Definition needs to be added
+
+
+struct WriteExtendedInquiryResponseCommand:
+  -- 7.3.56 Write Extended Inquiry Response (v1.2) (BR/EDR)
+  -- HCI_Write_Extended_Inquiry_Response
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader        header
+  $next [+1]         UInt                     fec_required
+    -- Whether FEC encoding is required. (v1.2) (7.3.56)
+
+  let eir_size = ExtendedInquiryResponse.$size_in_bytes
+  $next [+eir_size]  ExtendedInquiryResponse  extended_inquiry_response
+    -- Extended inquiry response data as defined in Vol 3, Part C, Sec 8
+
+# 7.3.57 Refresh Encryption Key command
+# HCI_Refresh_Encryption_Key
+# TODO: b/265052417 - Definition needs to be added
+
+
+struct ReadSimplePairingModeCommand:
+  -- 7.3.58 Read Simple Pairing Mode (v2.1 + EDR) (BR/EDR)
+  -- HCI_Read_Simple_Pairing_Mode
+  -- No command parameters beyond the command header.
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0 [+hdr_size]  hci.CommandHeader  header
+
+
+struct WriteSimplePairingModeCommand:
+  -- 7.3.59 Write Simple Pairing Mode (v2.1 + EDR) (BR/EDR)
+  -- HCI_Write_Simple_Pairing_Mode
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader       header
+  $next [+1]         hci.GenericEnableParam  simple_pairing_mode
+    -- Enable/disable Simple Pairing mode.
+
+# 7.3.60 Read Local OOB Data command
+# HCI_Read_Local_OOB_Data
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.61 Read Inquiry Response Transmit Power Level command
+# HCI_Read_Inquiry_Response_Transmit_Power_Level
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.62 Write Inquiry Transmit Power Level command
+# HCI_Write_Inquiry_Transmit_Power_Level
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.63 Send Keypress Notification command
+# HCI_Send_Keypress_Notification
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.64 Read Default Erroneous Data Reporting command
+# HCI_Read_Default_Erroneous_Data_Reporting
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.65 Write Default Erroneous Data Reporting command
+# HCI_Write_Default_Erroneous_Data_Reporting
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.66 Enhanced Flush command
+# HCI_Enhanced_Flush
+# TODO: b/265052417 - Definition needs to be added
+
+
+struct SetEventMaskPage2Command:
+  -- 7.3.69 Set Event Mask Page 2 command (v3.0 + HS)
+  -- HCI_Set_Event_Mask_Page_2
+  0     [+hci.CommandHeader.$size_in_bytes]  hci.CommandHeader  header
+  $next [+8]  bits:
+    # Only the low 26 bits of the 8-byte field are mapped here.
+    0     [+26]                              EventMaskPage2     event_mask_page_2
+      -- Bit mask used to control which HCI events are generated by the HCI for the Host.
+
+
+struct ReadFlowControlModeCommand:
+  -- 7.3.72 Read Flow Control Mode command (v3.0 + HS) (BR/EDR)
+  -- HCI_Read_Flow_Control_Mode
+  -- No command parameters beyond the command header.
+  0 [+hci.CommandHeader.$size_in_bytes]  hci.CommandHeader  header
+
+
+struct WriteFlowControlModeCommand:
+  -- 7.3.73 Write Flow Control Mode command (v3.0 + HS) (BR/EDR)
+  -- HCI_Write_Flow_Control_Mode
+  0     [+hci.CommandHeader.$size_in_bytes]  hci.CommandHeader  header
+  $next [+1]                                 FlowControlMode    flow_control_mode
+    -- See enum class FlowControlMode in this file for possible values
+
+# 7.3.74 Read Enhanced Transmit Power Level command
+# HCI_Read_Enhanced_Transmit_Power_Level
+# TODO: b/265052417 - Definition needs to be added
+
+
+struct ReadLEHostSupportCommand:
+  -- 7.3.78 Read LE Host Support command (v4.0) (BR/EDR)
+  -- HCI_Read_LE_Host_Support
+  -- No command parameters beyond the command header.
+  0 [+hci.CommandHeader.$size_in_bytes]  hci.CommandHeader  header
+
+
+struct WriteLEHostSupportCommand:
+  -- 7.3.79 Write LE Host Support command (v4.0) (BR/EDR)
+  -- HCI_Write_LE_Host_Support
+  -- Enables or disables the Host's LE support feature bit.
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader       header
+  $next [+1]         hci.GenericEnableParam  le_supported_host
+    -- Sets the LE Supported (Host) Link Manager Protocol feature bit.
+
+  $next [+1]         UInt                    unused
+    -- Core Spec v5.0, Vol 2, Part E, Section 6.35: This parameter was named
+    -- "Simultaneous_LE_Host" and the value is set to "disabled(0x00)" and
+    -- "shall be ignored".
+    -- Core Spec v5.3, Vol 4, Part E, Section 7.3.79: This parameter was renamed
+    -- to "Unused" and "shall be ignored by the controller".
+
+# 7.3.80 Set MWS Channel Parameters command
+# HCI_Set_MWS_Channel_Parameters
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.81 Set External Frame Configuration command
+# HCI_Set_External_Frame_Configuration
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.82 Set MWS Signaling command
+# HCI_Set_MWS_Signaling
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.83 Set MWS Transport Layer command
+# HCI_Set_MWS_Transport_Layer
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.84 Set MWS Scan Frequency Table command
+# HCI_Set_MWS_Scan_Frequency_Table
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.85 Set MWS_PATTERN Configuration command
+# HCI_Set_MWS_PATTERN_Configuration
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.86 Set Reserved LT_ADDR command
+# HCI_Set_Reserved_LT_ADDR
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.87 Delete Reserved LT_ADDR command
+# HCI_Delete_Reserved_LT_ADDR
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.88 Set Connectionless Peripheral Broadcast Data command
+# HCI_Set_Connectionless_Peripheral_Broadcast_Data
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.89 Read Synchronization Train Parameters command
+# HCI_Read_Synchronization_Train_Parameters
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.90 Write Synchronization Train Parameters command
+# HCI_Write_Synchronization_Train_Parameters
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.91 Read Secure Connections Host Support command
+# HCI_Read_Secure_Connections_Host_Support
+# TODO: b/265052417 - Definition needs to be added
+
+
+struct WriteSecureConnectionsHostSupportCommand:
+  -- 7.3.92 Write Secure Connections Host Support command
+  -- HCI_Write_Secure_Connections_Host_Support
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader       header
+  $next [+1]         hci.GenericEnableParam  secure_connections_host_support
+    -- Enable/disable Secure Connections host support.
+
+
+struct ReadAuthenticatedPayloadTimeoutCommand:
+  -- 7.3.93 Read Authenticated Payload Timeout command (v4.1) (BR/EDR & LE)
+  -- HCI_Read_Authenticated_Payload_Timeout
+  0     [+hci.CommandHeader.$size_in_bytes]  hci.CommandHeader  header
+  $next [+2]                                 UInt               connection_handle
+    -- Connection_Handle (only the lower 12 bits are meaningful).
+    [requires: 0x0000 <= this <= 0x0EFF]
+
+
+struct WriteAuthenticatedPayloadTimeoutCommand:
+  -- 7.3.94 Write Authenticated Payload Timeout command (v4.1) (BR/EDR & LE)
+  -- HCI_Write_Authenticated_Payload_Timeout
+  0     [+hci.CommandHeader.$size_in_bytes]  hci.CommandHeader  header
+  $next [+2]                                 UInt               connection_handle
+    -- Connection_Handle (only the lower 12 bits are meaningful).
+    [requires: 0x0000 <= this <= 0x0EFF]
+
+  $next [+2]                                 UInt               authenticated_payload_timeout
+    -- Default = 0x0BB8 (30 s)
+    -- Time = N * 10 ms
+    -- Time Range: 10 ms to 655,350 ms
+    [requires: 0x0001 <= this <= 0xFFFF]
+
+# 7.3.95 Read Local OOB Extended Data command
+# HCI_Read_Local_OOB_Extended_Data
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.96 Read Extended Page Timeout command
+# HCI_Read_Extended_Page_Timeout
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.97 Write Extended Page Timeout command
+# HCI_Write_Extended_Page_Timeout
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.98 Read Extended Inquiry Length command
+# HCI_Read_Extended_Inquiry_Length
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.99 Write Extended Inquiry Length command
+# HCI_Write_Extended_Inquiry_Length
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.100 Set Ecosystem Base Interval command
+# HCI_Set_Ecosystem_Base_Interval
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.101 Configure Data Path command
+# HCI_Configure_Data_Path
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.3.102 Set Min Encryption Key Size command
+# HCI_Set_Min_Encryption_Key_Size
+# TODO: b/265052417 - Definition needs to be added
+
+
+# ========== 7.4 Informational Parameters ==========
+
+
+struct ReadLocalVersionInformationCommand:
+  -- 7.4.1 Read Local Version Information command (v1.1)
+  -- HCI_Read_Local_Version_Information
+  -- No command parameters beyond the command header.
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0 [+hdr_size]  hci.CommandHeader  header
+
+
+struct ReadLocalSupportedCommandsCommand:
+  -- 7.4.2 Read Local Supported Commands command (v1.2)
+  -- HCI_Read_Local_Supported_Commands
+  -- No command parameters beyond the command header.
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0 [+hdr_size]  hci.CommandHeader  header
+
+
+struct ReadLocalSupportedFeaturesCommand:
+  -- 7.4.3 Read Local Supported Features command (v1.1)
+  -- HCI_Read_Local_Supported_Features
+  -- No command parameters beyond the command header.
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0 [+hdr_size]  hci.CommandHeader  header
+
+
+struct ReadLocalExtendedFeaturesCommand:
+  -- 7.4.4 Read Local Extended Features command (v1.2) (BR/EDR)
+  -- HCI_Read_Local_Extended_Features
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader  header
+  $next [+1]         UInt               page_number
+    -- 0x00: Requests the normal LMP features as returned by
+    -- Read_Local_Supported_Features.
+    -- 0x01-0xFF: Returns the corresponding page of features.
+
+
+struct ReadBufferSizeCommand:
+  -- 7.4.5 Read Buffer Size command (v1.1)
+  -- HCI_Read_Buffer_Size
+  -- No command parameters beyond the command header.
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0 [+hdr_size]  hci.CommandHeader  header
+
+
+struct ReadBdAddrCommand:
+  -- 7.4.6 Read BD_ADDR command (v1.1) (BR/EDR, LE)
+  -- HCI_Read_BD_ADDR
+  -- No command parameters beyond the command header.
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0 [+hdr_size]  hci.CommandHeader  header
+
+# 7.4.7 Read Data Block Size command
+# HCI_Read_Data_Block_Size
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.4.8 Read Local Supported Codecs command
+# HCI_Read_Local_Supported_Codecs [v1][v2]
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.4.9 Read Local Simple Pairing Options command
+# HCI_Read_Local_Simple_Pairing_Options
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.4.10 Read Local Supported Codec Capabilities command
+# HCI_Read_Local_Supported_Codec_Capabilities
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.4.11 Read Local Supported Controller Delay command
+# HCI_Read_Local_Supported_Controller_Delay
+# TODO: b/265052417 - Definition needs to be added
+
+
+# ========== 7.5 Status Parameters ==========
+
+
+struct ReadEncryptionKeySizeCommand:
+  -- 7.5.6 Read Encryption Key Size (v1.1) (BR/EDR)
+  -- HCI_Read_Encryption_Key_Size
+  -- Reads the encryption key size for the given connection handle.
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader  header
+  $next [+2]         UInt               connection_handle
+    -- Identifies an active ACL link (only the lower 12 bits are meaningful).
+    [requires: 0x0000 <= this <= 0x0EFF]
+
+# ========== 7.8 LE Controller Commands ==========
+
+
+struct LESetEventMaskCommand:
+  -- 7.8.1 LE Set Event Mask command (v4.0) (LE)
+  -- HCI_LE_Set_Event_Mask
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader  header
+  $next [+8]  bits:
+    # Only the low 35 bits of the 8-byte field are mapped here.
+    0     [+35]      LEEventMask        le_event_mask
+      -- Bitmask that indicates which LE events are generated by the HCI for the Host.
+
+
+struct LEReadBufferSizeCommandV1:
+  -- 7.8.2 LE Read Buffer Size command [v1] (v4.0) (LE)
+  -- HCI_LE_Read_Buffer_Size [v1]
+  -- No command parameters beyond the command header.
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0 [+hdr_size]  hci.CommandHeader  header
+
+
+struct LEReadBufferSizeCommandV2:
+  -- 7.8.2 LE Read Buffer Size command [v2] (v5.2) (LE)
+  -- HCI_LE_Read_Buffer_Size [v2]
+  -- Version 2 of this command changed the opcode and added ISO return
+  -- parameters.
+  -- No command parameters beyond the command header.
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0 [+hdr_size]  hci.CommandHeader  header
+
+
+struct LEReadLocalSupportedFeaturesCommand:
+  -- 7.8.3 LE Read Local Supported Features command (v4.0) (LE)
+  -- HCI_LE_Read_Local_Supported_Features
+  -- No command parameters beyond the command header.
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0 [+hdr_size]  hci.CommandHeader  header
+
+
+struct LESetRandomAddressCommand:
+  -- 7.8.4 LE Set Random Address command (v4.0) (LE)
+  -- HCI_LE_Set_Random_Address
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]                   hci.CommandHeader  header
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr         random_address
+    -- The Random Device Address to be set.
+
+
+struct LESetAdvertisingParametersCommand:
+  -- 7.8.5 LE Set Advertising Parameters command (v4.0) (LE)
+  -- HCI_LE_Set_Advertising_Parameters
+
+  [requires: advertising_interval_min <= advertising_interval_max]
+
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+
+  0     [+hdr_size]                   hci.CommandHeader          header
+
+  $next [+2]                          UInt                       advertising_interval_min
+    -- Default: 0x0800 (1.28 s)
+    -- Time: N * 0.625 ms
+    -- Time Range: 20 ms to 10.24 s
+    [requires: 0x0020 <= this <= 0x4000]
+
+  $next [+2]                          UInt                       advertising_interval_max
+    -- Default: 0x0800 (1.28 s)
+    -- Time: N * 0.625 ms
+    -- Time Range: 20 ms to 10.24 s
+    [requires: 0x0020 <= this <= 0x4000]
+
+  $next [+1]                          LEAdvertisingType          adv_type
+    -- Used to determine the packet type that is used for advertising when
+    -- advertising is enabled.
+
+  $next [+1]                          LEOwnAddressType           own_address_type
+    -- The type of address being used in the advertising packets.
+
+  $next [+1]                          hci.LEPeerAddressType      peer_address_type
+    -- ANONYMOUS address type not allowed.
+
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr                 peer_address
+    -- Public Device Address, Random Device Address, Public Identity Address, or
+    -- Random (static) Identity Address of the device to be connected.
+
+  $next [+1]  bits:
+
+    0     [+3]                        LEAdvertisingChannels      advertising_channel_map
+      -- Indicates the advertising channels that shall be used when transmitting
+      -- advertising packets. At least 1 channel must be enabled.
+      -- Default: all channels enabled
+
+  $next [+1]                          LEAdvertisingFilterPolicy  advertising_filter_policy
+    -- This parameter shall be ignored when directed advertising is enabled.
+
+
+struct LEReadAdvertisingChannelTxPowerCommand:
+  -- 7.8.6 LE Read Advertising Channel Tx Power command (v4.0) (LE)
+  -- HCI_LE_Read_Advertising_Channel_Tx_Power
+  -- No command parameters beyond the command header.
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0 [+hdr_size]  hci.CommandHeader  header
+
+
+struct LESetAdvertisingDataCommand:
+  -- 7.8.7 LE Set Advertising Data command (v4.0) (LE)
+  -- HCI_LE_Set_Advertising_Data
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader  header
+  $next [+1]         UInt               advertising_data_length
+    -- The number of significant octets in `advertising_data`.
+    [requires: 0x00 <= this <= 0x1F]
+
+  $next [+31]        UInt:8[31]         advertising_data
+    -- 31 octets of advertising data formatted as defined in Core Spec
+    -- v5.3, Vol 3, Part C, Section 11.
+    -- Octets beyond `advertising_data_length` are not significant.
+    -- Default: All octets zero
+
+
+struct LESetScanResponseDataCommand:
+  -- 7.8.8 LE Set Scan Response Data command (v4.0) (LE)
+  -- HCI_LE_Set_Scan_Response_Data
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader  header
+  $next [+1]         UInt               scan_response_data_length
+    -- The number of significant octets in `scan_response_data`.
+    [requires: 0x00 <= this <= 0x1F]
+
+  $next [+31]        UInt:8[31]         scan_response_data
+    -- 31 octets of scan response data formatted as defined in Core Spec
+    -- v5.3, Vol 3, Part C, Section 11.
+    -- Octets beyond `scan_response_data_length` are not significant.
+    -- Default: All octets zero
+
+
+struct LESetAdvertisingEnableCommand:
+  -- 7.8.9 LE Set Advertising Enable command (v4.0) (LE)
+  -- HCI_LE_Set_Advertising_Enable
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader       header
+  $next [+1]         hci.GenericEnableParam  advertising_enable
+    -- Enable/disable advertising.
+
+
+struct LESetScanParametersCommand:
+  -- 7.8.10 LE Set Scan Parameters command (v4.0) (LE)
+  -- HCI_LE_Set_Scan_Parameters
+
+  [requires: le_scan_window <= le_scan_interval]
+
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+
+  0     [+hdr_size]  hci.CommandHeader   header
+
+  $next [+1]         LEScanType          le_scan_type
+    -- Controls the type of scan to perform.
+
+  $next [+2]         UInt                le_scan_interval
+    -- Default: 0x0010 (10ms)
+    -- Time: N * 0.625 ms
+    -- Time Range: 2.5 ms to 10.24 s
+    [requires: 0x0004 <= this <= 0x4000]
+
+  $next [+2]         UInt                le_scan_window
+    -- Default: 0x0010 (10ms)
+    -- Time: N * 0.625 ms
+    -- Time Range: 2.5 ms to 10.24 s
+    [requires: 0x0004 <= this <= 0x4000]
+
+  $next [+1]         LEOwnAddressType    own_address_type
+    -- The type of address being used in the scan request packets.
+
+  $next [+1]         LEScanFilterPolicy  scanning_filter_policy
+    -- See enum class LEScanFilterPolicy in this file for possible values
+
+
+struct LESetScanEnableCommand:
+  -- 7.8.11 LE Set Scan Enable command (v4.0) (LE)
+  -- HCI_LE_Set_Scan_Enable
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader       header
+  $next [+1]         hci.GenericEnableParam  le_scan_enable
+    -- Enable/disable LE scanning.
+  $next [+1]         hci.GenericEnableParam  filter_duplicates
+    -- Controls whether the Link Layer should filter out duplicate advertising
+    -- reports to the Host, or if the Link Layer should generate advertising
+    -- reports for each packet received. Ignored if le_scan_enable is set to
+    -- disabled.
+    -- See Core Spec v5.3, Vol 6, Part B, Section 4.4.3.5
+
+
+struct LECreateConnectionCommand:
+  -- 7.8.12 LE Create Connection command (v4.0) (LE)
+  -- HCI_LE_Create_Connection
+
+  [requires: le_scan_window <= le_scan_interval && connection_interval_min <= connection_interval_max]
+
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+
+  0     [+hdr_size]                   hci.CommandHeader       header
+
+  $next [+2]                          UInt                    le_scan_interval
+    -- The time interval from when the Controller started the last LE scan until
+    -- it begins the subsequent LE scan.
+    -- Time: N * 0.625 ms
+    -- Time Range: 2.5 ms to 10.24 s
+    [requires: 0x0004 <= this <= 0x4000]
+
+  $next [+2]                          UInt                    le_scan_window
+    -- Amount of time for the duration of the LE scan.
+    -- Time: N * 0.625 ms
+    -- Time Range: 2.5 ms to 10.24 s
+    [requires: 0x0004 <= this <= 0x4000]
+
+  $next [+1]                          hci.GenericEnableParam  initiator_filter_policy
+    -- NOTE(review): presumably selects whether the Filter Accept List is used
+    -- when initiating — confirm against Core Spec 7.8.12.
+
+  $next [+1]                          hci.LEAddressType       peer_address_type
+
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr              peer_address
+    -- Address of the device to be connected.
+
+  $next [+1]                          LEOwnAddressType        own_address_type
+
+  $next [+2]                          UInt                    connection_interval_min
+    -- Time: N * 1.25 ms
+    -- Time Range: 7.5 ms to 4 s.
+    [requires: 0x0006 <= this <= 0x0C80]
+
+  $next [+2]                          UInt                    connection_interval_max
+    -- Time: N * 1.25 ms
+    -- Time Range: 7.5 ms to 4 s.
+    [requires: 0x0006 <= this <= 0x0C80]
+
+  $next [+2]                          UInt                    max_latency
+    -- Maximum Peripheral latency for the connection in number of connection
+    -- events.
+    [requires: 0x0000 <= this <= 0x01F3]
+
+  $next [+2]                          UInt                    supervision_timeout
+    -- See Core Spec v5.3, Vol 6, Part B, Section 4.5.2.
+    -- Time: N * 10 ms
+    -- Time Range: 100 ms to 32 s
+    [requires: 0x000A <= this <= 0x0C80]
+
+  $next [+2]                          UInt                    min_connection_event_length
+    -- The minimum length of connection event for this LE connection.
+    -- Time: N * 0.625 ms
+
+  $next [+2]                          UInt                    max_connection_event_length
+    -- The maximum length of connection event for this LE connection.
+    -- Time: N * 0.625 ms
+
+
+struct LECreateConnectionCancelCommand:
+  -- 7.8.13 LE Create Connection Cancel command (v4.0) (LE)
+  -- HCI_LE_Create_Connection_Cancel
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0 [+hdr_size]  hci.CommandHeader  header
+
+# 7.8.14 LE Read Filter Accept List Size command
+# HCI_LE_Read_Filter_Accept_List_Size
+# TODO: b/265052417 - Definition needs to be added
+
+
+struct LEClearFilterAcceptListCommand:
+  -- 7.8.15 LE Clear Filter Accept List command (v4.0) (LE)
+  -- HCI_LE_Clear_Filter_Accept_List
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0 [+hdr_size]  hci.CommandHeader  header
+
+
+struct LEAddDeviceToFilterAcceptListCommand:
+  -- 7.8.16 LE Add Device To Filter Accept List command (v4.0) (LE)
+  -- HCI_LE_Add_Device_To_Filter_Accept_List
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]                   hci.CommandHeader      header
+  $next [+1]                          hci.LEPeerAddressType  address_type
+    -- The address type of the peer.
+
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr             address
+    -- Public Device Address or Random Device Address of the device to be added
+    -- to the Filter Accept List. Ignored if `address_type` is ANONYMOUS.
+
+
+struct LERemoveDeviceFromFilterAcceptListCommand:
+  -- 7.8.17 LE Remove Device From Filter Accept List command (v4.0) (LE)
+  -- HCI_LE_Remove_Device_From_Filter_Accept_List
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]                   hci.CommandHeader      header
+  $next [+1]                          hci.LEPeerAddressType  address_type
+    -- The address type of the peer.
+
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr             address
+    -- Public Device Address or Random Device Address of the device to be added
+    -- to the Filter Accept List. Ignored if `address_type` is ANONYMOUS.
+
+
+struct LEConnectionUpdateCommand:
+  -- 7.8.18 LE Connection Update command (v4.0) (LE)
+  -- HCI_LE_Connection_Update
+
+  [requires: connection_interval_min <= connection_interval_max && min_connection_event_length <= max_connection_event_length]
+
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+
+  0     [+hdr_size]  hci.CommandHeader  header
+
+  $next [+2]         UInt               connection_handle
+    [requires: 0x0000 <= this <= 0x0EFF]
+
+  $next [+2]         UInt               connection_interval_min
+    -- Time: N * 1.25 ms
+    -- Time Range: 7.5 ms to 4 s.
+    [requires: 0x0006 <= this <= 0x0C80]
+
+  $next [+2]         UInt               connection_interval_max
+    -- Time: N * 1.25 ms
+    -- Time Range: 7.5 ms to 4 s.
+    [requires: 0x0006 <= this <= 0x0C80]
+
+  $next [+2]         UInt               max_latency
+    -- Maximum Peripheral latency for the connection in number of subrated
+    -- connection events.
+    [requires: 0x0000 <= this <= 0x01F3]
+
+  $next [+2]         UInt               supervision_timeout
+    -- See Core Spec v5.3, Vol 6, Part B, Section 4.5.2.
+    -- Time: N * 10 ms
+    -- Time Range: 100 ms to 32 s
+    [requires: 0x000A <= this <= 0x0C80]
+
+  $next [+2]         UInt               min_connection_event_length
+    -- Time: N * 0.625 ms
+
+  $next [+2]         UInt               max_connection_event_length
+    -- Time: N * 0.625 ms
+
+# 7.8.19 LE Set Host Channel Classification command
+# HCI_LE_Set_Host_Channel_Classification
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.8.20 LE Read Channel Map command
+# HCI_LE_Read_Channel_Map
+# TODO: b/265052417 - Definition needs to be added
+
+
+struct LEReadRemoteFeaturesCommand:
+  -- 7.8.21 LE Read Remote Features command (v4.0) (LE)
+  -- HCI_LE_Read_Remote_Features
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader  header
+  $next [+2]         UInt               connection_handle
+    [requires: 0x0000 <= this <= 0x0EFF]
+
+# 7.8.22 LE Encrypt command
+# HCI_LE_Encrypt
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.8.23 LE Rand command
+# HCI_LE_Rand
+# TODO: b/265052417 - Definition needs to be added
+
+
+struct LEEnableEncryptionCommand:
+  -- 7.8.24 LE Enable Encryption command (v4.0) (LE)
+  -- HCI_LE_Enable_Encryption
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]                    hci.CommandHeader  header
+  $next [+2]                           UInt               connection_handle
+    [requires: 0x0000 <= this <= 0x0EFF]
+
+  $next [+8]                           UInt               random_number
+  $next [+2]                           UInt               encrypted_diversifier
+  $next [+hci.LinkKey.$size_in_bytes]  hci.LinkKey        long_term_key
+
+
+struct LELongTermKeyRequestReplyCommand:
+  -- 7.8.25 LE Long Term Key Request Reply command (v4.0) (LE)
+  -- HCI_LE_Long_Term_Key_Request_Reply
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]                    hci.CommandHeader  header
+  $next [+2]                           UInt               connection_handle
+    [requires: 0x0000 <= this <= 0x0EFF]
+
+  $next [+hci.LinkKey.$size_in_bytes]  hci.LinkKey        long_term_key
+
+
+struct LELongTermKeyRequestNegativeReplyCommand:
+  -- 7.8.26 LE Long Term Key Request Negative Reply command (v4.0) (LE)
+  -- HCI_LE_Long_Term_Key_Request_Negative_Reply
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader  header
+  $next [+2]         UInt               connection_handle
+    [requires: 0x0000 <= this <= 0x0EFF]
+
+
+struct LEReadSupportedStatesCommand:
+  -- 7.8.27 LE Read Supported States command (v4.0) (LE)
+  -- HCI_LE_Read_Supported_States
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0 [+hdr_size]  hci.CommandHeader  header
+
+# 7.8.28 LE Receiver Test command
+# HCI_LE_Receiver_Test [v1] [v2] [v3]
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.8.29 LE Transmitter Test command
+# HCI_LE_Transmitter_Test [v1] [v2] [v3] [v4]
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.8.30 LE Test End command
+# HCI_LE_Test_End
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.8.31 LE Remote Connection Parameter Request Reply command
+# HCI_LE_Remote_Connection_Parameter_Request_Reply
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.8.32 LE Remote Connection Parameter Request Negative Reply command
+# HCI_LE_Remote_Connection_Parameter_Request_Negative_Reply
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.8.33 LE Set Data Length command
+# HCI_LE_Set_Data_Length
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.8.34 LE Read Suggested Default Data Length command
+# HCI_LE_Read_Suggested_Default_Data_Length
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.8.35 LE Write Suggested Default Data Length command
+# HCI_LE_Write_Suggested_Default_Data_Length
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.8.36 LE Read Local P-256 Public Key command
+# HCI_LE_Read_Local_P-256_Public_Key
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.8.37 LE Generate DHKey command
+# HCI_LE_Generate_DHKey [v1] [v2]
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.8.38 LE Add Device To Resolving List command
+# HCI_LE_Add_Device_To_Resolving_List
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.8.39 LE Remove Device From Resolving List command
+# HCI_LE_Remove_Device_From_Resolving_List
+# TODO: b/265052417 - Definition needs to be added
+
+
+struct LEClearResolvingListCommand:
+  -- 7.8.40 LE Clear Resolving List command (v4.2) (LE)
+  -- HCI_LE_Clear_Resolving_List
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0 [+hdr_size]  hci.CommandHeader  header
+
+# 7.8.41 LE Read Resolving List Size command
+# HCI_LE_Read_Resolving_List_Size
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.8.42 LE Read Peer Resolvable Address command
+# HCI_LE_Read_Peer_Resolvable_Address
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.8.43 LE Read Local Resolvable Address command
+# HCI_LE_Read_Local_Resolvable_Address
+# TODO: b/265052417 - Definition needs to be added
+
+
+struct LESetAddressResolutionEnableCommand:
+  -- 7.8.44 LE Set Address Resolution Enable command (v4.2) (LE)
+  -- HCI_LE_Set_Address_Resolution_Enable
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader       header
+  $next [+1]         hci.GenericEnableParam  address_resolution_enable
+
+# 7.8.45 LE Set Resolvable Private Address Timeout command
+# HCI_LE_Set_Resolvable_Private_Address_Timeout
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.8.46 LE Read Maximum Data Length command
+# HCI_LE_Read_Maximum_Data_Length
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.8.47 LE Read PHY command
+# HCI_LE_Read_PHY
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.8.48 LE Set Default PHY command
+# HCI_LE_Set_Default_PHY
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.8.49 LE Set PHY command
+# HCI_LE_Set_PHY
+# TODO: b/265052417 - Definition needs to be added
+
+
+struct LESetAdvertisingSetRandomAddressCommand:
+  -- 7.8.52 LE Set Advertising Set Random Address command (v5.0) (LE)
+  -- HCI_LE_Set_Advertising_Set_Random_Address
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]                   hci.CommandHeader  header
+  $next [+1]                          UInt               advertising_handle
+    -- Handle used to identify an advertising set.
+
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr         random_address
+    -- The random address to use in the advertising PDUs.
+
+
+struct LESetExtendedAdvertisingParametersV1Command:
+  -- 7.8.53 LE Set Extended Advertising Parameters [v1] command (v5.0) (LE)
+  -- HCI_LE_Set_Extended_Advertising_Parameters [v1]
+
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+
+  0     [+hdr_size]                   hci.CommandHeader              header
+
+  $next [+1]                          UInt                           advertising_handle
+    -- Handle used to identify an advertising set.
+
+  $next [+2]  bits:
+
+    0     [+7]                        LEAdvertisingEventProperties   advertising_event_properties
+
+  $next [+3]                          UInt                           primary_advertising_interval_min
+    -- Time = N * 0.625 ms
+    -- Time Range: 20 ms to 10,485.759375 s
+    [requires: 0x000020 <= this]
+
+  $next [+3]                          UInt                           primary_advertising_interval_max
+    -- Time = N * 0.625 ms
+    -- Time Range: 20 ms to 10,485.759375 s
+    [requires: 0x000020 <= this]
+
+  $next [+1]  bits:
+
+    0     [+3]                        LEAdvertisingChannels          primary_advertising_channel_map
+
+  $next [+1]                          LEOwnAddressType               own_address_type
+
+  $next [+1]                          hci.LEPeerAddressTypeNoAnon    peer_address_type
+
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr                     peer_address
+    -- Public Device Address, Random Device Address, Public Identity Address, or Random (static)
+    -- Identity Address of the device to be connected.
+
+  $next [+1]                          LEAdvertisingFilterPolicy      advertising_filter_policy
+
+  $next [+1]                          Int                            advertising_tx_power
+    -- Range: -127 <= N <= +126
+    -- Units: dBm
+    -- If N = 127: Host has no preference.
+    [requires: -127 <= this]
+
+  $next [+1]                          hci.LEPrimaryAdvertisingPHY    primary_advertising_phy
+    -- LEPHY::kLE2M and LEPHY::kLECodedS2 are excluded.
+
+  $next [+1]                          UInt                           secondary_advertising_max_skip
+    -- Maximum advertising events the controller can skip before sending the AUX_ADV_IND packets on
+    -- the secondary advertising physical channel. If this value is zero, AUX_ADV_IND shall be sent
+    -- prior to the next advertising event.
+
+  $next [+1]                          hci.LESecondaryAdvertisingPHY  secondary_advertising_phy
+
+  $next [+1]                          UInt                           advertising_sid
+    -- Value of the Advertising SID subfield in the ADI field of the PDU
+    [requires: 0x00 <= this <= 0x0F]
+
+  $next [+1]                          hci.GenericEnableParam         scan_request_notification_enable
+
+# TODO: b/265052417 - LE Set Extended Advertising Parameters [v2] definition needs to be added
+
+
+struct LESetExtendedAdvertisingDataCommand:
+  -- 7.8.54 LE Set Extended Advertising Data command (v5.0) (LE)
+  -- HCI_LE_Set_Extended_Advertising_Data
+
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+
+  0     [+hdr_size]  hci.CommandHeader                header
+
+  $next [+1]         UInt                             advertising_handle
+    -- Handle used to identify an advertising set.
+
+  $next [+1]         LESetExtendedAdvDataOp           operation
+
+  $next [+1]         LEExtendedAdvFragmentPreference  fragment_preference
+    -- Provides a hint to the Controller as to whether advertising data should be fragmented.
+
+  $next [+1]         UInt                             advertising_data_length (sz)
+    -- Length of the advertising data included in this command packet, up to
+    -- kMaxLEExtendedAdvertisingDataLength bytes. If the advertising set uses legacy advertising
+    -- PDUs that support advertising data then this shall not exceed kMaxLEAdvertisingDataLength
+    -- bytes.
+    [requires: 0 <= this <= 251]
+
+  $next [+sz]        UInt:8[sz]                       advertising_data
+    -- Variable length advertising data.
+
+
+struct LESetExtendedScanResponseDataCommand:
+  -- 7.8.55 LE Set Extended Scan Response Data command (v5.0) (LE)
+  -- HCI_LE_Set_Extended_Scan_Response_Data
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader                header
+  $next [+1]         UInt                             advertising_handle
+    -- Used to identify an advertising set
+    [requires: 0x00 <= this <= 0xEF]
+
+  $next [+1]         LESetExtendedAdvDataOp           operation
+  $next [+1]         LEExtendedAdvFragmentPreference  fragment_preference
+    -- Provides a hint to the controller as to whether advertising data should be fragmented
+
+  $next [+1]         UInt                             scan_response_data_length (sz)
+    -- The number of octets in the scan_response_data parameter
+    [requires: 0 <= this <= 251]
+
+  $next [+sz]        UInt:8[sz]                       scan_response_data
+    -- Scan response data formatted as defined in Core Spec v5.4, Vol 3, Part C, Section 11
+
+
+struct LESetExtendedAdvertisingEnableData:
+  -- Data fields for variable-length portion of an LE Set Extended Advertising Enable command
+  0     [+1]  UInt  advertising_handle
+  $next [+2]  UInt  duration
+  $next [+1]  UInt  max_extended_advertising_events
+
+
+struct LESetExtendedAdvertisingEnableCommand:
+  -- 7.8.56 LE Set Extended Advertising Enable command (v5.0) (LE)
+  -- HCI_LE_Set_Extended_Advertising_Enable
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]                   hci.CommandHeader                     header
+  $next [+1]                          hci.GenericEnableParam                enable
+  $next [+1]                          UInt                                  num_sets
+  let single_data_size = LESetExtendedAdvertisingEnableData.$size_in_bytes
+  $next [+single_data_size*num_sets]  LESetExtendedAdvertisingEnableData[]  data
+
+
+struct LEReadMaxAdvertisingDataLengthCommand:
+  -- 7.8.57 LE Read Maximum Advertising Data Length command (v5.0) (LE)
+  -- HCI_LE_Read_Maximum_Advertising_Data_Length
+  -- This command has no parameters
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0 [+hdr_size]  hci.CommandHeader  header
+
+
+struct LEReadNumSupportedAdvertisingSetsCommand:
+  -- 7.8.58 LE Read Number of Supported Advertising Sets command (v5.0) (LE)
+  -- HCI_LE_Read_Number_of_Supported_Advertising_Sets
+  -- This command has no parameters
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0 [+hdr_size]  hci.CommandHeader  header
+
+
+struct LERemoveAdvertisingSetCommand:
+  -- 7.8.59 LE Remove Advertising Set command (v5.0) (LE)
+  -- HCI_LE_Remove_Advertising_Set
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader  header
+  $next [+1]         UInt               advertising_handle
+
+
+struct LEClearAdvertisingSetsCommand:
+  -- 7.8.60 LE Clear Advertising Sets command (v5.0) (LE)
+  -- HCI_LE_Clear_Advertising_Sets
+  -- This command has no parameters
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0 [+hdr_size]  hci.CommandHeader  header
+
+# 7.8.61 LE Set Periodic Advertising Parameters command
+# HCI_LE_Set_Periodic_Advertising_Parameters [v1] [v2]
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.8.62 LE Set Periodic Advertising Data command
+# HCI_LE_Set_Periodic_Advertising_Data
+# TODO: b/265052417 - Definition needs to be added
+
+
+# 7.8.63 LE Set Periodic Advertising Enable command
+# HCI_LE_Set_Periodic_Advertising_Enable
+# TODO: b/265052417 - Definition needs to be added
+
+
+struct LESetExtendedScanParametersData:
+  -- Data fields for variable-length portion of an LE Set Extended Scan Parameters command
+
+  0     [+1]  LEScanType  scan_type
+
+  $next [+2]  UInt        scan_interval
+    -- Time interval from when the Controller started its last scan until it begins the subsequent
+    -- scan on the primary advertising physical channel.
+    -- Time = N × 0.625 ms
+    -- Time Range: 2.5 ms to 40.959375 s
+    [requires: 0x0004 <= this]
+
+  $next [+2]  UInt        scan_window
+    -- Duration of the scan on the primary advertising physical channel.
+    -- Time = N × 0.625 ms
+    -- Time Range: 2.5 ms to 40.959375 s
+    [requires: 0x0004 <= this]
+
+
+struct LESetExtendedScanParametersCommand(num_entries: UInt:8):
+  -- 7.8.64 LE Set Extended Scan Parameters command (v5.0) (LE)
+  -- HCI_LE_Set_Extended_Scan_Parameters
+  -- num_entries corresponds to the number of bits set in the |scanning_phys| field
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]            hci.CommandHeader                             header
+  $next [+1]                   LEOwnAddressType                              own_address_type
+  $next [+1]                   LEScanFilterPolicy                            scanning_filter_policy
+  $next [+1]                   LEScanPHYBits                                 scanning_phys
+  let single_entry_size = LESetExtendedScanParametersData.$size_in_bytes
+  let total_entries_size = num_entries*single_entry_size
+  $next [+total_entries_size]  LESetExtendedScanParametersData[num_entries]  data
+    -- Indicates the type of address being used in the scan request packets (for active scanning).
+
+
+struct LESetExtendedScanEnableCommand:
+  -- 7.8.65 LE Set Extended Scan Enable command (v5.0) (LE)
+  -- HCI_LE_Set_Extended_Scan_Enable
+
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+
+  0     [+hdr_size]  hci.CommandHeader                   header
+
+  $next [+1]         hci.GenericEnableParam              scanning_enabled
+
+  $next [+1]         LEExtendedDuplicateFilteringOption  filter_duplicates
+    -- See enum class LEExtendedDuplicateFilteringOption in this file for possible values
+
+  $next [+2]         UInt                                duration
+    -- Possible values:
+    --   0x0000: Scan continuously until explicitly disabled
+    --   0x0001-0xFFFF: Scan duration, where:
+    --     Time = N * 10 ms
+    --     Time Range: 10 ms to 655.35 s
+
+  $next [+2]         UInt                                period
+    -- Possible values:
+    --   0x0000: Periodic scanning disabled (scan continuously)
+    --   0x0001-0xFFFF: Time interval from when the Controller started its last
+    --   Scan_Duration until it begins the subsequent Scan_Duration, where:
+    --     Time = N * 1.28 sec
+    --     Time Range: 1.28 s to 83,884.8 s
+
+# 7.8.66 LE Extended Create Connection command
+# HCI_LE_Extended_Create_Connection [v1] [v2]
+# TODO: b/265052417 - Definition needs to be added
+
+
+struct LEPeriodicAdvertisingCreateSyncCommand:
+  -- 7.8.67 LE Periodic Advertising Create Sync command (v5.0) (LE)
+  -- HCI_LE_Periodic_Advertising_Create_Sync
+
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+
+  0     [+hdr_size]                   hci.CommandHeader                       header
+
+  $next [+1]                          LEPeriodicAdvertisingCreateSyncOptions  options
+
+  $next [+1]                          UInt                                    advertising_sid
+    -- Advertising SID subfield in the ADI field used to identify the Periodic Advertising
+    [requires: 0x00 <= this <= 0x0F]
+
+  $next [+1]                          LEPeriodicAdvertisingAddressType        advertiser_address_type
+
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr                              advertiser_address
+    -- Public Device Address, Random Device Address, Public Identity Address, or Random (static)
+    -- Identity Address of the advertiser
+
+  $next [+2]                          UInt                                    skip
+    -- The maximum number of periodic advertising events that can be skipped after a successful
+    -- receive
+    [requires: 0x0000 <= this <= 0x01F3]
+
+  $next [+2]                          UInt                                    sync_timeout
+    -- Synchronization timeout for the periodic advertising.
+    -- Time = N * 10 ms
+    -- Time Range: 100 ms to 163.84 s
+    [requires: 0x000A <= this <= 0x4000]
+
+  $next [+1]                          LEPeriodicAdvertisingSyncCTEType        sync_cte_type
+    -- Constant Tone Extension sync options
+
+
+struct LEPeriodicAdvertisingCreateSyncCancel:
+  -- 7.8.68 LE Periodic Advertising Create Sync Cancel command (v5.0) (LE)
+  -- HCI_LE_Periodic_Advertising_Create_Sync_Cancel
+  -- Note that this command has no arguments
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0 [+hdr_size]  hci.CommandHeader  header
+
+
+struct LEPeriodicAdvertisingTerminateSyncCommand:
+  -- 7.8.69 LE Periodic Advertising Terminate Sync command (v5.0) (LE)
+  -- HCI_LE_Periodic_Advertising_Terminate_Sync
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader  header
+  $next [+2]         UInt               sync_handle
+    -- Identifies the periodic advertising train
+    [requires: 0x0000 <= this <= 0x0EFF]
+
+
+struct LEAddDeviceToPeriodicAdvertiserListCommand:
+  -- 7.8.70 LE Add Device To Periodic Advertiser List command (v5.0) (LE)
+  -- HCI_LE_Add_Device_To_Periodic_Advertiser_List
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]                   hci.CommandHeader  header
+  $next [+1]                          hci.LEAddressType  advertiser_address_type
+    -- Address type of the advertiser. The LEAddressType::kPublicIdentity and
+    -- LEAddressType::kRandomIdentity values are excluded for this command.
+
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr         advertiser_address
+    -- Public Device Address, Random Device Address, Public Identity Address, or
+    -- Random (static) Identity Address of the advertiser.
+
+  $next [+1]                          UInt               advertising_sid
+    -- Advertising SID subfield in the ADI field used to identify the Periodic
+    -- Advertising.
+
+
+struct LERemoveDeviceFromPeriodicAdvertiserListCommand:
+  -- 7.8.71 LE Remove Device From Periodic Advertiser List command (v5.0) (LE)
+  -- HCI_LE_Remove_Device_From_Periodic_Advertiser_List
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]                   hci.CommandHeader  header
+  $next [+1]                          UInt               advertiser_address_type
+    -- Address type of the advertiser. The LEAddressType::kPublicIdentity and
+    -- LEAddressType::kRandomIdentity values are excluded for this command.
+
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr         advertiser_address
+    -- Public Device Address, Random Device Address, Public Identity Address, or
+    -- Random (static) Identity Address of the advertiser.
+
+  $next [+1]                          UInt               advertising_sid
+    -- Advertising SID subfield in the ADI field used to identify the Periodic
+    -- Advertising.
+
+
+struct LEClearPeriodicAdvertiserListCommand:
+  -- 7.8.72 LE Clear Periodic Advertiser List command (v5.0) (LE)
+  -- HCI_LE_Clear_Periodic_Advertiser_List
+  -- Note that this command has no arguments
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0 [+hdr_size]  hci.CommandHeader  header
+
+
+struct LEReadPeriodicAdvertiserListSizeCommand:
+  -- 7.8.73 LE Read Periodic Advertiser List Size command (v5.0) (LE)
+  -- HCI_LE_Read_Periodic_Advertiser_List_Size
+  -- Note that this command has no arguments
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0 [+hdr_size]  hci.CommandHeader  header
+
+
+struct LEReadTransmitPowerCommand:
+  -- 7.8.74 LE Read Transmit Power command (v5.0) (LE)
+  -- HCI_LE_Read_Transmit_Power
+  -- Note that this command has no arguments
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0 [+hdr_size]  hci.CommandHeader  header
+
+
+struct LEReadRFPathCompensationCommand:
+  -- 7.8.75 LE Read RF Path Compensation command (v5.0) (LE)
+  -- HCI_LE_Read_RF_Path_Compensation
+  -- Note that this command has no arguments
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0 [+hdr_size]  hci.CommandHeader  header
+
+
+struct LEWriteRFPathCompensationCommand:
+  -- 7.8.76 LE Write RF Path Compensation command (v5.0) (LE)
+  -- HCI_LE_Write_RF_Path_Compensation
+  -- Values provided are used in the Tx Power Level and RSSI calculation.
+  --   Range: -128.0 dB (0xFB00) ≤ N ≤ 128.0 dB (0x0500)
+  --   Units: 0.1 dB
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader  header
+  $next [+2]         Int                rf_tx_path_compensation_value
+    [requires: -1280 <= this <= 1280]
+
+  $next [+2]         Int                rf_rx_path_compensation_value
+    [requires: -1280 <= this <= 1280]
+
+
+struct LESetPrivacyModeCommand:
+  -- 7.8.77 LE Set Privacy Mode command (v5.0) (LE)
+  -- HCI_LE_Set_Privacy_Mode
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]                   hci.CommandHeader            header
+  $next [+1]                          hci.LEPeerAddressTypeNoAnon  peer_identity_address_type
+    -- The peer identity address type (either Public Identity or Private
+    -- Identity).
+
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr                   peer_identity_address
+    -- Public Identity Address or Random (static) Identity Address of the
+    -- advertiser.
+
+  $next [+1]                          LEPrivacyMode                privacy_mode
+    -- The privacy mode to be used for the given entry on the resolving list.
+
+# 7.8.93 [No longer used]
+# 7.8.94 LE Modify Sleep Clock Accuracy command
+# 7.8.95 [No longer used]
+
+
+struct LEReadISOTXSyncCommand:
+  -- 7.8.96 LE Read ISO TX Sync command (v5.2) (LE)
+  -- HCI_LE_Read_ISO_TX_Sync
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader  header
+  $next [+2]         UInt               connection_handle
+    -- Connection handle of the CIS or BIS
+    [requires: 0x0000 <= this <= 0x0EFF]
+
+
+struct LESetCIGParametersCommand:
+  -- 7.8.97 LE Set CIG Parameters command (v5.2) (LE)
+  -- HCI_LE_Set_CIG_Parameters
+
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+
+  0     [+hdr_size]                hci.CommandHeader                        header
+
+  $next [+1]                       UInt                                     cig_id
+    -- Used to identify the CIG
+    [requires: 0x00 <= this <= 0xEF]
+
+  $next [+3]                       UInt                                     sdu_interval_c_to_p
+    -- The interval, in microseconds, of periodic SDUs (Central => Peripheral)
+    [requires: 0x0000FF <= this <= 0x0FFFFF]
+
+  $next [+3]                       UInt                                     sdu_interval_p_to_c
+    -- The interval, in microseconds, of periodic SDUs (Peripheral => Central)
+    [requires: 0x0000FF <= this <= 0x0FFFFF]
+
+  $next [+1]                       LESleepClockAccuracyRange                worst_case_sca
+    -- Worst-case sleep clock accuracy of all Peripherals that will participate in the CIG
+
+  $next [+1]                       LECISPacking                             packing
+    -- Preferred method of arranging subevents of multiple CISes
+
+  $next [+1]                       LECISFraming                             framing
+    -- Format of the CIS Data PDUs
+
+  $next [+2]                       UInt                                     max_transport_latency_c_to_p
+    -- Maximum transport latency, in milliseconds, from the Central's Controller to the
+    -- Peripheral's Controller
+    [requires: 0x0005 <= this <= 0x0FA0]
+
+  $next [+2]                       UInt                                     max_transport_latency_p_to_c
+    -- Maximum transport latency, in milliseconds, from the Peripheral's Controller to the
+    -- Central's Controller
+    [requires: 0x0005 <= this <= 0x0FA0]
+
+  $next [+1]                       UInt                                     cis_count
+    -- Total number of CIS configurations in the CIG being added or modified
+    [requires: 0x00 <= this <= 0x1F]
+
+  let single_cis_options_size = LESetCIGParametersCISOptions.$size_in_bytes
+
+  let total_cis_options_size = cis_count*single_cis_options_size
+
+  $next [+total_cis_options_size]  LESetCIGParametersCISOptions[cis_count]  cis_options
+    -- Array of parameters, one for each of the CISes in this CIG
+
+# 7.8.98 LE Set CIG Parameters Test command
+
+
+struct LECreateCISCommand:
+  -- 7.8.99 LE Create CIS command (v5.2) (LE)
+  -- HCI_LE_Create_CIS
+  struct ConnectionInfo:
+    -- Handles for each stream being created
+
+    0     [+2]  UInt  cis_connection_handle
+      -- Connection handle of a CIS
+      [requires: 0x0000 <= this <= 0x0EFF]
+
+    $next [+2]  UInt  acl_connection_handle
+      -- Connection handle of an ACL connection
+      [requires: 0x0000 <= this <= 0x0EFF]
+
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]               hci.CommandHeader          header
+  $next [+1]                      UInt                       cis_count
+    -- Total number of CISes to be created
+    [requires: 0x01 <= this <= 0x1F]
+
+  let single_cis_params_size = ConnectionInfo.$size_in_bytes
+  let total_cis_params_size = cis_count*single_cis_params_size
+  $next [+total_cis_params_size]  ConnectionInfo[cis_count]  cis_connection_info
+    -- Connection handle information for the CIS(es) being created
+
+
+struct LERemoveCIGCommand:
+  -- 7.8.100 LE Remove CIG command (v5.2) (LE)
+  -- HCI_LE_Remove_CIG
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader  header
+  $next [+1]         UInt               cig_id
+    -- Identifier of a CIG
+    [requires: 0x00 <= this <= 0xEF]
+
+
+struct LEAcceptCISRequestCommand:
+  -- 7.8.101 LE Accept CIS Request command (v5.2) (LE)
+  -- HCI_LE_Accept_CIS_Request
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader  header
+  $next [+2]         UInt               connection_handle
+    -- Connection handle of the CIS
+    [requires: 0x0000 <= this <= 0x0EFF]
+
+
+struct LERejectCISRequestCommand:
+  -- 7.8.102 LE Reject CIS Request command (v5.2) (LE)
+  -- HCI_LE_Reject_CIS_Request
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader  header
+  $next [+2]         UInt               connection_handle
+    -- Connection handle of the CIS
+    [requires: 0x0000 <= this <= 0x0EFF]
+
+  $next [+1]         hci.StatusCode     reason
+    -- Reason the CIS request was rejected
+
+# 7.8.103 LE Create BIG command
+# 7.8.104 LE Create BIG Test command
+# 7.8.105 LE Terminate BIG command
+# 7.8.106 LE BIG Create Sync command
+# 7.8.107 LE BIG Terminate Sync command
+
+
+struct LERequestPeerSCACommand:
+  -- 7.8.108 LE Request Peer SCA command
+  -- HCI_LE_Request_Peer_SCA
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader  header
+  $next [+2]         UInt               connection_handle
+    -- Connection handle of the ACL
+    [requires: 0x0000 <= this <= 0x0EFF]
+
+
+struct LESetupISODataPathCommand:
+  -- 7.8.109 LE Setup ISO Data Path command
+  -- HCI_LE_Setup_ISO_Data_Path
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]                    hci.CommandHeader                   header
+  $next [+2]                           UInt                                connection_handle
+    -- Connection handle of the CIS or BIS
+    [requires: 0x0000 <= this <= 0x0EFF]
+
+  $next [+1]                           DataPathDirection                   data_path_direction
+    -- Specifies the direction for which the data path is being configured
+
+  $next [+1]                           UInt                                data_path_id
+    -- Data transport path used (0x00 for HCI).
+    [requires: 0x00 <= this <= 0xFE]
+
+  let vcf_size = CodecId.$size_in_bytes
+  $next [+vcf_size]                    CodecId                             codec_id
+    -- Codec to be used
+
+  $next [+3]                           UInt                                controller_delay
+    -- Controller delay in microseconds (0s to 4s)
+    [requires: 0x000000 <= this <= 0x3D0900]
+
+  $next [+1]                           UInt                                codec_configuration_length
+    -- Length of codec configuration
+
+  $next [+codec_configuration_length]  UInt:8[codec_configuration_length]  codec_configuration
+    -- Codec-specific configuration data
+
+
+struct LERemoveISODataPathCommand:
+  -- 7.8.110 LE Remove ISO Data Path command
+  -- HCI_LE_Remove_ISO_Data_Path
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader  header
+  $next [+2]         UInt               connection_handle
+    -- Connection handle of the CIS or BIS
+    [requires: 0x0000 <= this <= 0x0EFF]
+
+  $next [+1]  bits:
+    0     [+1]       Flag               remove_input_data_path
+    $next [+1]       Flag               remove_output_data_path
+    $next [+6]       UInt               padding
+
+# 7.8.111 LE ISO Transmit Test command
+# 7.8.112 LE ISO Receive Test command
+# 7.8.113 LE ISO Read Test Counters command
+# 7.8.114 LE ISO Test End command
diff --git a/pw_bluetooth/public/pw_bluetooth/hci_common.emb b/pw_bluetooth/public/pw_bluetooth/hci_common.emb
new file mode 100644
index 0000000..7aeb4e6
--- /dev/null
+++ b/pw_bluetooth/public/pw_bluetooth/hci_common.emb
@@ -0,0 +1,487 @@
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+# This file contains Emboss definitions for Host Controller Interface packets
+# and types found in the Bluetooth Core Specification. The Emboss compiler is
+# used to generate a C++ header from this file.
+
+[$default byte_order: "LittleEndian"]
+[(cpp) namespace: "pw::bluetooth::emboss"]
+# =========================== Common Definitions =================================
+
+
+enum StatusCode:
+  -- HCI Error Codes. Refer to Core Spec v5.0, Vol 2, Part D for definitions
+  -- and descriptions. Values are listed in increasing numerical order.
+  [maximum_bits: 8]
+  SUCCESS                                           = 0x00
+  UNKNOWN_COMMAND                                   = 0x01
+  UNKNOWN_CONNECTION_ID                             = 0x02
+  HARDWARE_FAILURE                                  = 0x03
+  PAGE_TIMEOUT                                      = 0x04
+  AUTHENTICATION_FAILURE                            = 0x05
+  PIN_OR_KEY_MISSING                                = 0x06
+  MEMORY_CAPACITY_EXCEEDED                          = 0x07
+  CONNECTION_TIMEOUT                                = 0x08
+  CONNECTION_LIMIT_EXCEEDED                         = 0x09
+  SYNCHRONOUS_CONNECTION_LIMIT_EXCEEDED             = 0x0A
+  CONNECTION_ALREADY_EXISTS                         = 0x0B
+  COMMAND_DISALLOWED                                = 0x0C
+  CONNECTION_REJECTED_LIMITED_RESOURCES             = 0x0D
+  CONNECTION_REJECTED_SECURITY                      = 0x0E
+  CONNECTION_REJECTED_BAD_BD_ADDR                   = 0x0F
+  CONNECTION_ACCEPT_TIMEOUT_EXCEEDED                = 0x10
+  UNSUPPORTED_FEATURE_OR_PARAMETER                  = 0x11
+  INVALID_HCI_COMMAND_PARAMETERS                    = 0x12
+  REMOTE_USER_TERMINATED_CONNECTION                 = 0x13
+  REMOTE_DEVICE_TERMINATED_CONNECTION_LOW_RESOURCES = 0x14
+  REMOTE_DEVICE_TERMINATED_CONNECTION_POWER_OFF     = 0x15
+  CONNECTION_TERMINATED_BY_LOCAL_HOST               = 0x16
+  REPEATED_ATTEMPTS                                 = 0x17
+  PAIRING_NOT_ALLOWED                               = 0x18
+  UNKNOWN_LMP_PDU                                   = 0x19
+  UNSUPPORTED_REMOTE_FEATURE                        = 0x1A
+  SCO_OFFSET_REJECTED                               = 0x1B
+  SCO_INTERVAL_REJECTED                             = 0x1C
+  SCO_AIRMODE_REJECTED                              = 0x1D
+  INVALID_LMP_OR_LL_PARAMETERS                      = 0x1E
+  UNSPECIFIED_ERROR                                 = 0x1F
+  UNSUPPORTED_LMP_OR_LL_PARAMETER_VALUE             = 0x20
+  ROLE_CHANGE_NOT_ALLOWED                           = 0x21
+  LMP_OR_LL_RESPONSE_TIMEOUT                        = 0x22
+  LMP_ERROR_TRANSACTION_COLLISION                   = 0x23
+  LMP_PDU_NOT_ALLOWED                               = 0x24
+  ENCRYPTION_MODE_NOT_ACCEPTABLE                    = 0x25
+  LINK_KEY_CANNOT_BE_CHANGED                        = 0x26
+  REQUESTED_QOS_NOT_SUPPORTED                       = 0x27
+  INSTANT_PASSED                                    = 0x28
+  PAIRING_WITH_UNIT_KEY_NOT_SUPPORTED               = 0x29
+  DIFFERENT_TRANSACTION_COLLISION                   = 0x2A
+  RESERVED_0                                        = 0x2B
+  QOS_UNACCEPTABLE_PARAMETER                        = 0x2C
+  QOS_REJECTED                                      = 0x2D
+  CHANNEL_CLASSIFICATION_NOT_SUPPORTED              = 0x2E
+  INSUFFICIENT_SECURITY                             = 0x2F
+  PARAMETER_OUT_OF_MANDATORY_RANGE                  = 0x30
+  RESERVED_1                                        = 0x31
+  ROLE_SWITCH_PENDING                               = 0x32
+  RESERVED_2                                        = 0x33
+  RESERVED_SLOT_VIOLATION                           = 0x34
+  ROLE_SWITCH_FAILED                                = 0x35
+  EXTENDED_INQUIRY_RESPONSE_TOO_LARGE               = 0x36
+  SECURE_SIMPLE_PAIRING_NOT_SUPPORTED_BY_HOST       = 0x37
+  HOST_BUSY_PAIRING                                 = 0x38
+  CONNECTION_REJECTED_NO_SUITABLE_CHANNEL_FOUND     = 0x39
+  CONTROLLER_BUSY                                   = 0x3A
+  UNACCEPTABLE_CONNECTION_PARAMETERS                = 0x3B
+  DIRECTED_ADVERTISING_TIMEOUT                      = 0x3C
+  CONNECTION_TERMINATED_MIC_FAILURE                 = 0x3D
+  CONNECTION_FAILED_TO_BE_ESTABLISHED               = 0x3E
+  MAC_CONNECTION_FAILED                             = 0x3F
+  COARSE_CLOCK_ADJUSTMENT_REJECTED                  = 0x40
+  # Error codes added in Core Spec v5.0:
+  TYPE_0_SUBMAP_NOT_DEFINED                         = 0x41
+  UNKNOWN_ADVERTISING_IDENTIFIER                    = 0x42
+  LIMIT_REACHED                                     = 0x43
+  OPERATION_CANCELLED_BY_HOST                       = 0x44
+
+
+enum MajorDeviceClass:
+  -- Major Device Class field of the Class of Device
+  -- (see Assigned Numbers for the Baseband).
+  [maximum_bits: 5]
+  MISCELLANEOUS = 0x00
+  COMPUTER      = 0x01
+  PHONE         = 0x02
+  LAN           = 0x03
+  AUDIO_VIDEO   = 0x04
+  PERIPHERAL    = 0x05
+  IMAGING       = 0x06
+  WEARABLE      = 0x07
+  TOY           = 0x08
+  HEALTH        = 0x09
+  UNCATEGORIZED = 0x1F
+
+
+bits MajorServiceClasses:
+  -- Major Service Classes bit field of the Class of Device
+  -- (see Assigned Numbers for the Baseband).
+  0     [+1]  Flag  limited_discoverable_mode
+  $next [+1]  Flag  le_audio
+  $next [+1]  Flag  reserved
+  $next [+1]  Flag  positioning
+  $next [+1]  Flag  networking
+  $next [+1]  Flag  rendering
+  $next [+1]  Flag  capturing
+  $next [+1]  Flag  object_transfer
+  $next [+1]  Flag  audio
+  $next [+1]  Flag  telephony
+  $next [+1]  Flag  information
+
+
+enum ComputerMinorDeviceClass:
+  -- Minor Device Class for the COMPUTER major device class.
+  [maximum_bits: 6]
+  UNCATEGORIZED       = 0x00
+  DESKTOP_WORKSTATION = 0x01
+  SERVER_CLASS        = 0x02
+  LAPTOP              = 0x03
+  HANDHELD_PC         = 0x04
+  PALM_SIZE_PC        = 0x05
+  WEARABLE            = 0x06
+  TABLET              = 0x07
+
+
+enum PhoneMinorDeviceClass:
+  -- Minor Device Class for the PHONE major device class.
+  [maximum_bits: 6]
+  UNCATEGORIZED               = 0x00
+  CELLULAR                    = 0x01
+  CORDLESS                    = 0x02
+  SMARTPHONE                  = 0x03
+  WIRED_MODEM_OR_VOID_GATEWAY = 0x04
+    -- NOTE(review): "VOID" looks like a typo for "VOICE" (Assigned Numbers
+    -- lists "Wired modem or voice gateway"). Renaming changes the generated
+    -- C++ identifier, so confirm with downstream users before fixing.
+  COMMON_ISDN_ACCESS          = 0x05
+
+
+enum LANMinorDeviceClass:
+  -- Minor Device Class for the LAN/Network Access Point major device class
+  -- (encodes current utilization of the access point).
+  [maximum_bits: 6]
+  FULLY_AVAILABLE      = 0x00
+  UTILIZED_1_TO_17     = 0x08
+  UTILIZED_17_TO_33    = 0x10
+  UTILIZED_33_TO_50    = 0x18
+  UTILIZED_50_TO_67    = 0x20
+  UTILIZED_67_TO_83    = 0x28
+  UTILIZED_83_TO_99    = 0x30
+  NO_SERVICE_AVAILABLE = 0x38
+
+
+enum AudioVideoMinorDeviceClass:
+  -- Minor Device Class for the AUDIO_VIDEO major device class.
+  [maximum_bits: 6]
+  UNCATEGORIZED                 = 0x00
+  WEARABLE_HEADSET_DEVICE       = 0x01
+  HANDS_FREE_DEVICE             = 0x02
+  RESERVED_0                    = 0x03
+  MICROPHONE                    = 0x04
+  LOUDSPEAKER                   = 0x05
+  HEADPHONES                    = 0x06
+  PORTABLE_AUDIO                = 0x07
+  CAR_AUDIO                     = 0x08
+  SET_TOP_BOX                   = 0x09
+  HIFI_AUDIO_DEVICE             = 0x0A
+  VCR                           = 0x0B
+  VIDEO_CAMERA                  = 0x0C
+  CAMCORDER                     = 0x0D
+  VIDEO_MONITOR                 = 0x0E
+  VIDEO_DISPLAY_AND_LOUDSPEAKER = 0x0F
+  VIDEO_CONFERENCING            = 0x10
+  RESERVED_1                    = 0x11
+  GAMING_TOY                    = 0x12
+
+
+enum PeripheralMinorDeviceClass0:
+  -- Lower 4 bits of the Minor Device Class for the PERIPHERAL major device
+  -- class (device subtype).
+  [maximum_bits: 4]
+  UNCATEGORIZED                  = 0x00
+  JOYSTICK                       = 0x01
+  GAMEPAD                        = 0x02
+  REMOTE_CONTROL                 = 0x03
+  SENSING_DEVICE                 = 0x04
+  DIGITIZER_TABLET               = 0x05
+  CARD_READER                    = 0x06
+  DIGITAL_PEN                    = 0x07
+  HANDHELD_SCANNER               = 0x08
+  HANDHELD_GESTURAL_INPUT_DEVICE = 0x09
+
+
+enum PeripheralMinorDeviceClass1:
+  -- Upper 2 bits of the Minor Device Class for the PERIPHERAL major device
+  -- class (keyboard/pointing capability).
+  [maximum_bits: 2]
+  UNCATEGORIZED                  = 0x00
+  KEYBOARD                       = 0x01
+  POINTING_DEVICE                = 0x02
+  COMBO_KEYBOARD_POINTING_DEVICE = 0x03
+
+
+bits PeripheralMinorDeviceClass:
+  -- Combined Minor Device Class for the PERIPHERAL major device class.
+  0     [+4]  PeripheralMinorDeviceClass0  device_class_0
+  $next [+2]  PeripheralMinorDeviceClass1  device_class_1
+
+
+enum ImagingMinorDeviceClass:
+  -- Low 2 bits of the Minor Device Class for the IMAGING major device class.
+  [maximum_bits: 2]
+  UNCATEGORIZED = 0x00
+
+
+bits ImagingMinorDeviceClassBits:
+  -- Minor Device Class for the IMAGING major device class; the upper bits are
+  -- independent capability flags.
+  0     [+2]  ImagingMinorDeviceClass  device_class
+  $next [+1]  Flag                     display
+  $next [+1]  Flag                     camera
+  $next [+1]  Flag                     scanner
+  $next [+1]  Flag                     printer
+
+
+enum WearableMinorDeviceClass:
+  -- Minor Device Class for the WEARABLE major device class.
+  [maximum_bits: 6]
+  WRISTWATCH = 0x01
+  PAGER      = 0x02
+  JACKET     = 0x03
+  HELMET     = 0x04
+  GLASSES    = 0x05
+
+
+enum ToyMinorDeviceClass:
+  -- Minor Device Class for the TOY major device class.
+  [maximum_bits: 6]
+  ROBOT      = 0x01
+  VEHICLE    = 0x02
+  DOLL       = 0x03
+  CONTROLLER = 0x04
+  GAME       = 0x05
+
+
+enum HealthMinorDeviceClass:
+  -- Minor Device Class for the HEALTH major device class.
+  [maximum_bits: 6]
+  UNDEFINED                 = 0x00
+  BLOOD_PRESSURE_MONITOR    = 0x01
+  THERMOMETER               = 0x02
+  WEIGHING_SCALE            = 0x03
+  GLUCOSE_METER             = 0x04
+  PULSE_OXIMETER            = 0x05
+  HEART_PULSE_RATE_MONITOR  = 0x06
+  HEALTH_DATA_DISPLAY       = 0x07
+  STEP_COUNTER              = 0x08
+  BODY_COMPOSITION_ANALYZER = 0x09
+  PEAK_FLOW_MONITOR         = 0x0A
+  MEDICATION_MONITOR        = 0x0B
+  KNEE_PROSTHESIS           = 0x0C
+  ANKLE_PROSTHESIS          = 0x0D
+  GENERIC_HEALTH_MANAGER    = 0x0E
+  PERSONAL_MOBILITY_DEVICE  = 0x0F
+
+
+enum GenericEnableParam:
+  -- Boolean values that can be generically passed to HCI commands that expect
+  -- a 1-octet "enable"/"disable" parameter.
+  [maximum_bits: 8]
+  DISABLE = 0x00
+  ENABLE  = 0x01
+
+
+enum GenericPresenceParam:
+  -- Boolean values that can be generically passed to HCI commands that expect
+  -- a 1-octet "present"/"not present" parameter.
+  [maximum_bits: 8]
+  NOT_PRESENT = 0x00
+  PRESENT     = 0x01
+
+
+struct BdAddr:
+  -- Bluetooth Device Address: a 6-octet (48-bit) value, little-endian per the
+  -- file-wide default byte order.
+  0 [+6]  UInt  bd_addr
+
+
+bits ClassOfDevice:
+  -- Defined in Assigned Numbers for the Baseband
+  -- https://www.bluetooth.com/specifications/assigned-numbers/baseband
+  -- The minor device class field (bits 2-7) is interpreted according to the
+  -- major device class, hence the conditional overlapping fields below.
+  0     [+2]    UInt                         zero
+    -- Class of Device format type; only the 0b00 format is currently defined,
+    -- so these two bits must be zero.
+    [requires: this == 0]
+
+  if major_device_class == MajorDeviceClass.COMPUTER:
+    2     [+6]  ComputerMinorDeviceClass     computer_minor_device_class
+
+  if major_device_class == MajorDeviceClass.PHONE:
+    2     [+6]  PhoneMinorDeviceClass        phone_minor_device_class
+
+  if major_device_class == MajorDeviceClass.LAN:
+    2     [+6]  LANMinorDeviceClass          lan_minor_device_class
+
+  if major_device_class == MajorDeviceClass.AUDIO_VIDEO:
+    2     [+6]  AudioVideoMinorDeviceClass   audio_video_minor_device_class
+
+  if major_device_class == MajorDeviceClass.PERIPHERAL:
+    2     [+6]  PeripheralMinorDeviceClass   peripheral_minor_device_class
+
+  if major_device_class == MajorDeviceClass.IMAGING:
+    2     [+6]  ImagingMinorDeviceClassBits  imaging_minor_device_class
+
+  if major_device_class == MajorDeviceClass.WEARABLE:
+    2     [+6]  WearableMinorDeviceClass     wearable_minor_device_class
+
+  if major_device_class == MajorDeviceClass.TOY:
+    2     [+6]  ToyMinorDeviceClass          toy_minor_device_class
+
+  if major_device_class == MajorDeviceClass.HEALTH:
+    2     [+6]  HealthMinorDeviceClass       health_minor_device_class
+
+  8     [+5]    MajorDeviceClass             major_device_class
+  $next [+11]   MajorServiceClasses          major_service_classes
+
+
+enum ConnectionRole:
+  -- Role of the local device on a connection.
+  [maximum_bits: 8]
+  CENTRAL    = 0x00
+  PERIPHERAL = 0x01
+
+
+enum LEPeerAddressType:
+  -- Possible values that can be used for the address_type parameters in
+  -- various HCI commands.
+  [maximum_bits: 8]
+  PUBLIC    = 0x00
+  RANDOM    = 0x01
+  ANONYMOUS = 0xFF
+
+
+enum LEPeerAddressTypeNoAnon:
+  -- Possible values that can be used for the address_type parameters in
+  -- various HCI commands.
+  -- NOTE(review): the "NoAnon" name suggests this enum should exclude the
+  -- anonymous value, yet it duplicates LEPeerAddressType exactly (including
+  -- ANONYMOUS) — confirm whether the 0xFF entry should be removed.
+  [maximum_bits: 8]
+  PUBLIC    = 0x00
+  RANDOM    = 0x01
+  ANONYMOUS = 0xFF
+
+
+bits ClockOffset:
+  -- Clock Offset. The lower 15 bits are set to the clock offset as retrieved
+  -- by an Inquiry. The highest bit is set to 1 if the rest of this parameter
+  -- is valid; when it is 0 the clock_offset field is absent.
+  15 [+1]     Flag  valid
+  if valid:
+    0  [+15]  UInt  clock_offset
+
+
+enum LEPrimaryAdvertisingPHY:
+  -- PHY used for primary advertising.
+  [maximum_bits: 8]
+  LE_1M       = 0x01
+  LE_CODED    = 0x03
+  LE_CODED_S2 = 0x04
+
+
+enum LESecondaryAdvertisingPHY:
+  -- PHY used for secondary advertising (NONE when there is no secondary
+  -- advertising channel).
+  [maximum_bits: 8]
+  NONE        = 0x00
+  LE_1M       = 0x01
+  LE_2M       = 0x02
+  LE_CODED    = 0x03
+  LE_CODED_S2 = 0x04
+
+
+enum LEAddressType:
+  -- Possible values that can be reported for various |*_address_type| parameters in various LE packets.
+  [maximum_bits: 8]
+  PUBLIC          = 0x00
+    -- Public Device Address (default)
+
+  RANDOM          = 0x01
+    -- Random Device Address
+
+  PUBLIC_IDENTITY = 0x02
+    -- Public Identity Address (corresponds to Resolved Private Address)
+
+  RANDOM_IDENTITY = 0x03
+    -- Random (static) Identity Address (corresponds to Resolved Private Address)
+
+  ANONYMOUS       = 0xFF
+    -- No address provided (anonymous advertisement).
+    -- This is a special value that is only used in LE Advertising Report events.
+
+
+enum PageScanRepetitionMode:
+  -- The page scan repetition mode, representing a maximum time between Page Scans.
+  -- (See Core Spec v5.0, Volume 2, Part B, Section 8.3.1)
+  [maximum_bits: 8]
+  R0_ = 0x00  # Continuous Scan
+  R1_ = 0x01  # <= 1.28s between scans
+  R2_ = 0x02  # <= 2.56s between scans
+
+
+enum CodingFormat:
+  -- Coding formats from assigned numbers.
+  -- (https://www.bluetooth.com/specifications/assigned-numbers/host-controller-interface)
+  [maximum_bits: 8]
+  U_LAW           = 0x00
+  A_LAW           = 0x01
+  CVSD            = 0x02
+  TRANSPARENT     = 0x03
+  LINEAR_PCM      = 0x04
+  MSBC            = 0x05
+  LC3             = 0x06
+  G729A           = 0x07
+  VENDOR_SPECIFIC = 0xFF
+
+
+enum IoCapability:
+  -- IO Capability values. All other values reserved for future use.
+  [maximum_bits: 8]
+  DISPLAY_ONLY       = 0x00
+  DISPLAY_YES_NO     = 0x01
+  KEYBOARD_ONLY      = 0x02
+  NO_INPUT_NO_OUTPUT = 0x03
+
+
+# inclusive-language: disable
+
+
+enum AuthenticationRequirements:
+  -- All options without MITM do not require MITM protection, and a numeric
+  -- comparison with automatic accept is allowed.
+  -- All options with MITM do require MITM protection, and IO capabilities should
+  -- be used to determine the authentication procedure.
+  [maximum_bits: 8]
+  NO_BONDING             = 0x00
+  MITM_NO_BONDING        = 0x01
+  DEDICATED_BONDING      = 0x02
+  MITM_DEDICATED_BONDING = 0x03
+  GENERAL_BONDING        = 0x04
+  MITM_GENERAL_BONDING   = 0x05
+
+# inclusive-language: enable
+
+
+struct LinkKey:
+  -- 16-octet (128-bit) link key.
+  0 [+16]  UInt:8[16]  value
+
+# ========================= HCI packet headers ==========================
+
+
+bits OpCodeBits:
+  -- HCI command opcode, split into a 10-bit OCF (OpCode Command Field) and a
+  -- 6-bit OGF (OpCode Group Field).
+  # Emboss currently lacks support for default field values and cross-type integral equality.
+  # (https://github.com/google/emboss/issues/21)
+  # (https://github.com/google/emboss/issues/23)
+  # Upon the addition of these features, we will transition OpCodeBits to be a parameterized
+  # field which defaults for each HCI packet type to its corresponding OpCode.
+  0     [+10]  UInt  ocf
+  $next [+6]   UInt  ogf
+
+
+struct CommandHeader:
+  -- HCI Command packet header.
+  0     [+2]  OpCodeBits  opcode
+  $next [+1]  UInt        parameter_total_size
+    -- Size, in octets, of the parameters that follow this header.
+
+
+struct EventHeader:
+  -- HCI Event packet header.
+  0     [+1]  UInt  event_code
+  $next [+1]  UInt  parameter_total_size
+    -- Size, in octets, of the parameters that follow this header.
+
+
+struct CommandCompleteEvent:
+  -- Core Spec v5.3 Vol 4, Part E, Section 7.7.14
+  -- header.event_code == 0x0E
+  let hdr_size = EventHeader.$size_in_bytes
+  0     [+hdr_size]  EventHeader  header
+  $next [+1]         UInt         num_hci_command_packets
+  $next [+2]         OpCodeBits   command_opcode
+  # Size of this event's fixed fields, excluding the header.
+  let event_fixed_size = $size_in_bytes-hdr_size
+  # Size of the command-specific return parameters that follow this event.
+  let return_parameters_size = header.parameter_total_size-event_fixed_size
+
+
+struct VendorDebugEvent:
+  -- This opcode is reserved for vendor-specific debugging events.
+  -- See Core Spec v5.3 Vol 4, Part E, Section 5.4.4.
+  let hdr_size = EventHeader.$size_in_bytes
+  0     [+hdr_size]  EventHeader  header
+  $next [+1]         UInt         subevent_code
+    -- The event code for the vendor subevent.
diff --git a/pw_bluetooth/public/pw_bluetooth/hci_events.emb b/pw_bluetooth/public/pw_bluetooth/hci_events.emb
new file mode 100644
index 0000000..610a488
--- /dev/null
+++ b/pw_bluetooth/public/pw_bluetooth/hci_events.emb
@@ -0,0 +1,1350 @@
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+# This file contains Emboss definitions for Host Controller Interface packets
+# and types found in the Bluetooth Core Specification. The Emboss compiler is
+# used to generate a C++ header from this file.
+
+import "hci_common.emb" as hci
+
+[$default byte_order: "LittleEndian"]
+[(cpp) namespace: "pw::bluetooth::emboss"]
+# =========================== Constants =================================
+
+
+enum CoreSpecificationVersion:
+  -- Bluetooth Core Specification version
+  [maximum_bits: 8]
+  V1_0B    = 0x00  # v1.0b
+  V1_1     = 0x01  # v1.1
+  V1_2     = 0x02  # v1.2
+  V2_0_EDR = 0x03  # v2.0+EDR
+  V2_1_EDR = 0x04  # v2.1+EDR
+  V3_0_HS  = 0x05  # v3.0+HS
+  V4_0     = 0x06  # v4.0
+  V4_1     = 0x07  # v4.1
+  V4_2     = 0x08  # v4.2
+  V5_0     = 0x09  # v5.0
+  V5_1     = 0x0A  # v5.1
+  V5_2     = 0x0B  # v5.2
+  V5_3     = 0x0C  # v5.3
+  V5_4     = 0x0D  # v5.4
+
+
+enum LinkType:
+  -- BR/EDR link types (SCO, ACL, eSCO).
+  [maximum_bits: 8]
+  SCO  = 0x00
+  ACL  = 0x01
+  ESCO = 0x02
+
+
+enum EncryptionStatus:
+  -- Encryption state of a link.
+  # NOTE(review): unlike the other 1-octet enums in this file, this enum has
+  # no [maximum_bits: 8] attribute — confirm whether that is intentional.
+  OFF                                = 0x00
+  ON_WITH_E0_FOR_BREDR_OR_AES_FOR_LE = 0x01
+  ON_WITH_AES_FOR_BREDR              = 0x02
+
+
+enum LEAdvertisingDataStatus:
+  -- Completeness of the advertising data, as carried in the 2-bit data_status
+  -- field of LEExtendedAdvertisingEventType.
+  [maximum_bits: 2]
+  COMPLETE             = 0b00
+  INCOMPLETE           = 0b01
+  INCOMPLETE_TRUNCATED = 0b10
+
+
+enum LEDirectAddressType:
+  -- Possible values that can be reported for the |direct_address_type| parameter in LE Advertising Report events.
+  [maximum_bits: 8]
+  PUBLIC                                = 0x00
+    -- Public Device Address
+
+  PRIVATE                               = 0x01
+    -- Non-resolvable Private Address or Static Device Address
+
+  RESOLVABLE_PRIVATE_OWN_ADDRESS_PUBLIC = 0x02
+    -- Resolvable Private Address (resolved by Controller; Own_Address_Type was 0x00 or 0x02)
+
+  RESOLVABLE_PRIVATE_OWN_ADDRESS_RANDOM = 0x03
+    -- Resolvable Private Address (resolved by Controller; Own_Address_Type was 0x01 or 0x03)
+
+  RESOLVABLE_PRIVATE                    = 0xFE
+    -- Resolvable Private Address (Controller unable to resolve)
+
+
+enum LEClockAccuracy:
+  -- Possible values that can be reported for the |central_clock_accuracy| and
+  -- |advertiser_clock_accuracy| parameters, in parts per million.
+  [maximum_bits: 8]
+  PPM_500 = 0x00
+  PPM_250 = 0x01
+  PPM_150 = 0x02
+  PPM_100 = 0x03
+  PPM_75  = 0x04
+  PPM_50  = 0x05
+  PPM_30  = 0x06
+  PPM_20  = 0x07
+
+
+enum KeyType:
+  -- The key type used during pairing.
+  # NOTE(review): 0x01 and 0x02 (legacy local/remote unit keys) are absent —
+  # presumably omitted as reserved/deprecated; confirm against the spec.
+  [maximum_bits: 8]
+  COMBINATION                           = 0x00
+  DEBUG_COMBINATION                     = 0x03
+  UNAUTHENTICATED_COMBINATION_FROM_P192 = 0x04
+  AUTHENTICATED_COMBINATION_FROM_P192   = 0x05
+  CHANGED_COMBINATION_KEY               = 0x06
+  UNAUTHENTICATED_COMBINATION_FROM_P256 = 0x07
+  AUTHENTICATED_COMBINATION_FROM_P256   = 0x08
+
+# =========================== Field Types =================================
+
+
+bits LmpFeatures(page: UInt:8):
+  -- Bit mask of Link Manager Protocol features for extended feature page
+  -- `page` (0-2); the field layout depends on the page parameter.
+  [requires: page <= 2]
+  if page == 0:
+    0  [+1]  Flag  three_slot_packets
+    1  [+1]  Flag  five_slot_packets
+    2  [+1]  Flag  encryption
+    3  [+1]  Flag  slot_offset
+    4  [+1]  Flag  timing_accuracy
+    5  [+1]  Flag  role_switch
+    6  [+1]  Flag  hold_mode
+    7  [+1]  Flag  sniff_mode
+    # 8: previously used
+    9  [+1]  Flag  power_control_requests
+    10 [+1]  Flag  channel_quality_driven_data_rate
+    11 [+1]  Flag  sco_link
+    12 [+1]  Flag  hv2_packets
+    13 [+1]  Flag  hv3_packets
+    14 [+1]  Flag  mu_law_log_synchronous_data
+    15 [+1]  Flag  a_law_log_synchronous_data
+    16 [+1]  Flag  cvsd_synchronous_data
+    17 [+1]  Flag  paging_parameter_negotiation
+    18 [+1]  Flag  power_control
+    19 [+1]  Flag  transparent_synchronous_data
+    20 [+3]  UInt  flow_control_lag
+    23 [+1]  Flag  broadcast_encryption
+    # 24: reserved for future use
+    25 [+1]  Flag  enhanced_data_rate_acl_2_mbs_mode
+    26 [+1]  Flag  enhanced_data_rate_acl_3_mbs_mode
+    27 [+1]  Flag  enhanced_inquiry_scan
+    28 [+1]  Flag  interlaced_inquiry_scan
+    29 [+1]  Flag  interlaced_page_scan
+    30 [+1]  Flag  rssi_with_inquiry_results
+    31 [+1]  Flag  extended_sco_link_ev3_packets
+    32 [+1]  Flag  ev4_packets
+    33 [+1]  Flag  ev5_packets
+    # 34: reserved for future use
+    35 [+1]  Flag  afh_capable_peripheral
+    36 [+1]  Flag  afh_classification_peripheral
+    37 [+1]  Flag  bredr_not_supported
+    38 [+1]  Flag  le_supported_controller
+    39 [+1]  Flag  three_slot_enhanced_data_rate_acl_packets
+    40 [+1]  Flag  five_slot_enhanced_data_rate_acl_packets
+    41 [+1]  Flag  sniff_subrating
+    42 [+1]  Flag  pause_encryption
+    43 [+1]  Flag  afh_capable_central
+    44 [+1]  Flag  afh_classification_central
+    45 [+1]  Flag  enhanced_data_rate_esco_2_mbs_mode
+    46 [+1]  Flag  enhanced_data_rate_esco_3_mbs_mode
+    47 [+1]  Flag  three_slot_enhanced_data_rate_esco_packets
+    48 [+1]  Flag  extended_inquiry_response
+    49 [+1]  Flag  simultaneous_le_and_bredr_to_same_device_capable_controller
+    # 50: reserved for future use
+    51 [+1]  Flag  secure_simple_pairing_controller_support
+    52 [+1]  Flag  encapsulated_pdu
+    53 [+1]  Flag  erroneous_data_reporting
+    54 [+1]  Flag  non_flushable_packet_boundary_flag
+    # 55: reserved for future use
+    56 [+1]  Flag  hci_link_supervision_timeout_changed_event
+    57 [+1]  Flag  variable_inquiry_tx_power_level
+    58 [+1]  Flag  enhanced_power_control
+    # 59-62: reserved for future use
+    63 [+1]  Flag  extended_features
+
+  if page == 1:
+    0  [+1]  Flag  secure_simple_pairing_host_support
+    1  [+1]  Flag  le_supported_host
+    # 2: previously used
+    3  [+1]  Flag  secure_connection_host_support
+
+  if page == 2:
+    0  [+1]  Flag  connectionless_peripheral_broadcast_transmitter_operation
+    1  [+1]  Flag  connectionless_peripheral_broadcast_receiver_operation
+    2  [+1]  Flag  synchronization_train
+    3  [+1]  Flag  synchronization_scan
+    4  [+1]  Flag  hci_inquiry_response_notification_event
+    5  [+1]  Flag  generalized_interlaced_scan
+    6  [+1]  Flag  coarse_clock_adjustment
+    # 7: reserved for future use
+    8  [+1]  Flag  secure_connections_controller_support
+    9  [+1]  Flag  ping
+    10 [+1]  Flag  slot_availability_mask
+    11 [+1]  Flag  train_nudging
+
+
+bits LEFeatureSet:
+  -- Bit mask of supported LE features, one flag per feature bit in ascending
+  -- bit order.
+  0     [+1]  Flag  le_encryption
+  $next [+1]  Flag  connection_parameters_request_procedure
+  $next [+1]  Flag  extended_reject_indication
+  $next [+1]  Flag  peripheral_initiated_features_exchange
+  $next [+1]  Flag  le_ping
+  $next [+1]  Flag  le_data_packet_length_extension
+  $next [+1]  Flag  ll_privacy
+  $next [+1]  Flag  extended_scanning_filter_policies
+  $next [+1]  Flag  le_2m_phy
+  $next [+1]  Flag  stable_modulation_index_transmitter
+  $next [+1]  Flag  stable_modulation_index_receiver
+  $next [+1]  Flag  le_coded_phy
+  $next [+1]  Flag  le_extended_advertising
+  $next [+1]  Flag  le_periodic_advertising
+  $next [+1]  Flag  channel_selection_algorithm_2
+  $next [+1]  Flag  le_power_class_1
+  $next [+1]  Flag  minimum_number_of_used_channels_procedure
+  $next [+1]  Flag  connection_cte_request
+  $next [+1]  Flag  connection_cte_response
+  $next [+1]  Flag  connectionless_cte_transmitter
+  $next [+1]  Flag  connectionless_cte_receiver
+  $next [+1]  Flag  antenna_switching_during_cte_transmission
+  $next [+1]  Flag  antenna_switching_during_cte_reception
+  $next [+1]  Flag  receiving_constant_tone_extensions
+  $next [+1]  Flag  periodic_advertising_sync_transfer_sender
+  $next [+1]  Flag  periodic_advertising_sync_transfer_recipient
+  $next [+1]  Flag  sleep_clock_accuracy_updates
+  $next [+1]  Flag  remote_public_key_validation
+  $next [+1]  Flag  connected_isochronous_stream_central
+  $next [+1]  Flag  connected_isochronous_stream_peripheral
+  $next [+1]  Flag  isochronous_broadcaster
+  $next [+1]  Flag  synchronized_receiver
+  $next [+1]  Flag  connected_isochronous_stream_host_support
+  $next [+1]  Flag  le_power_control_request_1
+  $next [+1]  Flag  le_power_control_request_2
+    -- Previous two bits shall always have the same value.
+
+  $next [+1]  Flag  le_path_loss_monitoring
+  $next [+1]  Flag  periodic_advertising_adi_support
+  $next [+1]  Flag  connection_subrating
+  $next [+1]  Flag  connection_subrating_host_support
+  $next [+1]  Flag  channel_classification
+  $next [+1]  Flag  advertising_coding_selection
+  $next [+1]  Flag  advertising_coding_selection_host_support
+  $next [+1]  Flag  reserved                                        # Bit 42 is skipped
+  $next [+1]  Flag  periodic_advertising_with_responses_advertiser
+  $next [+1]  Flag  periodic_advertising_with_responses_scanner
+
+
+bits LEExtendedAdvertisingEventType:
+  -- Event type bit field used in LE Extended Advertising Report events.
+  0     [+1]  Flag                     connectable
+  $next [+1]  Flag                     scannable
+  $next [+1]  Flag                     directed
+  $next [+1]  Flag                     scan_response
+  $next [+1]  Flag                     legacy
+  $next [+2]  LEAdvertisingDataStatus  data_status
+
+
+bits SupportedCommands(octet: UInt:8):
+  [requires: octet <= 47]
+  if octet == 0:
+    0 [+1]  Flag  inquiry
+    1 [+1]  Flag  inquiry_cancel
+    2 [+1]  Flag  periodic_inquiry_mode
+    3 [+1]  Flag  exit_periodic_inquiry_mode
+    4 [+1]  Flag  create_connection
+    5 [+1]  Flag  disconnect
+    7 [+1]  Flag  create_connection_cancel
+
+  if octet == 1:
+    0 [+1]  Flag  accept_connection_request
+    1 [+1]  Flag  reject_connection_request
+    2 [+1]  Flag  link_key_request_reply
+    3 [+1]  Flag  link_key_request_negative_reply
+    4 [+1]  Flag  pin_code_request_reply
+    5 [+1]  Flag  pin_code_request_negative_reply
+    6 [+1]  Flag  change_connection_packet_type
+    7 [+1]  Flag  authentication_requested
+
+  if octet == 2:
+    0 [+1]  Flag  set_connection_encryption
+    1 [+1]  Flag  change_connection_link_key
+    2 [+1]  Flag  link_key_selection
+    3 [+1]  Flag  remote_name_request
+    4 [+1]  Flag  remote_name_request_cancel
+    5 [+1]  Flag  read_remote_supported_features
+    6 [+1]  Flag  read_remote_extended_features
+    7 [+1]  Flag  read_remote_version_information
+
+  if octet == 3:
+    0 [+1]  Flag  read_clock_offset
+    1 [+1]  Flag  read_lmp_handle
+
+  if octet == 4:
+    1 [+1]  Flag  hold_mode
+    2 [+1]  Flag  sniff_mode
+    3 [+1]  Flag  exit_sniff_mode
+    6 [+1]  Flag  qos_setup
+    7 [+1]  Flag  role_discovery
+
+  if octet == 5:
+    0 [+1]  Flag  switch_role
+    1 [+1]  Flag  read_link_policy_settings
+    2 [+1]  Flag  write_link_policy_settings
+    3 [+1]  Flag  read_default_link_policy_settings
+    4 [+1]  Flag  write_default_link_policy_settings
+    5 [+1]  Flag  flow_specification
+    6 [+1]  Flag  set_event_mask
+    7 [+1]  Flag  reset
+
+  if octet == 6:
+    0 [+1]  Flag  set_event_filter
+    1 [+1]  Flag  flush
+    2 [+1]  Flag  read_pin_type
+    3 [+1]  Flag  write_pin_type
+    5 [+1]  Flag  read_stored_link_key
+    6 [+1]  Flag  write_stored_link_key
+    7 [+1]  Flag  deleted_stored_link_key
+
+  if octet == 7:
+    0 [+1]  Flag  write_local_name
+    1 [+1]  Flag  read_local_name
+    2 [+1]  Flag  read_connection_attempt_timeout
+    3 [+1]  Flag  write_connection_attempt_timeout
+    4 [+1]  Flag  read_page_timeout
+    5 [+1]  Flag  write_page_timeout
+    6 [+1]  Flag  read_scan_enable
+    7 [+1]  Flag  write_scan_enable
+
+  if octet == 8:
+    0 [+1]  Flag  read_page_scan_activity
+    1 [+1]  Flag  write_page_scan_activity
+    2 [+1]  Flag  read_inquiry_scan_activity
+    3 [+1]  Flag  write_inquiry_scan_activity
+    4 [+1]  Flag  read_authentication_enable
+    5 [+1]  Flag  write_authentication_enable
+
+  if octet == 9:
+    0 [+1]  Flag  read_class_of_device
+    1 [+1]  Flag  write_class_of_device
+    2 [+1]  Flag  read_voice_setting
+    3 [+1]  Flag  write_voice_setting
+    4 [+1]  Flag  read_automatic_flush_timeout
+    5 [+1]  Flag  write_automatic_flush_timeout
+    6 [+1]  Flag  read_num_broadcast_retransmissions
+    7 [+1]  Flag  write_num_broadcast_retransmissions
+
+  if octet == 10:
+    0 [+1]  Flag  read_hold_mode_activity
+    1 [+1]  Flag  write_hold_mode_activity
+    2 [+1]  Flag  read_transmit_power_level
+    3 [+1]  Flag  read_synchronous_flow_control_enable
+    4 [+1]  Flag  write_synchronous_flow_control_enable
+    5 [+1]  Flag  set_controller_to_host_flow_control
+    6 [+1]  Flag  host_buffer_size
+    7 [+1]  Flag  host_number_of_completed_packets
+
+  if octet == 11:
+    0 [+1]  Flag  read_link_supervision_timeout
+    1 [+1]  Flag  write_link_supervision_timeout
+    2 [+1]  Flag  read_number_of_supported_iac
+    3 [+1]  Flag  read_current_iaclap
+    4 [+1]  Flag  write_current_iaclap
+
+  if octet == 12:
+    1 [+1]  Flag  set_afh_host_channel_classification
+    4 [+1]  Flag  read_inquiry_scan_type
+    5 [+1]  Flag  write_inquiry_scan_type
+    6 [+1]  Flag  read_inquiry_mode
+    7 [+1]  Flag  write_inquiry_mode
+
+  if octet == 13:
+    0 [+1]  Flag  read_page_scan_type
+    1 [+1]  Flag  write_page_scan_type
+    2 [+1]  Flag  read_afh_channel_assessment_mode
+    3 [+1]  Flag  write_afh_channel_assessment_mode
+
+  if octet == 14:
+    3 [+1]  Flag  read_local_version_information
+    5 [+1]  Flag  read_local_supported_features
+    6 [+1]  Flag  read_local_extended_features
+    7 [+1]  Flag  read_buffer_size
+
+  if octet == 15:
+    1 [+1]  Flag  read_bdaddr
+    2 [+1]  Flag  read_failed_contact_counter
+    3 [+1]  Flag  reset_failed_contact_c_ounter
+    4 [+1]  Flag  read_link_quality
+    5 [+1]  Flag  read_rssi
+    6 [+1]  Flag  read_afh_channel_map
+    7 [+1]  Flag  read_clock
+
+  if octet == 16:
+    0 [+1]  Flag  read_loopback_mode
+    1 [+1]  Flag  write_loopback_mode
+    2 [+1]  Flag  enable_device_under_test_mode
+    3 [+1]  Flag  setup_synchronous_connection_request
+    4 [+1]  Flag  accept_synchronous_connection_request
+    5 [+1]  Flag  reject_synchronous_connection_request
+
+  if octet == 17:
+    0 [+1]  Flag  read_extended_inquiry_response
+    1 [+1]  Flag  write_extended_inquiry_response
+    2 [+1]  Flag  refresh_encryption_key
+    4 [+1]  Flag  sniff_subrating
+    5 [+1]  Flag  read_simple_pairing_mode
+    6 [+1]  Flag  write_simple_pairing_mode
+    7 [+1]  Flag  read_local_oob_data
+
+  if octet == 18:
+    0 [+1]  Flag  read_inquiry_response_transmit_power_level
+    1 [+1]  Flag  write_inquiry_transmit_power_level
+    2 [+1]  Flag  read_default_erroneous_data_reporting
+    3 [+1]  Flag  write_default_erroneous_data_reporting
+    7 [+1]  Flag  io_capability_request_reply
+
+  if octet == 19:
+    0 [+1]  Flag  user_confirmation_request_reply
+    1 [+1]  Flag  user_confirmation_request_negative_reply
+    2 [+1]  Flag  user_passkey_request_reply
+    3 [+1]  Flag  user_passkey_request_negative_reply
+    4 [+1]  Flag  remote_oob_data_request_reply
+    5 [+1]  Flag  write_simple_pairing_debug_mode
+    6 [+1]  Flag  enhanced_flush
+    7 [+1]  Flag  remote_oob_data_request_negative_reply
+
+  if octet == 20:
+    2 [+1]  Flag  send_keypress_notification
+    3 [+1]  Flag  io_capability_request_negative_reply
+    4 [+1]  Flag  read_encryption_key_size
+
+  if octet == 21:
+    0 [+1]  Flag  create_physical_link
+    1 [+1]  Flag  accept_physical_link
+    2 [+1]  Flag  disconnect_physical_link
+    3 [+1]  Flag  create_logical_link
+    4 [+1]  Flag  accept_logical_link
+    5 [+1]  Flag  disconnect_logical_link
+    6 [+1]  Flag  logical_link_cancel
+    7 [+1]  Flag  flow_spec_modify
+
+  if octet == 22:
+    0 [+1]  Flag  read_logical_link_accept_timeout
+    1 [+1]  Flag  write_logical_link_accept_timeout
+    2 [+1]  Flag  set_event_mask_page_2
+    3 [+1]  Flag  read_location_data
+    4 [+1]  Flag  write_location_data
+    5 [+1]  Flag  read_local_amp_info
+    6 [+1]  Flag  read_local_ampassoc
+    7 [+1]  Flag  write_remote_ampassoc
+
+  if octet == 23:
+    0 [+1]  Flag  read_flow_control_mode
+    1 [+1]  Flag  write_flow_control_mode
+    2 [+1]  Flag  read_data_block_size
+
+  if octet == 24:
+    0 [+1]  Flag  read_enhanced_transmit_power_level
+    2 [+1]  Flag  read_best_effort_flush_timeout
+    3 [+1]  Flag  write_best_effort_flush_timeout
+    4 [+1]  Flag  short_range_mode
+    5 [+1]  Flag  read_le_host_supported
+    6 [+1]  Flag  write_le_host_support
+
+  if octet == 25:
+    0 [+1]  Flag  le_set_event_mask
+    1 [+1]  Flag  le_read_buffer_size_v1
+    2 [+1]  Flag  le_read_local_supported_features
+    4 [+1]  Flag  le_set_random_address
+    5 [+1]  Flag  le_set_advertising_parameters
+    6 [+1]  Flag  le_read_advertising_channel_tx_power
+    7 [+1]  Flag  le_set_advertising_data
+
+  if octet == 26:
+    0 [+1]  Flag  le_set_scan_response_data
+    1 [+1]  Flag  le_set_advertising_enable
+    2 [+1]  Flag  le_set_scan_parameters
+    3 [+1]  Flag  le_set_scan_enable
+    4 [+1]  Flag  le_create_connection
+    5 [+1]  Flag  le_create_connection_cancel
+    6 [+1]  Flag  le_read_filter_accept_list_size
+    7 [+1]  Flag  le_clear_filter_accept_list
+
+  if octet == 27:
+    0 [+1]  Flag  le_add_device_to_filter_accept_list
+    1 [+1]  Flag  le_remove_device_from_filter_accept_list
+    2 [+1]  Flag  le_connection_update
+    3 [+1]  Flag  le_set_host_channel_classification
+    4 [+1]  Flag  le_read_channel_map
+    5 [+1]  Flag  le_read_remote_features
+    6 [+1]  Flag  le_encrypt
+    7 [+1]  Flag  le_rand
+
+  if octet == 28:
+    0 [+1]  Flag  le_start_encryption
+    1 [+1]  Flag  le_long_term_key_request_reply
+    2 [+1]  Flag  le_long_term_key_request_negative_reply
+    3 [+1]  Flag  le_read_supported_states
+    4 [+1]  Flag  le_receiver_test_v1
+    5 [+1]  Flag  le_transmitter_test_v1
+    6 [+1]  Flag  le_test_end
+
+  if octet == 29:
+    3 [+1]  Flag  enhanced_setup_synchronous_connection
+    4 [+1]  Flag  enhanced_accept_synchronous_connection
+    5 [+1]  Flag  read_local_supported_codecs
+    6 [+1]  Flag  set_mws_channel_parameters
+    7 [+1]  Flag  set_external_frame_configuration
+
+  if octet == 30:
+    0 [+1]  Flag  set_mws_signaling
+    1 [+1]  Flag  set_mws_transport_layer
+    2 [+1]  Flag  set_mws_scan_frequency_table
+    3 [+1]  Flag  get_mws_transport_layer_configuration
+    4 [+1]  Flag  set_mws_pattern_configuration
+    5 [+1]  Flag  set_triggered_clock_capture
+    6 [+1]  Flag  truncated_page
+    7 [+1]  Flag  truncated_page_cancel
+
+  if octet == 31:
+    0 [+1]  Flag  set_connectionless_peripheral_broadcast
+    1 [+1]  Flag  set_connectionless_peripheral_broadcast_receive
+    2 [+1]  Flag  start_synchronization_train
+    3 [+1]  Flag  receive_synchronization_train
+    4 [+1]  Flag  set_reserved_ltaddr
+    5 [+1]  Flag  delete_reserved_ltaddr
+    6 [+1]  Flag  set_connectionless_peripheral_broadcast_data
+    7 [+1]  Flag  read_synchronization_train_parameters
+
+  if octet == 32:
+    0 [+1]  Flag  write_synchronization_train_parameters
+    1 [+1]  Flag  remote_oob_extended_data_request_reply
+    2 [+1]  Flag  read_secure_connections_host_support
+    3 [+1]  Flag  write_secure_connections_host_support
+    4 [+1]  Flag  read_authenticated_payload_timeout
+    5 [+1]  Flag  write_authenticated_payload_timeout
+    6 [+1]  Flag  read_local_oob_extended_data
+    7 [+1]  Flag  write_secure_connections_test_mode
+
+  if octet == 33:
+    0 [+1]  Flag  read_extended_page_timeout
+    1 [+1]  Flag  write_extended_page_timeout
+    2 [+1]  Flag  read_extended_inquiry_length
+    3 [+1]  Flag  write_extended_inquiry_length
+    4 [+1]  Flag  le_remote_connection_parameter_request_reply
+    5 [+1]  Flag  le_remote_connection_parameter_request_negative_reply
+    6 [+1]  Flag  le_set_data_length
+    7 [+1]  Flag  le_read_suggested_default_data_length
+
+  if octet == 34:
+    0 [+1]  Flag  le_write_suggested_default_data_length
+    1 [+1]  Flag  le_read_local_p256_public_key
+    2 [+1]  Flag  le_generate_dh_key_v1
+    3 [+1]  Flag  le_add_device_to_resolving_list
+    4 [+1]  Flag  le_remove_device_from_resolving_list
+    5 [+1]  Flag  le_clear_resolving_list
+    6 [+1]  Flag  le_read_resolving_list_size
+    7 [+1]  Flag  le_read_peer_resolvable_address
+
+  if octet == 35:
+    0 [+1]  Flag  le_read_local_resolvable_address
+    1 [+1]  Flag  le_set_address_resolution_enable
+    2 [+1]  Flag  le_set_resolvable_private_address_timeout
+    3 [+1]  Flag  le_read_maximum_data_length
+    4 [+1]  Flag  le_read_phy
+    5 [+1]  Flag  le_set_default_phy
+    6 [+1]  Flag  le_set_phy
+    7 [+1]  Flag  le_enhanced_receiver_test_v2
+
+  if octet == 36:
+    0 [+1]  Flag  le_enhanced_transmitter_test_v2
+    1 [+1]  Flag  le_set_advertising_set_random_address
+    2 [+1]  Flag  le_set_extended_advertising_parameters
+    3 [+1]  Flag  le_set_extended_advertising_data
+    4 [+1]  Flag  le_set_extended_scan_response_data
+    5 [+1]  Flag  le_set_extended_advertising_enable
+    6 [+1]  Flag  le_read_maximum_advertising_data_length
+    7 [+1]  Flag  le_read_number_of_supported_advertising_sets
+
+  if octet == 37:
+    0 [+1]  Flag  le_remove_advertising_set
+    1 [+1]  Flag  le_clear_advertising_sets
+    2 [+1]  Flag  le_set_periodic_advertising_parameters
+    3 [+1]  Flag  le_set_periodic_advertising_data
+    4 [+1]  Flag  le_set_periodic_advertising_enable
+    5 [+1]  Flag  le_set_extended_scan_parameters
+    6 [+1]  Flag  le_set_extended_scan_enable
+    7 [+1]  Flag  le_extended_create_connection
+
+  if octet == 38:
+    0 [+1]  Flag  le_periodic_advertising_create_sync
+    1 [+1]  Flag  le_periodic_advertising_create_sync_cancel
+    2 [+1]  Flag  le_periodic_advertising_terminate_sync
+    3 [+1]  Flag  le_add_device_to_periodic_advertiser_list
+    4 [+1]  Flag  le_remove_device_from_periodic_advertiser_list
+    5 [+1]  Flag  le_clear_periodic_advertiser_list
+    6 [+1]  Flag  le_read_periodic_advertiser_list_size
+    7 [+1]  Flag  le_read_transmit_power
+
+  if octet == 39:
+    0 [+1]  Flag  le_read_rf_path_compensation
+    1 [+1]  Flag  le_write_rf_path_compensation
+    2 [+1]  Flag  le_set_privacy_mode
+    3 [+1]  Flag  le_receiver_test_v3
+    4 [+1]  Flag  le_transmitter_test_v3
+    5 [+1]  Flag  le_set_connectionless_cte_transmit_parameters
+    6 [+1]  Flag  le_set_connectionless_cte_transmit_enable
+    7 [+1]  Flag  le_set_connectionless_iq_sampling_enable
+
+  if octet == 40:
+    0 [+1]  Flag  le_set_connection_cte_receive_parameters
+    1 [+1]  Flag  le_set_connection_cte_transmit_parameters
+    2 [+1]  Flag  le_connection_cte_request_enable
+    3 [+1]  Flag  le_connection_cte_response_enable
+    4 [+1]  Flag  le_read_antenna_information
+    5 [+1]  Flag  le_set_periodic_advertising_receive_enable
+    6 [+1]  Flag  le_periodic_advertising_sync_transfer
+    7 [+1]  Flag  le_periodic_advertising_set_info_transfer
+
+  if octet == 41:
+    0 [+1]  Flag  le_set_periodic_advertising_sync_transfer_parameters
+    1 [+1]  Flag  le_set_default_periodic_advertising_sync_transfer_parameters
+    2 [+1]  Flag  le_generate_dh_key_v3
+    3 [+1]  Flag  read_local_simple_pairing_options
+    4 [+1]  Flag  le_modify_sleep_clock_accuracy
+    5 [+1]  Flag  le_read_buffer_size_v2
+    6 [+1]  Flag  le_read_isotx_sync
+    7 [+1]  Flag  le_set_cig_parameters
+
+  if octet == 42:
+    0 [+1]  Flag  le_set_cig_parameters_test
+    1 [+1]  Flag  le_create_cis
+    2 [+1]  Flag  le_remove_cig
+    3 [+1]  Flag  le_accept_cis_request
+    4 [+1]  Flag  le_reject_cis_request
+    5 [+1]  Flag  le_create_big
+    6 [+1]  Flag  le_create_big_test
+    7 [+1]  Flag  le_terminate_big
+
+  if octet == 43:
+    0 [+1]  Flag  le_big_create_sync
+    1 [+1]  Flag  le_big_terminate_sync
+    2 [+1]  Flag  le_request_peer_sca
+    3 [+1]  Flag  le_setup_iso_data_path
+    4 [+1]  Flag  le_remove_iso_data_path
+    5 [+1]  Flag  le_iso_transmit_test
+    6 [+1]  Flag  le_iso_receive_test
+    7 [+1]  Flag  le_iso_read_test_counters
+
+  if octet == 44:
+    0 [+1]  Flag  le_iso_test_end
+    1 [+1]  Flag  le_set_host_feature
+    2 [+1]  Flag  le_read_iso_link_quality
+    3 [+1]  Flag  le_enhanced_read_transmit_power_level
+    4 [+1]  Flag  le_read_remote_transmit_power_level
+    5 [+1]  Flag  le_set_path_loss_reporting_parameters
+    6 [+1]  Flag  le_set_path_loss_reporting_enable
+    7 [+1]  Flag  le_set_transmit_power_reporting_enable
+
+  if octet == 45:
+    0 [+1]  Flag  le_transmitter_test_v4
+    1 [+1]  Flag  set_ecosystem_base_interval
+    2 [+1]  Flag  read_local_supported_codecs_v2
+    3 [+1]  Flag  read_local_supported_codec_capabilities
+    4 [+1]  Flag  read_local_supported_controller_delay
+    5 [+1]  Flag  configure_data_path
+
+  if octet == 46:
+    0 [+1]  Flag  le_set_default_subrate
+    1 [+1]  Flag  le_subrate_request
+    2 [+1]  Flag  le_set_extended_advertising_parameters_v2
+    5 [+1]  Flag  le_set_periodic_advertising_subevent_data
+    6 [+1]  Flag  le_set_periodic_advertising_response_data
+    7 [+1]  Flag  le_set_periodic_sync_subevent
+
+  if octet == 47:
+    0 [+1]  Flag  le_extended_create_connection_v2
+    1 [+1]  Flag  le_set_periodic_advertising_parameters_v2
+
+# ========================= HCI Event packets ===========================
+# Core Spec v5.3 Vol 4, Part E, Section 7.7
+
+
+struct InquiryCompleteEvent:
+  -- Inquiry Complete Event (v1.1) (BR/EDR)
+  -- Indicates that the Inquiry has finished.
+  let hdr_size = hci.EventHeader.$size_in_bytes
+  0     [+hdr_size]  hci.EventHeader  header
+  $next [+1]         hci.StatusCode   status
+    -- The status of the completed Inquiry.
+
+
+struct InquiryResult:
+  -- A single device response within an Inquiry Result event.
+  0     [+hci.BdAddr.$size_in_bytes]  hci.BdAddr                  bd_addr
+    -- BD_ADDR for a device which responded.
+
+  $next [+1]                          hci.PageScanRepetitionMode  page_scan_repetition_mode
+    -- The Page Scan Repetition Mode being used by the remote device.
+
+  $next [+2]                          UInt                        reserved
+    -- Reserved for future use.
+
+  $next [+3]                          hci.ClassOfDevice           class_of_device
+    -- Class of Device for the device.
+
+  $next [+2]                          hci.ClockOffset             clock_offset
+    -- The lower 15 bits represent bits 16-2 of CLKNPeripheral-CLK.
+
+
+struct InquiryResultEvent:
+  -- Inquiry Result Event (v1.1) (BR/EDR)
+  let hdr_size = hci.EventHeader.$size_in_bytes
+  0     [+hdr_size]                     hci.EventHeader  header
+  $next [+1]                            UInt             num_responses
+    -- Number of responses from the Inquiry.
+
+  let response_size = InquiryResult.$size_in_bytes
+  $next [+num_responses*response_size]  InquiryResult[]  responses
+    -- num_responses InquiryResult entries, laid out back-to-back.
+
+
+struct SimpleCommandCompleteEvent:
+  -- A Command Complete event where a StatusCode is the only return parameter.
+  -- Also useful for generically getting the status of a larger command complete
+  -- event.
+  let hdr_size = hci.CommandCompleteEvent.$size_in_bytes
+  0     [+hdr_size]  hci.CommandCompleteEvent  command_complete
+  $next [+1]         hci.StatusCode            status
+    -- The status return parameter of the completed command.
+
+
+struct CommandStatusEvent:
+  -- Command Status Event (BR/EDR & LE)
+  let hdr_size = hci.EventHeader.$size_in_bytes
+  0     [+hdr_size]  hci.EventHeader  header
+  $next [+1]         hci.StatusCode   status
+  $next [+1]         UInt             num_hci_command_packets
+    -- Number of HCI Command packets the Host is allowed to send.
+
+  $next [+2]         hci.OpCodeBits   command_opcode
+    -- Opcode of the command this status applies to.
+
+
+struct ConnectionCompleteEvent:
+  -- Connection Complete Event (v1.1) (BR/EDR)
+  let hdr_size = hci.EventHeader.$size_in_bytes
+  0     [+hdr_size]                   hci.EventHeader         header
+  $next [+1]                          hci.StatusCode          status
+  $next [+2]                          UInt                    connection_handle
+    -- Only the lower 12-bits are meaningful.
+    [requires: 0x0000 <= this <= 0x0EFF]
+
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr              bd_addr
+    -- The address of the connected device
+
+  $next [+1]                          LinkType                link_type
+  $next [+1]                          hci.GenericEnableParam  encryption_enabled
+    -- Whether link-level encryption is enabled on the new connection.
+
+
+struct ConnectionRequestEvent:
+  -- Connection Request Event (v1.1) (BR/EDR)
+  let hdr_size = hci.EventHeader.$size_in_bytes
+  0     [+hdr_size]                   hci.EventHeader    header
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr         bd_addr
+    -- The address of the device that's requesting the connection.
+
+  $next [+3]                          hci.ClassOfDevice  class_of_device
+    -- The Class of Device of the device which requests the connection.
+
+  $next [+1]                          LinkType           link_type
+    -- The type of connection being requested.
+
+
+struct DisconnectionCompleteEvent:
+  -- Disconnection Complete Event (v1.1) (BR/EDR & LE)
+  let hdr_size = hci.EventHeader.$size_in_bytes
+  0     [+hdr_size]  hci.EventHeader  header
+  $next [+1]         hci.StatusCode   status
+  $next [+2]         UInt             connection_handle
+    -- Only the lower 12-bits are meaningful.
+    [requires: 0x0000 <= this <= 0x0EFF]
+
+  $next [+1]         hci.StatusCode   reason
+    -- The reason for the disconnection.
+
+
+struct AuthenticationCompleteEvent:
+  -- Authentication Complete Event (v1.1) (BR/EDR)
+  let hdr_size = hci.EventHeader.$size_in_bytes
+  0     [+hdr_size]  hci.EventHeader  header
+  $next [+1]         hci.StatusCode   status
+  $next [+2]         UInt             connection_handle
+    -- Only the lower 12-bits are meaningful.
+    [requires: 0x0000 <= this <= 0x0EFF]
+
+
+struct RemoteNameRequestCompleteEvent:
+  -- Remote Name Request Complete Event (v1.1) (BR/EDR)
+  let hdr_size = hci.EventHeader.$size_in_bytes
+  0     [+hdr_size]                   hci.EventHeader  header
+  $next [+1]                          hci.StatusCode   status
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr       bd_addr
+    -- The address of the remote device whose name was requested.
+
+  $next [+248]                        UInt:8[248]      remote_name
+    -- UTF-8 encoded friendly name. If the name is less than 248 characters, it
+    -- is null terminated and the remaining bytes are not valid.
+
+
+struct EncryptionChangeEventV1:
+  -- Encryption Change Event (v1.1) (BR/EDR & LE)
+  let hdr_size = hci.EventHeader.$size_in_bytes
+  0     [+hdr_size]  hci.EventHeader   header
+  $next [+1]         hci.StatusCode    status
+  $next [+2]         UInt              connection_handle
+    -- Only the lower 12-bits are meaningful.
+    [requires: 0x0000 <= this <= 0x0EFF]
+
+  $next [+1]         EncryptionStatus  encryption_enabled
+    -- The new encryption state of the link.
+
+
+struct ChangeConnectionLinkKeyCompleteEvent:
+  -- Change Connection Link Key Complete Event (v1.1) (BR/EDR)
+  let hdr_size = hci.EventHeader.$size_in_bytes
+  0     [+hdr_size]  hci.EventHeader  header
+  $next [+1]         hci.StatusCode   status
+  $next [+2]         UInt             connection_handle
+    -- Only the lower 12-bits are meaningful.
+    [requires: 0x0000 <= this <= 0x0EFF]
+
+
+struct ReadRemoteSupportedFeaturesCompleteEvent:
+  -- Read Remote Supported Features Complete Event (v1.1) (BR/EDR)
+  let hdr_size = hci.EventHeader.$size_in_bytes
+  0     [+hdr_size]  hci.EventHeader  header
+  $next [+1]         hci.StatusCode   status
+  $next [+2]         UInt             connection_handle
+    -- Only the lower 12-bits are meaningful.
+    [requires: 0x0000 <= this <= 0x0EFF]
+
+  $next [+8]         LmpFeatures(0)   lmp_features
+    -- Page 0 of the LMP features.
+
+
+struct ReadRemoteVersionInfoCompleteEvent:
+  -- Read Remote Version Information Complete Event (v1.1) (BR/EDR & LE)
+  let hdr_size = hci.EventHeader.$size_in_bytes
+  0     [+hdr_size]  hci.EventHeader           header
+  $next [+1]         hci.StatusCode            status
+  $next [+2]         UInt                      connection_handle
+    -- Only the lower 12-bits are meaningful.
+    [requires: 0x0000 <= this <= 0x0EFF]
+
+  $next [+1]         CoreSpecificationVersion  version
+    -- Version of the Current LMP or Link Layer supported by the remote Controller.
+
+  $next [+2]         UInt                      company_identifier
+    -- Company identifier for the manufacturer of the remote Controller. Assigned by Bluetooth SIG.
+
+  $next [+2]         UInt                      subversion
+    -- Revision of the LMP or Link Layer implementation in the remote Controller. This value is vendor-specific.
+
+
+struct ReadRemoteExtendedFeaturesCompleteEvent:
+  -- Read Remote Extended Features Complete Event (v1.1) (BR/EDR)
+  let hdr_size = hci.EventHeader.$size_in_bytes
+  0     [+hdr_size]  hci.EventHeader           header
+  $next [+1]         hci.StatusCode            status
+    -- The status of the HCI_Read_Remote_Extended_Features command.
+
+  $next [+2]         UInt                      connection_handle
+    -- Only the lower 12-bits are meaningful.
+    [requires: 0x0000 <= this <= 0x0EFF]
+
+  $next [+1]         UInt                      page_number
+    -- 0x00: The normal LMP features as returned by HCI_Read_Remote_Supported_Features command.
+    -- 0x01 to 0xFF: The page number of the features returned.
+
+  $next [+1]         UInt                      max_page_number
+    -- The highest features page number which contains non-zero bits for the remote device.
+
+  $next [+8]         LmpFeatures(page_number)  lmp_features
+    -- Bit map of requested page of LMP features.
+
+
+struct LEMetaEvent:
+  -- LE Meta Event (LE). Common prefix of every LE subevent in this file;
+  -- subevent_code identifies which subevent structure follows.
+  let hdr_size = hci.EventHeader.$size_in_bytes
+  0     [+hdr_size]  hci.EventHeader  header
+  $next [+1]         UInt             subevent_code
+    -- The event code for the LE subevent.
+
+
+struct LEConnectionCompleteSubevent:
+  -- LE Connection Complete Subevent (v4.0) (LE)
+  0     [+LEMetaEvent.$size_in_bytes]  LEMetaEvent            le_meta_event
+
+  $next [+1]                           hci.StatusCode         status
+
+  $next [+2]                           UInt                   connection_handle
+    -- Only the lower 12-bits are meaningful.
+    [requires: 0x0000 <= this <= 0x0EFF]
+
+  $next [+1]                           hci.ConnectionRole     role
+
+  $next [+1]                           hci.LEPeerAddressType  peer_address_type
+
+  $next [+hci.BdAddr.$size_in_bytes]   hci.BdAddr             peer_address
+    -- Public Device Address or Random Device Address of the peer device.
+
+  $next [+2]                           UInt                   connection_interval
+    -- Time: N * 1.25 ms
+    -- Range: 7.5 ms to 4 s
+    [requires: 0x0006 <= this <= 0x0C80]
+
+  $next [+2]                           UInt                   peripheral_latency
+    [requires: 0x0000 <= this <= 0x01F3]
+
+  $next [+2]                           UInt                   supervision_timeout
+    -- Time: N * 10 ms
+    -- Range: 100 ms to 32 s
+    [requires: 0x000A <= this <= 0x0C80]
+
+  $next [+1]                           LEClockAccuracy        central_clock_accuracy
+    -- Only valid for a peripheral. On a central, this parameter shall be set to 0x00.
+
+
+struct LEConnectionUpdateCompleteSubevent:
+  -- LE Connection Update Complete Subevent (v4.0) (LE)
+  0     [+LEMetaEvent.$size_in_bytes]  LEMetaEvent     le_meta_event
+
+  $next [+1]                           hci.StatusCode  status
+
+  $next [+2]                           UInt            connection_handle
+    -- Only the lower 12-bits are meaningful.
+    [requires: 0x0000 <= this <= 0x0EFF]
+
+  $next [+2]                           UInt            connection_interval
+    -- Time: N * 1.25 ms
+    -- Range: 7.5 ms to 4 s
+    [requires: 0x0006 <= this <= 0x0C80]
+
+  $next [+2]                           UInt            peripheral_latency
+    [requires: 0x0000 <= this <= 0x01F3]
+
+  $next [+2]                           UInt            supervision_timeout
+    -- Time: N * 10 ms
+    -- Range: 100 ms to 32 s
+    [requires: 0x000A <= this <= 0x0C80]
+
+
+struct LEReadRemoteFeaturesCompleteSubevent:
+  -- LE Read Remote Features Complete Subevent (v4.0) (LE)
+  0     [+LEMetaEvent.$size_in_bytes]    LEMetaEvent     le_meta_event
+  $next [+1]                             hci.StatusCode  status
+  $next [+2]                             UInt            connection_handle
+    -- Only the lower 12-bits are meaningful.
+    [requires: 0x0000 <= this <= 0x0EFF]
+
+  $next [+8]  bits:
+    0     [+LEFeatureSet.$size_in_bits]  LEFeatureSet    le_features
+
+
+struct LEExtendedAdvertisingReportData:
+  -- A single report within an LE Extended Advertising Report subevent.
+  0     [+2]  bits:
+
+    0     [+7]                        LEExtendedAdvertisingEventType  event_type
+      -- Only 7 of the 16 bits are defined here; the remaining bits are reserved.
+
+  $next [+1]                          hci.LEAddressType               address_type
+    -- Address type of the advertiser.
+
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr                      address
+    -- Public Device Address, Random Device Address, Public Identity Address or
+    -- Random (static) Identity Address of the advertising device.
+
+  $next [+1]                          hci.LEPrimaryAdvertisingPHY     primary_phy
+    -- Indicates the PHY used to send the advertising PDU on the primary advertising
+    -- channel. Legacy PDUs always use LE_1M. NONE, LE_2M, and LE_CODED_S2 are excluded.
+
+  $next [+1]                          hci.LESecondaryAdvertisingPHY   secondary_phy
+    -- Indicates the PHY used to send the advertising PDU(s), if any, on the secondary
+    -- advertising channel. A value of NONE means that no packets were received on the
+    -- secondary advertising channel.
+
+  $next [+1]                          UInt                            advertising_sid
+    -- Value of the Advertising SID subfield in the ADI field of the PDU. A value of
+    -- 0xFF means no ADI field provided.
+    [requires: 0x00 <= this <= 0x0F || this == 0xFF]
+
+  $next [+1]                          Int                             tx_power
+    -- Units: dBm. A value of 0x7F means Tx Power information is not available.
+    -- Signed (range -127 to +20 dBm): this field was previously UInt, which can
+    -- never satisfy the negative half of the [requires] range below; Int matches
+    -- the signed rssi field.
+    [requires: -127 <= this <= 20 || this == 0x7F]
+
+  $next [+1]                          Int                             rssi
+    -- Units: dBm. A value of 0x7F means RSSI is not available.
+    [requires: -127 <= this <= 20 || this == 0x7F]
+
+  $next [+2]                          UInt                            periodic_advertising_interval
+    -- 0x0000: No periodic advertising.
+    -- 0xXXXX:
+    --   Time = N * 1.25 ms
+    --   Time Range: 7.5 ms to 81,918.75 s
+    [requires: 0x0006 <= this <= 0xFFFF || this == 0x0000]
+
+  $next [+1]                          LEDirectAddressType             direct_address_type
+
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr                      direct_address
+    -- TargetA field in the advertisement or either Public Identity Address or Random (static)
+    -- Identity Address of the target device.
+
+  $next [+1]                          UInt                            data_length
+    -- Length of the |data| field.
+
+  $next [+data_length]                UInt:8[data_length]             data
+    -- |data_length| octets of advertising or scan response data formatted as defined in
+    -- [Vol 3] Part C, Section 11. Note: Each element of this array has a variable length.
+
+
+struct LEExtendedAdvertisingReportSubevent(reports_size: UInt:8):
+  -- LE Extended Advertising Report Event (v5.0) (LE)
+  -- reports_size is the total size in bytes of the packed reports array.
+  0     [+LEMetaEvent.$size_in_bytes]  LEMetaEvent           le_meta_event
+  $next [+1]                           UInt                  num_reports
+    -- Number of separate reports in the event.
+    [requires: 0x01 <= this <= 0x0A]
+
+  $next [+reports_size]                UInt:8[reports_size]  reports
+    -- Since each report has a variable length, they are stored in a UInt:8 array.
+
+
+struct RoleChangeEvent:
+  -- Role Change Event (BR/EDR) (v1.1)
+  let hdr_size = hci.EventHeader.$size_in_bytes
+  0     [+hdr_size]                   hci.EventHeader     header
+  $next [+1]                          hci.StatusCode      status
+    -- The status of the role change.
+
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr          bd_addr
+    -- The address of the device for which a role change has completed.
+
+  $next [+1]                          hci.ConnectionRole  role
+    -- The new role for the specified address.
+
+
+struct LinkKeyRequestEvent:
+  -- Link Key Request Event (v1.1) (BR/EDR)
+  let hdr_size = hci.EventHeader.$size_in_bytes
+  0     [+hdr_size]                   hci.EventHeader  header
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr       bd_addr
+    -- The address of the device for which a host-stored link key is being
+    -- requested.
+
+
+struct LinkKeyNotificationEvent:
+  -- Link Key Notification Event (v1.1) (BR/EDR)
+  let hdr_size = hci.EventHeader.$size_in_bytes
+  0     [+hdr_size]                    hci.EventHeader  header
+  $next [+hci.BdAddr.$size_in_bytes]   hci.BdAddr       bd_addr
+    -- The address of the device for which a new link key has been generated.
+
+  $next [+hci.LinkKey.$size_in_bytes]  hci.LinkKey      link_key
+    -- Link key for the associated address.
+
+  $next [+1]                           KeyType          key_type
+    -- Type of key used when pairing.
+
+
+struct DataBufferOverflowEvent:
+  -- Data Buffer Overflow Event (v1.1) (BR/EDR & LE)
+  let hdr_size = hci.EventHeader.$size_in_bytes
+  0     [+hdr_size]  hci.EventHeader  header
+  $next [+1]         LinkType         ll_type
+    -- The link type of the buffer that overflowed.
+
+
+struct InquiryResultWithRssi:
+  -- A single inquiry result (with RSSI).
+  0     [+hci.BdAddr.$size_in_bytes]  hci.BdAddr                  bd_addr
+    -- The address for the device which responded.
+
+  $next [+1]                          hci.PageScanRepetitionMode  page_scan_repetition_mode
+    -- The Page Scan Repetition Mode being used by the remote device.
+
+  $next [+1]                          UInt                        reserved
+    -- Reserved for future use.
+
+  $next [+3]                          hci.ClassOfDevice           class_of_device
+  $next [+2]                          hci.ClockOffset             clock_offset
+    -- The lower 15 bits represent bits 16-2 of CLKNPeripheral-CLK. The most
+    -- significant bit is reserved.
+
+  $next [+1]                          Int                         rssi
+    -- Units: dBm
+    [requires: -127 <= this <= 20]
+
+
+struct InquiryResultWithRssiEvent:
+  -- Inquiry Result with RSSI Event (v1.2) (BR/EDR)
+  let hdr_size = hci.EventHeader.$size_in_bytes
+  0     [+hdr_size]                     hci.EventHeader          header
+  $next [+1]                            UInt                     num_responses
+    -- The number of responses included.
+
+  let response_size = InquiryResultWithRssi.$size_in_bytes
+  $next [+num_responses*response_size]  InquiryResultWithRssi[]  responses
+    -- num_responses InquiryResultWithRssi entries, laid out back-to-back.
+
+
+struct SynchronousConnectionCompleteEvent:
+  -- Synchronous Connection Complete Event (BR/EDR)
+
+  let hdr_size = hci.EventHeader.$size_in_bytes
+
+  0     [+hdr_size]                   hci.EventHeader   header
+
+  $next [+1]                          hci.StatusCode    status
+
+  $next [+2]                          UInt              connection_handle
+    -- A connection handle for the newly created SCO connection.
+    [requires: 0x0000 <= this <= 0x0EFF]
+
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr        bd_addr
+    -- BD_ADDR of the other connected device forming the connection.
+
+  $next [+1]                          LinkType          link_type
+
+  $next [+1]                          UInt              transmission_interval
+    -- Time between two consecutive eSCO instants measured in slots. Shall be
+    -- zero for SCO links.
+
+  $next [+1]                          UInt              retransmission_window
+    -- The size of the retransmission window measured in slots. Shall be zero
+    -- for SCO links.
+
+  $next [+2]                          UInt              rx_packet_length
+    -- Length in bytes of the eSCO payload in the receive direction. Shall be
+    -- zero for SCO links.
+
+  $next [+2]                          UInt              tx_packet_length
+    -- Length in bytes of the eSCO payload in the transmit direction. Shall be
+    -- zero for SCO links.
+
+  $next [+1]                          hci.CodingFormat  air_mode
+    -- The air coding format in use on the synchronous link.
+
+
+struct ExtendedInquiryResultEvent:
+  -- Extended Inquiry Result Event (v1.2) (BR/EDR)
+  let hdr_size = hci.EventHeader.$size_in_bytes
+  0     [+hdr_size]                   hci.EventHeader             header
+  $next [+1]                          UInt                        num_responses
+    -- Number of responses from the inquiry. The HCI_Extended_Inquiry_Result
+    -- event always contains a single response.
+    [requires: this == 0x01]
+
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr                  bd_addr
+    -- BD_ADDR of the device that responded.
+
+  $next [+1]                          hci.PageScanRepetitionMode  page_scan_repetition_mode
+    -- The Page Scan Repetition Mode being used by the remote device.
+
+  $next [+1]                          UInt                        reserved
+  $next [+3]                          hci.ClassOfDevice           class_of_device
+  $next [+2]                          hci.ClockOffset             clock_offset
+    -- The lower 15 bits represent bits 16-2 of CLKNPeripheral-CLK.
+
+  $next [+1]                          Int                         rssi
+    -- Units: dBm
+    [requires: -127 <= this <= 20]
+
+  $next [+240]                        UInt:8[240]                 extended_inquiry_response
+    -- Extended inquiry response data as defined in Vol 3, Part C, Sec 8
+
+
+struct EncryptionKeyRefreshCompleteEvent:
+  -- Encryption Key Refresh Complete Event (v2.1 + EDR) (BR/EDR & LE)
+  let hdr_size = hci.EventHeader.$size_in_bytes
+  0     [+hdr_size]  hci.EventHeader  header
+  $next [+1]         hci.StatusCode   status
+    -- The status of the encryption key refresh.
+
+  $next [+2]         UInt             connection_handle
+    -- The connection_handle on which the encryption key was refreshed due to
+    -- encryption being started or resumed.
+    [requires: 0x0000 <= this <= 0x0EFF]
+
+
+struct IoCapabilityRequestEvent:
+  -- IO Capability Request Event (v2.1 + EDR) (BR/EDR)
+  -- Indicates that the IO capabilities of the Host are required for the
+  -- Secure Simple Pairing process with the device at bd_addr.
+  let hdr_size = hci.EventHeader.$size_in_bytes
+  0     [+hdr_size]                   hci.EventHeader  header
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr       bd_addr
+    -- The address of the remote device involved in the Secure Simple Pairing
+    -- process.
+
+
+struct IoCapabilityResponseEvent:
+  -- IO Capability Response Event (v2.1 + EDR) (BR/EDR)
+  let hdr_size = hci.EventHeader.$size_in_bytes
+  0     [+hdr_size]                   hci.EventHeader                 header
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr                      bd_addr
+    -- The address of the remote device which the IO capabilities apply
+
+  $next [+1]                          hci.IoCapability                io_capability
+    -- IO Capabilities of the device
+
+  $next [+1]                          hci.GenericPresenceParam        oob_data_present
+    -- Whether out-of-band authentication data is present.
+
+  $next [+1]                          hci.AuthenticationRequirements  authentication_requirements
+    -- The authentication requirements of the remote device.
+
+
+struct UserConfirmationRequestEvent:
+  -- User Confirmation Request Event (v2.1 + EDR) (BR/EDR)
+  let hdr_size = hci.EventHeader.$size_in_bytes
+  0     [+hdr_size]                   hci.EventHeader  header
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr       bd_addr
+    -- Address of the device involved in simple pairing process
+
+  $next [+4]                          UInt             numeric_value
+    -- Numeric value to be displayed (up to 6 decimal digits).
+    [requires: 0 <= this <= 999999]
+
+
+struct UserPasskeyRequestEvent:
+  -- User Passkey Request Event (v2.1 + EDR) (BR/EDR)
+  let hdr_size = hci.EventHeader.$size_in_bytes
+  0     [+hdr_size]                   hci.EventHeader  header
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr       bd_addr
+    -- Address of the device involved in simple pairing process
+
+
+struct SimplePairingCompleteEvent:
+  -- Simple Pairing Complete Event (v2.1 + EDR) (BR/EDR)
+  let hdr_size = hci.EventHeader.$size_in_bytes
+  0     [+hdr_size]                   hci.EventHeader  header
+  $next [+1]                          hci.StatusCode   status
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr       bd_addr
+    -- Address of the device involved in simple pairing process
+
+
+struct UserPasskeyNotificationEvent:
+  -- User Passkey Notification Event (v2.1 + EDR) (BR/EDR)
+  let hdr_size = hci.EventHeader.$size_in_bytes
+  0     [+hdr_size]                   hci.EventHeader  header
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr       bd_addr
+    -- Address of the device involved in simple pairing process
+
+  $next [+4]                          UInt             passkey
+    -- Numeric value (passkey) entered by user.
+    [requires: 0 <= this <= 999999]
+
+
+struct LELongTermKeyRequestSubevent:
+  -- LE Long Term Key Request Event (v4.0) (LE)
+  0     [+LEMetaEvent.$size_in_bytes]  LEMetaEvent  le_meta_event
+  $next [+2]                           UInt         connection_handle
+    [requires: 0x0000 <= this <= 0x0EFF]
+
+  $next [+8]                           UInt         random_number
+  $next [+2]                           UInt         encrypted_diversifier
+
+
+struct ReadLocalVersionInfoCommandCompleteEvent:
+  let hdr_size = hci.CommandCompleteEvent.$size_in_bytes
+
+  0     [+hdr_size]  hci.CommandCompleteEvent  command_complete
+
+  $next [+1]         hci.StatusCode            status
+
+  $next [+1]         CoreSpecificationVersion  hci_version
+    -- Version of the HCI Specification supported by the Controller. See
+    -- Assigned Numbers
+
+  $next [+2]         UInt                      hci_subversion
+    -- Revision of the HCI implementation in the Controller. This value is
+    -- vendor-specific.
+
+  $next [+1]         UInt                      lmp_version
+    -- Version of the Current LMP supported by the Controller. See Assigned
+    -- Numbers
+
+  $next [+2]         UInt                      company_identifier
+    -- Company identifier for the manufacturer of the Controller. See Assigned
+    -- Numbers
+
+  $next [+2]         UInt                      lmp_subversion
+    -- Subversion of the Current LMP in the Controller. This value is
+    -- vendor-specific.
+
+
+struct ReadBdAddrCommandCompleteEvent:
+  -- Read BD_ADDR Command (v1.1) (BR/EDR, LE)
+  let hdr_size = hci.CommandCompleteEvent.$size_in_bytes
+  0     [+hdr_size]                   hci.CommandCompleteEvent  command_complete
+  $next [+1]                          hci.StatusCode            status
+  $next [+hci.BdAddr.$size_in_bytes]  hci.BdAddr                bd_addr
+
+
+struct ReadBufferSizeCommandCompleteEvent:
+  -- Read Buffer Size Command (v1.1)
+
+  let hdr_size = hci.CommandCompleteEvent.$size_in_bytes
+
+  0     [+hdr_size]  hci.CommandCompleteEvent  command_complete
+
+  $next [+1]         hci.StatusCode            status
+
+  $next [+2]         UInt                      acl_data_packet_length
+    -- Maximum length (in octets) of the data portion of each HCI ACL Data
+    -- packet that the Controller is able to accept.
+    [requires: 0x0001 <= this <= 0xFFFF]
+
+  $next [+1]         UInt                      synchronous_data_packet_length
+    -- Maximum length (in octets) of the data portion of each HCI Synchronous
+    -- Data packet that the Controller is able to accept.
+    [requires: 0x01 <= this <= 0xFF]
+
+  $next [+2]         UInt                      total_num_acl_data_packets
+    -- Total number of HCI ACL Data packets that can be stored in the data
+    -- buffers of the Controller.
+    [requires: 0x0001 <= this <= 0xFFFF]
+
+  $next [+2]         UInt                      total_num_synchronous_data_packets
+    -- Total number of HCI Synchronous Data packets that can be stored in the
+    -- data buffers of the Controller. A value of 0 indicates that the
+    -- Controller does not support SCO or eSCO over HCI.
+
+
+struct LEReadLocalSupportedFeaturesCommandCompleteEvent:
+  -- LE Read Local Supported Features Command (v4.0) (LE)
+  let hdr_size = hci.CommandCompleteEvent.$size_in_bytes
+  0     [+hdr_size]                      hci.CommandCompleteEvent  command_complete
+  $next [+1]                             hci.StatusCode            status
+  $next [+8]  bits:
+    0     [+LEFeatureSet.$size_in_bits]  LEFeatureSet              le_features
+
+
+struct ReadLocalSupportedCommandsCommandCompleteEvent:
+  -- Read Local Supported Commands Command (v1.2)
+  let hdr_size = hci.CommandCompleteEvent.$size_in_bytes
+  0     [+hdr_size]  hci.CommandCompleteEvent  command_complete
+  $next [+1]         hci.StatusCode            status
+  $next [+1]         SupportedCommands(0)      supported_commands_0
+  $next [+1]         SupportedCommands(1)      supported_commands_1
+  $next [+1]         SupportedCommands(2)      supported_commands_2
+  $next [+1]         SupportedCommands(3)      supported_commands_3
+  $next [+1]         SupportedCommands(4)      supported_commands_4
+  $next [+1]         SupportedCommands(5)      supported_commands_5
+  $next [+1]         SupportedCommands(6)      supported_commands_6
+  $next [+1]         SupportedCommands(7)      supported_commands_7
+  $next [+1]         SupportedCommands(8)      supported_commands_8
+  $next [+1]         SupportedCommands(9)      supported_commands_9
+  $next [+1]         SupportedCommands(10)     supported_commands_10
+  $next [+1]         SupportedCommands(11)     supported_commands_11
+  $next [+1]         SupportedCommands(12)     supported_commands_12
+  $next [+1]         SupportedCommands(13)     supported_commands_13
+  $next [+1]         SupportedCommands(14)     supported_commands_14
+  $next [+1]         SupportedCommands(15)     supported_commands_15
+  $next [+1]         SupportedCommands(16)     supported_commands_16
+  $next [+1]         SupportedCommands(17)     supported_commands_17
+  $next [+1]         SupportedCommands(18)     supported_commands_18
+  $next [+1]         SupportedCommands(19)     supported_commands_19
+  $next [+1]         SupportedCommands(20)     supported_commands_20
+  $next [+1]         SupportedCommands(21)     supported_commands_21
+  $next [+1]         SupportedCommands(22)     supported_commands_22
+  $next [+1]         SupportedCommands(23)     supported_commands_23
+  $next [+1]         SupportedCommands(24)     supported_commands_24
+  $next [+1]         SupportedCommands(25)     supported_commands_25
+  $next [+1]         SupportedCommands(26)     supported_commands_26
+  $next [+1]         SupportedCommands(27)     supported_commands_27
+  $next [+1]         SupportedCommands(28)     supported_commands_28
+  $next [+1]         SupportedCommands(29)     supported_commands_29
+  $next [+1]         SupportedCommands(30)     supported_commands_30
+  $next [+1]         SupportedCommands(31)     supported_commands_31
+  $next [+1]         SupportedCommands(32)     supported_commands_32
+  $next [+1]         SupportedCommands(33)     supported_commands_33
+  $next [+1]         SupportedCommands(34)     supported_commands_34
+  $next [+1]         SupportedCommands(35)     supported_commands_35
+  $next [+1]         SupportedCommands(36)     supported_commands_36
+  $next [+1]         SupportedCommands(37)     supported_commands_37
+  $next [+1]         SupportedCommands(38)     supported_commands_38
+  $next [+1]         SupportedCommands(39)     supported_commands_39
+  $next [+1]         SupportedCommands(40)     supported_commands_40
+  $next [+1]         SupportedCommands(41)     supported_commands_41
+  $next [+1]         SupportedCommands(42)     supported_commands_42
+  $next [+1]         SupportedCommands(43)     supported_commands_43
+  $next [+1]         SupportedCommands(44)     supported_commands_44
+  $next [+1]         SupportedCommands(45)     supported_commands_45
+  $next [+1]         SupportedCommands(46)     supported_commands_46
+  $next [+1]         SupportedCommands(47)     supported_commands_47
diff --git a/pw_bluetooth/public/pw_bluetooth/hci_test.emb b/pw_bluetooth/public/pw_bluetooth/hci_test.emb
new file mode 100644
index 0000000..7619912
--- /dev/null
+++ b/pw_bluetooth/public/pw_bluetooth/hci_test.emb
@@ -0,0 +1,37 @@
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+# This file contains Emboss definitions for Host Controller Interface packets
+# and types found in the Bluetooth Core Specification. The Emboss compiler is
+# used to generate a C++ header from this file.
+
+import "hci_common.emb" as hci
+
+[$default byte_order: "LittleEndian"]
+[(cpp) namespace: "pw::bluetooth::emboss"]
+# ============================ Test packets =============================
+
+
+struct TestCommandPacket:
+  -- Test HCI Command packet with single byte payload.
+  let hdr_size = hci.CommandHeader.$size_in_bytes
+  0     [+hdr_size]  hci.CommandHeader  header
+  $next [+1]         UInt               payload
+
+
+struct TestEventPacket:
+  -- Test HCI Event packet with single byte payload.
+  let hdr_size = hci.EventHeader.$size_in_bytes
+  0     [+hdr_size]  hci.EventHeader  header
+  $next [+1]         UInt             payload
diff --git a/pw_bluetooth/public/pw_bluetooth/vendor.emb b/pw_bluetooth/public/pw_bluetooth/hci_vendor.emb
similarity index 62%
rename from pw_bluetooth/public/pw_bluetooth/vendor.emb
rename to pw_bluetooth/public/pw_bluetooth/hci_vendor.emb
index 6f306d5..7e3a055 100644
--- a/pw_bluetooth/public/pw_bluetooth/vendor.emb
+++ b/pw_bluetooth/public/pw_bluetooth/hci_vendor.emb
@@ -19,7 +19,7 @@
 # NOTE: The definitions below are incomplete. They get added as needed.
 # This list will grow as we support more vendor features.
 
-import "hci.emb" as hci
+import "hci_common.emb" as hci
 
 [$default byte_order: "LittleEndian"]
 [(cpp) namespace: "pw::bluetooth::vendor::android_hci"]
@@ -32,21 +32,23 @@
   0     [+hdr_size]  hci.CommandHeader  header
   $next [+1]         UInt               sub_opcode
 
-
 # ======================= Android HCI extensions ========================
 # Documentation: https://source.android.com/devices/bluetooth/hci_requirements
 
+
 enum Capability:
   [maximum_bits: 8]
   NOT_CAPABLE = 0x00
   CAPABLE     = 0x01
 
+
 bits AudioCodecSupportMask:
-  0 [+1] Flag sbc
-  1 [+1] Flag aac
-  2 [+1] Flag aptx
-  3 [+1] Flag aptx_hd
-  4 [+1] Flag ldac
+  0 [+1]  Flag  sbc
+  1 [+1]  Flag  aac
+  2 [+1]  Flag  aptx
+  3 [+1]  Flag  aptx_hd
+  4 [+1]  Flag  ldac
+
 
 enum A2dpCodecType:
   [maximum_bits: 8]
@@ -56,9 +58,11 @@
   APTX_HD = 0x08
   LDAC    = 0x10
 
+
 struct A2dpScmsTEnable:
-  0     [+1] hci.GenericEnableParam enabled
-  $next [+1] UInt                   header
+  0     [+1]  hci.GenericEnableParam  enabled
+  $next [+1]  UInt                    header
+
 
 enum A2dpSamplingFrequency:
   [maximum_bits: 8]
@@ -67,17 +71,20 @@
   HZ_88200 = 0x04
   HZ_96000 = 0x08
 
+
 enum A2dpBitsPerSample:
   [maximum_bits: 8]
   BITS_PER_SAMPLE_16 = 0x01
   BITS_PER_SAMPLE_24 = 0x02
   BITS_PER_SAMPLE_32 = 0x04
 
+
 enum A2dpChannelMode:
   [maximum_bits: 8]
   MONO   = 0x01
   STEREO = 0x02
 
+
 enum SbcSamplingFrequency:
   [maximum_bits: 4]
   HZ_48000 = 0x01
@@ -85,6 +92,7 @@
   HZ_32000 = 0x04
   HZ_16000 = 0x08
 
+
 enum SbcChannelMode:
   [maximum_bits: 4]
   JOINT_STEREO = 0x01
@@ -92,6 +100,7 @@
   DUAL_CHANNEL = 0x04
   MONO         = 0x08
 
+
 enum SbcBlockLen:
   [maximum_bits: 4]
   BLOCK_LEN_16 = 0x01
@@ -99,136 +108,157 @@
   BLOCK_LEN_8  = 0x04
   BLOCK_LEN_4  = 0x08
 
+
 enum SbcSubBands:
   [maximum_bits: 2]
   SUBBANDS_8 = 0x01
   SUBBANDS_4 = 0x02
 
+
 enum SbcAllocationMethod:
   [maximum_bits: 2]
   LOUNDNESS = 0x01
   SNR       = 0x02
 
+
 enum AacEnableVariableBitRate:
   -- 1-octet boolean "enable"/"disable" parameter for AAC variable bitrate
   [maximum_bits: 8]
   DISABLE = 0x00
   ENABLE  = 0x80
 
+
 enum LdacBitrateIndex:
   -- Values 0x03 - 0x7E are reserved
   -- Values 0x80 - 0xFF are reserved
   [maximum_bits: 8]
-  HIGH              = 0x00
-  MID               = 0x01
-  LOW               = 0x02
-  ADAPTIVE_BITRATE  = 0x7F
+  HIGH             = 0x00
+  MID              = 0x01
+  LOW              = 0x02
+  ADAPTIVE_BITRATE = 0x7F
+
 
 bits LdacChannelMode:
   -- Bitmask values for LDAC Channel Mode
-  0 [+1] Flag stereo
-  1 [+1] Flag dual
-  2 [+1] Flag mono
+  0 [+1]  Flag  stereo
+  1 [+1]  Flag  dual
+  2 [+1]  Flag  mono
+
 
 struct SbcCodecInformation:
-  0       [+1]  bits:
-    0     [+2]  SbcAllocationMethod   allocation_method
-    $next [+2]  SbcSubBands           subbands
-    $next [+4]  SbcBlockLen           block_length
-  $next   [+1]  UInt                  min_bitpool_value
-  $next   [+1]  UInt                  max_bitpool_value
-  $next   [+1]  bits:
-    0     [+4]  SbcChannelMode        channel_mode
-    $next [+4]  SbcSamplingFrequency  sampling_frequency
-  $next   [+28] UInt:8[28]            reserved
+  0     [+1]  bits:
+    0     [+2]       SbcAllocationMethod   allocation_method
+    $next [+2]       SbcSubBands           subbands
+    $next [+4]       SbcBlockLen           block_length
+
+  $next [+1]         UInt                  min_bitpool_value
+  $next [+1]         UInt                  max_bitpool_value
+  $next [+1]  bits:
+    0     [+4]       SbcChannelMode        channel_mode
+    $next [+4]       SbcSamplingFrequency  sampling_frequency
+
+  $next [+28]        UInt:8[28]            reserved
+
 
 struct AacCodecInformation:
-  0       [+1]  UInt                      object_type
-  $next   [+1]  AacEnableVariableBitRate  variable_bit_rate
-  $next   [+30] UInt:8[30]                reserved
+  0     [+1]   UInt                      object_type
+  $next [+1]   AacEnableVariableBitRate  variable_bit_rate
+  $next [+30]  UInt:8[30]                reserved
+
 
 struct LdacCodecInformation:
-  0       [+4]  UInt                  vendor_id
+  0     [+4]         UInt              vendor_id
     -- Must always be set to kLdacVendorId
-  $next   [+2]  UInt                  codec_id
+
+  $next [+2]         UInt              codec_id
     -- Must always be set to kLdacCodecId
     -- All other values are reserved
-  $next   [+1]  LdacBitrateIndex      bitrate_index
-    -- See enum class LdacBitrateIndex in this file for possible values
-  $next   [+1]  bits:
-    0     [+3]  LdacChannelMode       ldac_channel_mode
-      -- Bitmask: LDAC channel mode (see LdacChannelMode for bitmask values)
-  $next   [+24] UInt:8[24]            reserved
 
+  $next [+1]         LdacBitrateIndex  bitrate_index
+    -- See enum class LdacBitrateIndex in this file for possible values
+
+  $next [+1]  bits:
+    0     [+3]       LdacChannelMode   ldac_channel_mode
+      -- Bitmask: LDAC channel mode (see LdacChannelMode for bitmask values)
+
+  $next [+24]        UInt:8[24]        reserved
 
 # ============ Commands ============
 
 
 struct StartA2dpOffloadCommand:
   let vendor_size = AndroidCommandHeader.$size_in_bytes
-  0      [+vendor_size]  AndroidCommandHeader  vendor_command
 
-  $next  [+4] bits:
-    0    [+8]         A2dpCodecType           codec_type
+  0     [+vendor_size]  AndroidCommandHeader   vendor_command
+
+  $next [+4]  bits:
+
+    0     [+8]          A2dpCodecType          codec_type
       -- See enum class A2dpCodecType in this file for possible values
 
-  $next  [+2]         UInt                    max_latency
+  $next [+2]            UInt                   max_latency
     -- Max latency allowed in ms. A value of zero disables flush.
 
-  $next  [+2]         A2dpScmsTEnable         scms_t_enable
+  $next [+2]            A2dpScmsTEnable        scms_t_enable
 
-  $next  [+4] bits:
-    0    [+8]         A2dpSamplingFrequency   sampling_frequency
+  $next [+4]  bits:
+
+    0     [+8]          A2dpSamplingFrequency  sampling_frequency
       -- See enum class A2dpSamplingFrequency in this file for possible values
 
-  $next  [+1]         A2dpBitsPerSample       bits_per_sample
+  $next [+1]            A2dpBitsPerSample      bits_per_sample
     -- See enum class A2dpBitsPerSample in this file for possible values
 
-  $next  [+1]         A2dpChannelMode         channel_mode
+  $next [+1]            A2dpChannelMode        channel_mode
     -- See enum class A2dpChannelMode in this file for possible values
 
-  $next  [+4]         UInt                    encoded_audio_bitrate
+  $next [+4]            UInt                   encoded_audio_bitrate
     -- The encoded audio bitrate in bits per second
     -- 0x00000000 - The audio bitrate is not specified / unused
     -- 0x00000001 - 0x00FFFFFF - Encoded audio bitrate in bits per second
     -- 0x01000000 - 0xFFFFFFFF - Reserved
     [requires: 0x00000000 <= this <= 0x00FFFFFF]
 
-  $next  [+2]         UInt                    connection_handle
+  $next [+2]            UInt                   connection_handle
     -- Connection handle of A2DP connection being configured (only the lower 12-bits are meaningful)
     --   Range: 0x0000 to 0x0EFF
     [requires: 0x0000 <= this <= 0x0EFF]
 
-  $next  [+2]         UInt                    l2cap_channel_id
+  $next [+2]            UInt                   l2cap_channel_id
     -- L2CAP channel ID to be used for this A2DP connection
 
-  $next  [+2]         UInt                    l2cap_mtu_size
+  $next [+2]            UInt                   l2cap_mtu_size
     -- Maximum size of L2CAP MTU containing encoded audio packets
 
   if codec_type == A2dpCodecType.SBC:
-    28   [+32]      SbcCodecInformation     sbc_codec_information
+    28    [+32]         SbcCodecInformation    sbc_codec_information
+
   if codec_type == A2dpCodecType.AAC:
-    28   [+32]      AacCodecInformation     aac_codec_information
+    28    [+32]         AacCodecInformation    aac_codec_information
+
   if codec_type == A2dpCodecType.LDAC:
-    28   [+32]      LdacCodecInformation    ldac_codec_information
+    28    [+32]         LdacCodecInformation   ldac_codec_information
+
   if codec_type == A2dpCodecType.APTX || codec_type == A2dpCodecType.APTX_HD:
-    28   [+32]      UInt:8[32]              reserved
+    28    [+32]         UInt:8[32]             reserved
+
 
 struct StopA2dpOffloadCommand:
   let vendor_size = AndroidCommandHeader.$size_in_bytes
-  0      [+vendor_size]  AndroidCommandHeader  vendor_command
+  0 [+vendor_size]  AndroidCommandHeader  vendor_command
+
 
 struct LEMultiAdvtEnableCommand:
   -- LE multi-advertising enable command.
   let vendor_size = AndroidCommandHeader.$size_in_bytes
-  0      [+vendor_size]  AndroidCommandHeader    vendor_command
-  $next  [+1]            hci.GenericEnableParam  enable
-  $next  [+1]            UInt                    advertising_handle
+  0     [+vendor_size]  AndroidCommandHeader    vendor_command
+  $next [+1]            hci.GenericEnableParam  enable
+  $next [+1]            UInt                    advertising_handle
+
 
 struct LEGetVendorCapabilitiesCommand:
   let hdr_size = hci.CommandHeader.$size_in_bytes
-  0      [+hdr_size]  hci.CommandHeader       header
-
+  0 [+hdr_size]  hci.CommandHeader  header
 
 # ============ Events ============
 
@@ -247,41 +277,56 @@
     -- Handle used to identify the connection that caused the state change (i.e.
     -- advertising instance to be disabled). Value will be 0xFFFF if invalid.
 
+
 struct LEGetVendorCapabilitiesCommandCompleteEvent:
   let hdr_size = hci.CommandCompleteEvent.$size_in_bytes
-  0 [+hdr_size] hci.CommandCompleteEvent command_complete
-  $next [+1]                                    hci.StatusCode        status
-  $next [+1] UInt max_advt_instances
+  0     [+hdr_size]  hci.CommandCompleteEvent  command_complete
+  $next [+1]         hci.StatusCode            status
+  $next [+1]         UInt                      max_advt_instances
     -- Number of advertisement instances supported
     -- Deprecated in Google feature spec v0.98 and higher
-  $next [+1] Capability offloaded_resolution_of_private_address
+
+  $next [+1]         Capability                offloaded_resolution_of_private_address
     -- BT chip capability of RPA
     -- Deprecated in Google feature spec v0.98 and higher
-  $next [+2] UInt total_scan_results_storage
+
+  $next [+2]         UInt                      total_scan_results_storage
     -- Storage for scan results in bytes
-  $next [+1] UInt max_irk_list_sz
+
+  $next [+1]         UInt                      max_irk_list_sz
     -- Number of IRK entries supported in the firmware
-  $next [+1] Capability filtering_support
+
+  $next [+1]         Capability                filtering_support
     -- Support for filtering in the controller
-  $next [+1] UInt max_filter
+
+  $next [+1]         UInt                      max_filter
     -- Number of filters supported
-  $next [+1] Capability activity_energy_info_support
+
+  $next [+1]         Capability                activity_energy_info_support
     -- Supports reporting of activity and energy information
-  $next [+2] bits version_supported:
+
+  $next [+2]         bits                      version_supported:
     -- Specifies the version of the Google feature spec supported
-    0 [+8] UInt major_number
-    $next [+8] UInt minor_number
-  $next [+2] UInt total_num_of_advt_tracked
+    0     [+8]  UInt  major_number
+    $next [+8]  UInt  minor_number
+
+  $next [+2]         UInt                      total_num_of_advt_tracked
     -- Total number of advertisers tracked for OnLost/OnFound purposes
-  $next [+1] Capability extended_scan_support
+
+  $next [+1]         Capability                extended_scan_support
     -- Supports extended scan window and interval
-  $next [+1] Capability debug_logging_supported
+
+  $next [+1]         Capability                debug_logging_supported
     -- Supports logging of binary debug information from controller
-  $next [+1] Capability le_address_generation_offloading_support
+
+  $next [+1]         Capability                le_address_generation_offloading_support
     -- Deprecated in Google feature spec v0.98 and higher
-  $next [+4] bits:
-    0 [+5] AudioCodecSupportMask a2dp_source_offload_capability_mask
-  $next [+1] Capability bluetooth_quality_report_support
+
+  $next [+4]  bits:
+    0     [+5]       AudioCodecSupportMask     a2dp_source_offload_capability_mask
+
+  $next [+1]         Capability                bluetooth_quality_report_support
     -- Supports reporting of Bluetooth Quality events
-  $next [+4] bits:
-    0 [+5] AudioCodecSupportMask dynamic_audio_buffer_support
+
+  $next [+4]  bits:
+    0     [+5]       AudioCodecSupportMask     dynamic_audio_buffer_support
diff --git a/pw_bluetooth/size_report/BUILD.gn b/pw_bluetooth/size_report/BUILD.gn
index 37ad18b..1492456 100644
--- a/pw_bluetooth/size_report/BUILD.gn
+++ b/pw_bluetooth/size_report/BUILD.gn
@@ -22,7 +22,7 @@
     sources = [ "make_view_and_write.cc" ]
     deps = [
       "$dir_pw_bloat:bloat_this_binary",
-      "$dir_pw_bluetooth:emboss_hci",
+      "$dir_pw_bluetooth:emboss_hci_group",
     ]
   }
 
@@ -30,7 +30,7 @@
     sources = [ "make_2_views_and_write.cc" ]
     deps = [
       "$dir_pw_bloat:bloat_this_binary",
-      "$dir_pw_bluetooth:emboss_hci",
+      "$dir_pw_bluetooth:emboss_hci_group",
     ]
   }
 }
diff --git a/pw_bluetooth_hci/BUILD.gn b/pw_bluetooth_hci/BUILD.gn
index 0f51071..75ca8fe 100644
--- a/pw_bluetooth_hci/BUILD.gn
+++ b/pw_bluetooth_hci/BUILD.gn
@@ -63,12 +63,12 @@
   tests = [
     ":packet_test",
     ":uart_transport_test",
-    ":uart_transport_fuzzer_test",
   ]
+  group_deps = [ ":fuzzers" ]
 }
 
-group("fuzzers") {
-  deps = [ ":uart_transport_fuzzer" ]
+pw_fuzzer_group("fuzzers") {
+  fuzzers = [ ":uart_transport_fuzzer" ]
 }
 
 pw_test("packet_test") {
diff --git a/pw_bluetooth_hci/docs.rst b/pw_bluetooth_hci/docs.rst
index 863be7c..2d8744a 100644
--- a/pw_bluetooth_hci/docs.rst
+++ b/pw_bluetooth_hci/docs.rst
@@ -31,24 +31,25 @@
 A decoder function is provided to parse HCI packets out of a HCI UART Transport
 Layer buffer which may contain multiple packets.
 
-  .. cpp:function:: StatusWithSize DecodeHciUartData(ConstByteSpan data, const DecodedPacketCallback& packet_callback);
+.. cpp:function:: StatusWithSize DecodeHciUartData(ConstByteSpan data, const DecodedPacketCallback& packet_callback);
 
-    Parses the HCI Packets out of a HCI UART Transport Layer buffer.
+   Parses the HCI Packets out of a HCI UART Transport Layer buffer.
 
-    Parses as many complete HCI packets out of the provided buffer based on the
-    HCI UART Transport Layer as defined by Bluetooth Core Specification version
-    5.3 "Host Controller Interface Transport Layer" volume 4, part A.
+   Parses as many complete HCI packets out of the provided buffer based on the
+   HCI UART Transport Layer as defined by Bluetooth Core Specification version
+   5.3 "Host Controller Interface Transport Layer" volume 4, part A.
 
-    The HciPacketCallback is invoked for each full HCI packet.
+   The HciPacketCallback is invoked for each full HCI packet.
 
-    Returns the number of bytes processed and a status based on:
+   Returns the number of bytes processed and a status based on:
 
-      * OK - No invalid packet indicator found.
-      * DATA_LOSS - An invalid packet indicator was detected between packets.
-        Synchronization has been lost. The caller is responsible for
-        regaining synchronization
+   * OK - No invalid packet indicator found.
+   * DATA_LOSS - An invalid packet indicator was detected between packets.
+     Synchronization has been lost. The caller is responsible for
+     regaining synchronization
 
-    .. note:: The caller is responsible for detecting the lack of progress due
-      to an undersized data buffer and/or an invalid length field in case a full
+   .. note::
+      The caller is responsible for detecting the lack of progress due to an
+      undersized data buffer and/or an invalid length field in case a full
       buffer is passed and no bytes are processed.
 
diff --git a/pw_boot/docs.rst b/pw_boot/docs.rst
index 8be00c7..cec1744 100644
--- a/pw_boot/docs.rst
+++ b/pw_boot/docs.rst
@@ -22,7 +22,7 @@
 The high level pw_boot boot sequence looks like the following pseudo-code
 invocation of the user-implemented functions:
 
-.. code:: cpp
+.. code-block:: cpp
 
    void pw_boot_Entry() {  // Boot entry point provided by backend.
      pw_boot_PreStaticMemoryInit();  // User-implemented function.
diff --git a/pw_boot_cortex_m/docs.rst b/pw_boot_cortex_m/docs.rst
index 5f53c1b..ef2b05d 100644
--- a/pw_boot_cortex_m/docs.rst
+++ b/pw_boot_cortex_m/docs.rst
@@ -29,7 +29,7 @@
 The high level pw_boot_cortex_m boot sequence looks like the following
 pseudo-code invocation of the user-implemented functions:
 
-.. code:: cpp
+.. code-block:: cpp
 
    void pw_boot_Entry() {  // Boot entry point.
      // Set VTOR.
diff --git a/pw_build/BUILD.gn b/pw_build/BUILD.gn
index 6c1f1e4..d45832b 100644
--- a/pw_build/BUILD.gn
+++ b/pw_build/BUILD.gn
@@ -313,7 +313,10 @@
 
 pw_doc_group("docs") {
   sources = [
+    "bazel.rst",
+    "cmake.rst",
     "docs.rst",
+    "gn.rst",
     "python.rst",
   ]
 }
diff --git a/pw_build/bazel.rst b/pw_build/bazel.rst
new file mode 100644
index 0000000..d4f0f82
--- /dev/null
+++ b/pw_build/bazel.rst
@@ -0,0 +1,223 @@
+Bazel
+=====
+Bazel is currently very experimental, and only builds for host and ARM Cortex-M
+microcontrollers.
+
+.. _module-pw_build-bazel-wrapper-rules:
+
+Wrapper rules
+-------------
+The common configuration for Bazel for all modules is in the ``pigweed.bzl``
+file. The built-in Bazel rules ``cc_binary``, ``cc_library``, and ``cc_test``
+are wrapped with ``pw_cc_binary``, ``pw_cc_library``, and ``pw_cc_test``.
+These wrappers add parameters to calls to the compiler and linker.
+
+pw_linker_script
+----------------
+In addition to wrapping the built-in rules, Pigweed also provides a custom
+rule for handling linker scripts with Bazel. e.g.
+
+.. code-block:: python
+
+  pw_linker_script(
+    name = "some_linker_script",
+    linker_script = ":some_configurable_linker_script.ld",
+    defines = [
+        "PW_BOOT_FLASH_BEGIN=0x08000200",
+        "PW_BOOT_FLASH_SIZE=1024K",
+        "PW_BOOT_HEAP_SIZE=112K",
+        "PW_BOOT_MIN_STACK_SIZE=1K",
+        "PW_BOOT_RAM_BEGIN=0x20000000",
+        "PW_BOOT_RAM_SIZE=192K",
+        "PW_BOOT_VECTOR_TABLE_BEGIN=0x08000000",
+        "PW_BOOT_VECTOR_TABLE_SIZE=512",
+    ],
+  )
+
+  pw_cc_binary(
+    name = "some_binary",
+    srcs = ["some_source.c"],
+    additional_linker_inputs = [":some_linker_script"],
+    linkopts = ["-T $(location :some_linker_script)"],
+  )
+
+pw_cc_facade
+------------
+In Bazel, a :ref:`facade <docs-module-structure-facades>` module has a few
+components:
+
+#. The **facade target**, i.e. the interface to the module. This is what
+   *backend implementations* depend on to know what interface they're supposed
+   to implement.  The facade is declared by creating a ``pw_cc_facade`` target,
+   which is just a thin wrapper for ``cc_library``. For example,
+
+   .. code-block:: python
+
+     pw_cc_facade(
+         name = "binary_semaphore_facade",
+         # The header that constitutes the facade.
+         hdrs = [
+             "public/pw_sync/binary_semaphore.h",
+         ],
+         includes = ["public"],
+         # Dependencies of this header.
+         deps = [
+             "//pw_chrono:system_clock",
+             "//pw_preprocessor",
+         ],
+     )
+
+   .. note::
+     As pure interfaces, ``pw_cc_facade`` targets should not include any source
+     files. Backend-independent source files should be placed in the "library
+     target" instead.
+
+#. The **library target**, i.e. both the facade (interface) and backend
+   (implementation). This is what *users of the module* depend on. It's a
+   regular ``pw_cc_library`` that exposes the same headers as the facade, but
+   has a dependency on the "backend label flag" (discussed next). It may also
+   include some source files (if these are backend-independent). For example,
+
+   .. code-block:: python
+
+     pw_cc_library(
+         name = "binary_semaphore",
+         # A backend-independent source file.
+         srcs = [
+             "binary_semaphore.cc",
+         ],
+         # The same header as exposed by the facade.
+         hdrs = [
+             "public/pw_sync/binary_semaphore.h",
+         ],
+         deps = [
+             # Dependencies of this header
+             "//pw_chrono:system_clock",
+             "//pw_preprocessor",
+             # The backend, hidden behind a label_flag.
+             "@pigweed_config//:pw_sync_binary_semaphore_backend",
+         ],
+     )
+
+   .. note::
+     You may be tempted to reduce duplication in the BUILD.bazel files and
+     simply add the facade target to the ``deps`` of the library target,
+     instead of re-declaring the facade's ``hdrs`` and ``deps``. *Do not do
+     this!* It's a layering check violation: the facade headers provide the
+     module's interface, and should be directly exposed by the target the users
+     depend on.
+
+#. The **backend label flag**. This is a `label_flag
+   <https://bazel.build/extending/config#label-typed-build-settings>`_: a
+   dependency edge in the build graph that can be overridden by downstream projects.
+   For facades defined in upstream Pigweed, the ``label_flags`` are collected in
+   the :ref:`pigweed_config <docs-build_system-bazel_configuration>`.
+
+#. The **backend target** implements a particular backend for a facade. It's
+   just a plain ``pw_cc_library``, with a dependency on the facade target. For example,
+
+   .. code-block:: python
+
+     pw_cc_library(
+         name = "binary_semaphore",
+         srcs = [
+             "binary_semaphore.cc",
+         ],
+         hdrs = [
+             "public/pw_sync_stl/binary_semaphore_inline.h",
+             "public/pw_sync_stl/binary_semaphore_native.h",
+             "public_overrides/pw_sync_backend/binary_semaphore_inline.h",
+             "public_overrides/pw_sync_backend/binary_semaphore_native.h",
+         ],
+         includes = [
+             "public",
+             "public_overrides",
+         ],
+         deps = [
+             # Dependencies of the backend's headers and sources.
+             "//pw_assert",
+             "//pw_chrono:system_clock",
+             # A dependency on the facade target, which defines the interface
+             # this backend target implements.
+             "//pw_sync:binary_semaphore_facade",
+         ],
+     )
+
+   If a project uses only one backend for a given facade, the backend label
+   flag should point at that backend target.
+
+#. The **facade constraint setting** and **backend constraint values**. Every
+   facade has an associated `constraint setting
+   <https://bazel.build/concepts/platforms#api-review>`_ (enum used in platform
+   definition), and each backend for this facade has an associated
+   ``constraint_value`` (enum value). Example:
+
+   .. code-block:: python
+
+     # //pw_sync/BUILD.bazel
+     constraint_setting(
+       name = "binary_semaphore_backend_constraint_setting",
+     )
+
+     # //pw_sync_stl/BUILD.bazel
+     constraint_value(
+       name = "binary_semaphore_backend",
+       constraint_setting = "//pw_sync:binary_semaphore_backend_constraint_setting",
+     )
+
+     # //pw_sync_freertos/BUILD.bazel
+     constraint_value(
+       name = "binary_semaphore_backend",
+       constraint_setting = "//pw_sync:binary_semaphore_backend_constraint_setting",
+     )
+
+   `Target platforms <https://bazel.build/extending/platforms>`_ for Pigweed
+   projects should indicate which backend they select for each facade by
+   listing the corresponding ``constraint_value`` in their definition. This can
+   be used in a couple of ways:
+
+   #.  It allows projects to switch between multiple backends based only on the
+       `target platform <https://bazel.build/extending/platforms>`_ using a
+       *backend multiplexer* (see below) instead of setting label flags in
+       their ``.bazelrc``.
+
+   #.  It allows tests or libraries that only support a particular backend to
+       express this through the `target_compatible_with
+       <https://bazel.build/reference/be/common-definitions#common.target_compatible_with>`_
+       attribute. Bazel will use this to `automatically skip incompatible
+       targets in wildcard builds
+       <https://bazel.build/extending/platforms#skipping-incompatible-targets>`_.
+
+#. The **backend multiplexer**. If a project uses more than one backend for a
+   given facade (e.g., it uses different backends for host and embedded target
+   builds), the backend label flag will point to a target that resolves to the
+   correct backend based on the target platform. This will typically be an
+   `alias <https://bazel.build/reference/be/general#alias>`_ with a ``select``
+   statement mapping constraint values to the appropriate backend targets. For
+   example,
+
+   .. code-block:: python
+
+     alias(
+         name = "pw_sync_binary_semaphore_backend_multiplexer",
+         actual = select({
+             "//pw_sync_stl:binary_semaphore_backend": "@pigweed//pw_sync_stl:binary_semaphore",
+             "//pw_sync_freertos:binary_semaphore_backend": "@pigweed//pw_sync_freertos:binary_semaphore_backend",
+             # If we're building for a host OS, use the STL backend.
+             "@platforms//os:macos": "@pigweed//pw_sync_stl:binary_semaphore",
+             "@platforms//os:linux": "@pigweed//pw_sync_stl:binary_semaphore",
+             "@platforms//os:windows": "@pigweed//pw_sync_stl:binary_semaphore",
+             # Unless the target platform is the host platform, it must
+             # explicitly specify which backend to use. The unspecified_backend
+             # is not compatible with any platform; taking this branch will produce
+             # an informative error.
+             "//conditions:default": "@pigweed//pw_build:unspecified_backend",
+         }),
+     )
+
+Toolchains and platforms
+------------------------
+Pigweed provides clang-based host toolchains for Linux and Mac, and an Arm gcc
+toolchain. The clang-based Linux and Arm gcc toolchains are entirely hermetic.
+We don't currently provide a host toolchain for Windows.
+
diff --git a/pw_build/bazel_internal/pigweed_internal.bzl b/pw_build/bazel_internal/pigweed_internal.bzl
index b40fc3e..ecfa834 100644
--- a/pw_build/bazel_internal/pigweed_internal.bzl
+++ b/pw_build/bazel_internal/pigweed_internal.bzl
@@ -95,11 +95,6 @@
         cc_toolchain = cc_toolchain,
         user_compile_flags = ctx.fragments.cpp.copts + ctx.fragments.cpp.conlyopts,
     )
-    action_flags = cc_common.get_memory_inefficient_command_line(
-        feature_configuration = feature_configuration,
-        action_name = C_COMPILE_ACTION_NAME,
-        variables = c_compile_variables,
-    )
     env = cc_common.get_environment_variables(
         feature_configuration = feature_configuration,
         action_name = C_COMPILE_ACTION_NAME,
@@ -115,6 +110,11 @@
         arguments = [
             "-E",
             "-P",
+            # TODO: b/296928739 - This flag is needed so cc1 can be located
+            # despite the presence of symlinks. Normally this is provided
+            # through copts inherited from the toolchain, but since those are
+            # not pulled in here the flag must be explicitly added.
+            "-no-canonical-prefixes",
             "-xc",
             ctx.file.linker_script.short_path,
             "-o",
@@ -122,7 +122,7 @@
         ] + [
             "-D" + d
             for d in ctx.attr.defines
-        ] + action_flags + ctx.attr.copts,
+        ] + ctx.attr.copts,
         env = env,
     )
     linker_input = cc_common.create_linker_input(
diff --git a/pw_build/cmake.rst b/pw_build/cmake.rst
new file mode 100644
index 0000000..11f3b75
--- /dev/null
+++ b/pw_build/cmake.rst
@@ -0,0 +1,184 @@
+CMake
+=====
+Pigweed's `CMake`_ support is provided primarily for projects that have an
+existing CMake build and wish to integrate Pigweed without switching to a new
+build system.
+
+The following command generates Ninja build files for a host build in the
+``out/cmake_host`` directory:
+
+.. code-block:: sh
+
+  cmake -B out/cmake_host -S "$PW_ROOT" -G Ninja -DCMAKE_TOOLCHAIN_FILE=$PW_ROOT/pw_toolchain/host_clang/toolchain.cmake
+
+The ``PW_ROOT`` environment variable must point to the root of the Pigweed
+directory. This variable is set by Pigweed's environment setup.
+
+Tests can be executed with the ``pw_run_tests.GROUP`` targets. To run Pigweed
+module tests, execute ``pw_run_tests.modules``:
+
+.. code-block:: sh
+
+  ninja -C out/cmake_host pw_run_tests.modules
+
+:ref:`module-pw_watch` supports CMake, so you can also run
+
+.. code-block:: sh
+
+  pw watch -C out/cmake_host pw_run_tests.modules
+
+CMake functions
+---------------
+CMake convenience functions are defined in ``pw_build/pigweed.cmake``.
+
+* ``pw_add_library_generic`` -- The base helper used to instantiate CMake
+  libraries. This is meant for use in downstream projects as upstream Pigweed
+  modules are expected to use ``pw_add_library``.
+* ``pw_add_library`` -- Add an upstream Pigweed library.
+* ``pw_add_facade_generic`` -- The base helper used to instantiate facade
+  libraries. This is meant for use in downstream projects as upstream Pigweed
+  modules are expected to use ``pw_add_facade``.
+* ``pw_add_facade`` -- Declare an upstream Pigweed facade.
+* ``pw_set_backend`` -- Set the backend library to use for a facade.
+* ``pw_add_test_generic`` -- The base helper used to instantiate test targets.
+  This is meant for use in downstream projects as upstream Pigweed modules are
+  expected to use ``pw_add_test``.
+* ``pw_add_test`` -- Declare an upstream Pigweed test target.
+* ``pw_add_test_group`` -- Declare a target to group and bundle test targets.
+* ``pw_target_link_targets`` -- Helper wrapper around ``target_link_libraries``
+  which only supports CMake targets and detects when the target does not exist.
+  Note that generator expressions are not supported.
+* ``pw_add_global_compile_options`` -- Applies compilation options to all
+  targets in the build. This should only be used to add essential compilation
+  options, such as those that affect the ABI. Use ``pw_add_library`` or
+  ``target_compile_options`` to apply other compile options.
+* ``pw_add_error_target`` -- Declares target which reports a message and causes
+  a build failure only when compiled. This is useful when ``FATAL_ERROR``
+  messages cannot be used to catch problems during the CMake configuration
+  phase.
+* ``pw_parse_arguments`` -- Helper to parse CMake function arguments.
+
+See ``pw_build/pigweed.cmake`` for the complete documentation of these
+functions.
+
+Special libraries that do not fit well with these functions are created with the
+standard CMake functions, such as ``add_library`` and ``target_link_libraries``.
+
+Facades and backends
+--------------------
+The CMake build uses CMake cache variables for configuring
+:ref:`facades<docs-module-structure-facades>` and backends. Cache variables are
+similar to GN's build args set with ``gn args``. Unlike GN, CMake does not
+support multi-toolchain builds, so these variables have a single global value
+per build directory.
+
+The ``pw_add_module_facade`` function declares a cache variable named
+``<module_name>_BACKEND`` for each facade. Cache variables can be awkward to
+work with, since their values only change when they're assigned, but then
+persist across CMake invocations. These variables should be set in one of the
+following ways:
+
+* Prior to setting a backend, your application should include
+  ``$ENV{PW_ROOT}/backends.cmake``. This file will setup all the backend targets
+  such that any misspelling of a facade or backend will yield a warning.
+
+  .. note::
+    Zephyr developers do not need to do this, backends can be set automatically
+    by enabling the appropriate Kconfig options.
+
+* Call ``pw_set_backend`` to set backends appropriate for the target in the
+  target's toolchain file. The toolchain file is provided to ``cmake`` with
+  ``-DCMAKE_TOOLCHAIN_FILE=<toolchain file>``.
+* Call ``pw_set_backend`` in the top-level ``CMakeLists.txt`` before other
+  CMake code executes.
+* Set the backend variable at the command line with the ``-D`` option.
+
+  .. code-block:: sh
+
+    cmake -B out/cmake_host -S "$PW_ROOT" -G Ninja \
+        -DCMAKE_TOOLCHAIN_FILE=$PW_ROOT/pw_toolchain/host_clang/toolchain.cmake \
+        -Dpw_log_BACKEND=pw_log_basic
+
+* Temporarily override a backend by setting it interactively with ``ccmake`` or
+  ``cmake-gui``.
+
+If the backend is set to a build target that does not exist, there will be an
+error message like the following:
+
+.. code-block::
+
+   CMake Error at pw_build/pigweed.cmake:257 (message):
+     my_module.my_facade's INTERFACE dep "my_nonexistent_backend" is not
+     a target.
+   Call Stack (most recent call first):
+     pw_build/pigweed.cmake:238:EVAL:1 (_pw_target_link_targets_deferred_check)
+     CMakeLists.txt:DEFERRED
+
+
+Toolchain setup
+---------------
+In CMake, the toolchain is configured by setting CMake variables, as described
+in the `CMake documentation <https://cmake.org/cmake/help/latest/manual/cmake-toolchains.7.html>`_.
+These variables are typically set in a toolchain CMake file passed to ``cmake``
+with the ``-D`` option (``-DCMAKE_TOOLCHAIN_FILE=path/to/file.cmake``).
+For Pigweed embedded builds, set ``CMAKE_SYSTEM_NAME`` to the empty string
+(``""``).
+
+Toolchains may set the ``pw_build_WARNINGS`` variable to a list of ``INTERFACE``
+libraries with compilation options for Pigweed's upstream libraries. This
+defaults to a strict set of warnings. Projects may need to use less strict
+compilation warnings to compile backends exposed to Pigweed code (such as
+``pw_log``) that cannot compile with Pigweed's flags. If desired, projects can
+access these warnings by depending on ``pw_build.warnings``.
+
+Third party libraries
+---------------------
+The CMake build includes third-party libraries similarly to the GN build. A
+``dir_pw_third_party_<library>`` cache variable is defined for each third-party
+dependency. The variable must be set to the absolute path of the library in
+order to use it. If the variable is empty
+(``if("${dir_pw_third_party_<library>}" STREQUAL "")``), the dependency is not
+available.
+
+Third-party dependencies are not automatically added to the build. They can be
+manually added with ``add_subdirectory`` or by setting the
+``pw_third_party_<library>_ADD_SUBDIRECTORY`` option to ``ON``.
+
+Third party variables are set like any other cache global variable in CMake. It
+is recommended to set these in one of the following ways:
+
+* Set with the CMake ``set`` function in the toolchain file or a
+  ``CMakeLists.txt`` before other CMake code executes.
+
+  .. code-block:: cmake
+
+    set(dir_pw_third_party_nanopb ${CMAKE_CURRENT_SOURCE_DIR}/external/nanopb CACHE PATH "" FORCE)
+
+* Set the variable at the command line with the ``-D`` option.
+
+  .. code-block:: sh
+
+    cmake -B out/cmake_host -S "$PW_ROOT" -G Ninja \
+        -DCMAKE_TOOLCHAIN_FILE=$PW_ROOT/pw_toolchain/host_clang/toolchain.cmake \
+        -Ddir_pw_third_party_nanopb=/path/to/nanopb
+
+* Set the variable interactively with ``ccmake`` or ``cmake-gui``.
+
+Use Pigweed from an existing CMake project
+------------------------------------------
+To use Pigweed libraries from a CMake-based project, simply include the Pigweed
+repository from a ``CMakeLists.txt``.
+
+.. code-block:: cmake
+
+  add_subdirectory(path/to/pigweed pigweed)
+
+All module libraries will be available as ``module_name`` or
+``module_name.sublibrary``.
+
+If desired, modules can be included individually.
+
+.. code-block:: cmake
+
+  add_subdirectory(path/to/pigweed/pw_some_module pw_some_module)
+  add_subdirectory(path/to/pigweed/pw_another_module pw_another_module)
diff --git a/pw_build/coverage_report.gni b/pw_build/coverage_report.gni
index eb50b88..8c4996d 100644
--- a/pw_build/coverage_report.gni
+++ b/pw_build/coverage_report.gni
@@ -157,8 +157,7 @@
       module = "pw_build.merge_profraws"
       args = [
         "--llvm-profdata-path",
-        rebase_path("$pw_env_setup_CIPD_PIGWEED/bin/llvm-profdata",
-                    root_build_dir),
+        pw_toolchain_clang_tools.llvm_profdata,
         "--test-metadata-path",
         rebase_path(_test_metadata, root_build_dir),
         "--profdata-path",
@@ -201,8 +200,7 @@
         module = "pw_build.generate_report"
         args = [
           "--llvm-cov-path",
-          rebase_path("$pw_env_setup_CIPD_PIGWEED/bin/llvm-cov",
-                      root_build_dir),
+          pw_toolchain_clang_tools.llvm_cov,
           "--format",
           format,
           "--test-metadata-path",
diff --git a/pw_build/docs.rst b/pw_build/docs.rst
index addd373..16d2ad8 100644
--- a/pw_build/docs.rst
+++ b/pw_build/docs.rst
@@ -24,1829 +24,9 @@
 
 These are only supported in the GN build, so we recommend using it if possible.
 
-GN / Ninja
-==========
-The GN / Ninja build system is the primary build system used for upstream
-Pigweed development, and is the most tested and feature-rich build system
-Pigweed offers.
-
-This module's ``build.gn`` file contains a number of C/C++ ``config``
-declarations that are used by upstream Pigweed to set some architecture-agnostic
-compiler defaults. (See Pigweed's ``//BUILDCONFIG.gn``)
-
-``pw_build`` also provides several useful GN templates that are used throughout
-Pigweed.
-
-Build system philosophies
--------------------------
-While Pigweed's GN build is not hermetic, it strives to adhere to principles of
-`hermeticity <https://bazel.build/concepts/hermeticity>`_. Some guidelines to
-move towards the ideal of hermeticity include:
-
-* Only rely on pre-compiled tools provided by CIPD (or some other versioned,
-  pre-compiled binary distribution mechanism). This eliminates build artifact
-  differences caused by different tool versions or variations (e.g. same tool
-  version built with slightly different compilation flags).
-* Do not use absolute paths in Ninja commands. Typically, these appear when
-  using ``rebase_path("//path/to/my_script.py")``. Most of the time, Ninja
-  steps should be passed paths rebased relative to the build directory (i.e.
-  ``rebase_path("//path/to/my_script.py", root_build_dir)``). This ensures build
-  commands are the same across different machines.
-* Prevent produced artifacts from relying on or referencing system state. This
-  includes time stamps, writing absolute paths to generated artifacts, or
-  producing artifacts that reference system state in a way that prevents them
-  from working the same way on a different machine.
-* Isolate build actions to the build directory. In general, the build system
-  should not add or modify files outside of the build directory. This can cause
-  confusion to users, and makes the concept of a clean build more ambiguous.
-
-Target types
-------------
-.. code-block::
-
-  import("$dir_pw_build/target_types.gni")
-
-  pw_source_set("my_library") {
-    sources = [ "lib.cc" ]
-  }
-
-Pigweed defines wrappers around the four basic GN binary types ``source_set``,
-``executable``, ``static_library``, and ``shared_library``. These templates
-do several things:
-
-#. **Add default configs/deps**
-
-   Rather than binding the majority of compiler flags related to C++ standard,
-   cross-compilation, warning/error policy, etc.  directly to toolchain
-   invocations, these flags are applied as configs to all ``pw_*`` C/C++ target
-   types. The primary motivations for this are to allow some targets to modify
-   the default set of flags when needed by specifying ``remove_configs``, and to
-   reduce the complexity of building novel toolchains.
-
-   Pigweed's global default configs are set in ``pw_build/default.gni``, and
-   individual platform-specific toolchains extend the list by appending to the
-   ``default_configs`` build argument.
-
-   Default deps were added to support polyfill, which has since been deprecated.
-   Default dependency functionality continues to exist for backwards
-   compatibility.
-
-#. **Optionally add link-time binding**
-
-   Some libraries like pw_assert and pw_log are borderline impossible to
-   implement well without introducing circular dependencies. One solution for
-   addressing this is to break apart the libraries into an interface with
-   minimal dependencies, and an implementation with the bulk of the
-   dependencies that would typically create dependency cycles. In order for the
-   implementation to be linked in, it must be added to the dependency tree of
-   linked artifacts (e.g. ``pw_executable``, ``pw_static_library``). Since
-   there's no way for the libraries themselves to just happily pull in the
-   implementation if someone depends on the interface, the implementation is
-   instead late-bound by adding it as a direct dependency of the final linked
-   artifact. This is all managed through ``pw_build_LINK_DEPS``, which is global
-   for each toolchain and applied to every ``pw_executable``,
-   ``pw_static_library``, and ``pw_shared_library``.
-
-#. **Apply a default visibility policy**
-
-   Projects can globally control the default visibility of pw_* target types by
-   specifying ``pw_build_DEFAULT_VISIBILITY``. This template applies that as the
-   default visibility for any pw_* targets that do not explicitly specify a
-   visibility.
-
-#. **Add source file names as metadata**
-
-   All source file names are collected as
-   `GN metadata <https://gn.googlesource.com/gn/+/main/docs/reference.md#metadata_collection>`_.
-   This list can be writen to a file at build time using ``generated_file``. The
-   primary use case for this is to generate a token database containing all the
-   source files. This allows :c:macro:`PW_ASSERT` to emit filename tokens even
-   though it can't add them to the elf file because of the reasons described at
-   :ref:`module-pw_assert-assert-api`.
-
-   .. note::
-      ``pw_source_files``, if not rebased will default to outputing module
-      relative paths from a ``generated_file`` target.  This is likely not
-      useful. Adding a ``rebase`` argument to ``generated_file`` such as
-      ``rebase = root_build_dir`` will result in usable paths.  For an example,
-      see ``//pw_tokenizer/database.gni``'s ``pw_tokenizer_filename_database``
-      template.
-
-The ``pw_executable`` template provides additional functionality around building
-complete binaries. As Pigweed is a collection of libraries, it does not know how
-its final targets are built. ``pw_executable`` solves this by letting each user
-of Pigweed specify a global executable template for their target, and have
-Pigweed build against it. This is controlled by the build variable
-``pw_executable_config.target_type``, specifying the name of the executable
-template for a project.
-
-In some uncommon cases, a project's ``pw_executable`` template definition may
-need to stamp out some ``pw_source_set``\s. Since a pw_executable template can't
-import ``$dir_pw_build/target_types.gni`` due to circular imports, it should
-import ``$dir_pw_build/cc_library.gni`` instead.
-
-.. tip::
-
-  Prefer to use ``pw_executable`` over plain ``executable`` targets to allow
-  cleanly building the same code for multiple target configs.
-
-Arguments
-^^^^^^^^^
-All of the ``pw_*`` target type overrides accept any arguments supported by
-the underlying native types, as they simply forward them through to the
-underlying target.
-
-Additionally, the following arguments are also supported:
-
-* **remove_configs**: (optional) A list of configs / config patterns to remove
-  from the set of default configs specified by the current toolchain
-  configuration.
-* **remove_public_deps**: (optional) A list of targets to remove from the set of
-  default public_deps specified by the current toolchain configuration.
-
-.. _module-pw_build-link-deps:
-
-Link-only deps
---------------
-It may be necessary to specify additional link-time dependencies that may not be
-explicitly depended on elsewhere in the build. One example of this is a
-``pw_assert`` backend, which may need to leave out dependencies to avoid
-circular dependencies. Its dependencies need to be linked for executables and
-libraries, even if they aren't pulled in elsewhere.
-
-The ``pw_build_LINK_DEPS`` build arg is a list of dependencies to add to all
-``pw_executable``, ``pw_static_library``, and ``pw_shared_library`` targets.
-This should only be used as a last resort when dependencies cannot be properly
-expressed in the build.
-
-.. _module-pw_build-third-party:
-
-Third party libraries
----------------------
-Pigweed includes build files for a selection of third-party libraries. For a
-given library, these include:
-
-* ``third_party/<library>/library.gni``: Declares build arguments like
-  ``dir_pw_third_party_<library>`` that default to ``""`` but can be set to the
-  absolute path of the library in order to use it.
-* ``third_party/<library>/BUILD.gn``: Describes how to build the library. This
-  should import ``third_party/<library>/library.gni`` and refer to source paths
-  relative to ``dir_pw_third_party_<library>``.
-
-To add or update GN build files for libraries that only offer Bazel build files,
-the Python script at ``pw_build/py/pw_build/generate_3p_gn.py`` may be used.
-
-  .. note::
-    The ``generate_3p_gn.py`` script is experimental, and may not work on an
-    arbitrary Bazel library.
-
-To generate or update the GN offered by Pigweed from an Bazel upstream project,
-first create a ``third_party/<library>/repo.json`` file. This file should
-describe a single JSON object, with the following fields:
-
-* ``name``: String containg the project name.
-
-  .. code-block::
-
-     "name": "FuzzTest"
-
-* ``repos``: Object mapping Bazel repositories to library names.
-
-  .. code-block::
-
-     "repos": { "com_google_absl": "abseil-cpp" }
-
-* ``aliases``: Object mapping GN labels to other GN labels. In some cases, a
-  third party library may have a dependency on another library already supported
-  by Pigweed, but with a label that differs from what the script would generate.
-  This field allows those labels to be rewritten.
-
-  .. code-block::
-
-     "aliases": {
-       "$dir_pw_third_party/googletest:gtest": "$dir_pw_third_party/googletest"
-     }
-
-* ``add``: List of labels to existing GN configs. These will be added to every
-  target in the library.
-
-  .. code-block::
-
-     "add": [ "$dir_pw_third_party/re2/configs:disabled_warnings" ]
-
-* ``remove``: List of labels to default GN configs. These will be removed from
-  every target.
-
-  .. code-block::
-
-     "remove" = [ "$dir_pw_fuzzer:instrumentation" ]
-
-* ``allow_testonly``: Boolean indicating whether to generate GN for Bazel
-  targets marked test-only. Defaults to false.
-
-  .. code-block::
-
-     "allow_testonly": true
-
-* ``no_gn_check``: List of Bazel targets that violate ``gn check``'s
-  `rules`__. Third-party targets that do not conform can be excluded.
-
-  .. code-block::
-
-     "no_gn_check": [ "//fuzztest:regexp_dfa" ]
-
-* ``extra_files``: Object mapping additional files to create to Bazel targets
-  that create them. These targets will be passed to ``bazel run`` and their
-  output saved to the named file within ``third_party/<library>``. For example:
-
-  .. code-block::
-
-     "extra_files": {
-       "fuzztest.bazelrc": "@com_google_fuzztest//bazel:setup_configs"
-     }
-
-.. __: https://gn.googlesource.com/gn/+/main/docs/reference.md#cmd_check
-
-Python packages
----------------
-GN templates for :ref:`Python build automation <docs-python-build>` are
-described in :ref:`module-pw_build-python`.
-
 .. toctree::
-  :hidden:
+   :maxdepth: 1
 
-  python
-
-
-.. _module-pw_build-cc_blob_library:
-
-pw_cc_blob_library
-------------------
-The ``pw_cc_blob_library`` template is useful for embedding binary data into a
-program. The template takes in a mapping of symbol names to file paths, and
-generates a set of C++ source and header files that embed the contents of the
-passed-in files as arrays of ``std::byte``.
-
-The blob byte arrays are constant initialized and are safe to access at any
-time, including before ``main()``.
-
-``pw_cc_blob_library`` is also available in the CMake build. It is provided by
-``pw_build/cc_blob_library.cmake``.
-
-Arguments
-^^^^^^^^^
-* ``blobs``: A list of GN scopes, where each scope corresponds to a binary blob
-  to be transformed from file to byte array. This is a required field. Blob
-  fields include:
-
-  * ``symbol_name``: The C++ symbol for the byte array.
-  * ``file_path``: The file path for the binary blob.
-  * ``linker_section``: If present, places the byte array in the specified
-    linker section.
-  * ``alignas``: If present, uses the specified string or integer verbatim in
-    the ``alignas()`` specifier for the byte array.
-
-* ``out_header``: The header file to generate. Users will include this file
-  exactly as it is written here to reference the byte arrays.
-* ``namespace``: An optional (but highly recommended!) C++ namespace to place
-  the generated blobs within.
-
-Example
-^^^^^^^
-**BUILD.gn**
-
-.. code-block::
-
-  pw_cc_blob_library("foo_bar_blobs") {
-    blobs: [
-      {
-        symbol_name: "kFooBlob"
-        file_path: "${target_out_dir}/stuff/bin/foo.bin"
-      },
-      {
-        symbol_name: "kBarBlob"
-        file_path: "//stuff/bin/bar.bin"
-        linker_section: ".bar_section"
-      },
-    ]
-    out_header: "my/stuff/foo_bar_blobs.h"
-    namespace: "my::stuff"
-    deps = [ ":generate_foo_bin" ]
-  }
-
-.. note:: If the binary blobs are generated as part of the build, be sure to
-          list them as deps to the pw_cc_blob_library target.
-
-**Generated Header**
-
-.. code-block::
-
-  #pragma once
-
-  #include <array>
-  #include <cstddef>
-
-  namespace my::stuff {
-
-  extern const std::array<std::byte, 100> kFooBlob;
-
-  extern const std::array<std::byte, 50> kBarBlob;
-
-  }  // namespace my::stuff
-
-**Generated Source**
-
-.. code-block::
-
-  #include "my/stuff/foo_bar_blobs.h"
-
-  #include <array>
-  #include <cstddef>
-
-  #include "pw_preprocessor/compiler.h"
-
-  namespace my::stuff {
-
-  const std::array<std::byte, 100> kFooBlob = { ... };
-
-  PW_PLACE_IN_SECTION(".bar_section")
-  const std::array<std::byte, 50> kBarBlob = { ... };
-
-  }  // namespace my::stuff
-
-.. _module-pw_build-facade:
-
-pw_facade
----------
-In their simplest form, a :ref:`facade<docs-module-structure-facades>` is a GN
-build arg used to change a dependency at compile time. Pigweed targets configure
-these facades as needed.
-
-The ``pw_facade`` template bundles a ``pw_source_set`` with a facade build arg.
-This allows the facade to provide header files, compilation options or anything
-else a GN ``source_set`` provides.
-
-The ``pw_facade`` template declares two targets:
-
-* ``$target_name``: the public-facing ``pw_source_set``, with a ``public_dep``
-  on the backend
-* ``$target_name.facade``: target used by the backend to avoid circular
-  dependencies
-
-.. code-block::
-
-  # Declares ":foo" and ":foo.facade" GN targets
-  pw_facade("foo") {
-    backend = pw_log_BACKEND
-    public_configs = [ ":public_include_path" ]
-    public = [ "public/pw_foo/foo.h" ]
-  }
-
-Low-level facades like ``pw_assert`` cannot express all of their dependencies
-due to the potential for dependency cycles. Facades with this issue may require
-backends to place their implementations in a separate build target to be listed
-in ``pw_build_LINK_DEPS`` (see :ref:`module-pw_build-link-deps`). The
-``require_link_deps`` variable in ``pw_facade`` asserts that all specified build
-targets are present in ``pw_build_LINK_DEPS`` if the facade's backend variable
-is set.
-
-.. _module-pw_build-python-action:
-
-pw_python_action
-----------------
-.. seealso::
-   - :ref:`module-pw_build-python` for all of Pigweed's Python build GN templates.
-   - :ref:`docs-python-build` for details on how the GN Python build works.
-
-The ``pw_python_action`` template is a convenience wrapper around GN's `action
-function <https://gn.googlesource.com/gn/+/main/docs/reference.md#func_action>`_
-for running Python scripts. The main benefit it provides is resolution of GN
-target labels to compiled binary files. This allows Python scripts to be written
-independently of GN, taking only filesystem paths as arguments.
-
-Another convenience provided by the template is to allow running scripts without
-any outputs. Sometimes scripts run in a build do not directly produce output
-files, but GN requires that all actions have an output. ``pw_python_action``
-solves this by accepting a boolean ``stamp`` argument which tells it to create a
-placeholder output file for the action.
-
-Arguments
-^^^^^^^^^
-``pw_python_action`` accepts all of the arguments of a regular ``action``
-target. Additionally, it has some of its own arguments:
-
-* ``module``: Run the specified Python module instead of a script. Either
-  ``script`` or ``module`` must be specified, but not both.
-* ``capture_output``: Optional boolean. If true, script output is hidden unless
-  the script fails with an error. Defaults to true.
-* ``stamp``: Optional variable indicating whether to automatically create a
-  placeholder output file for the script. This allows running scripts without
-  specifying ``outputs``. If ``stamp`` is true, a generic output file is
-  used. If ``stamp`` is a file path, that file is used as a stamp file. Like any
-  output file, ``stamp`` must be in the build directory. Defaults to false.
-* ``environment``: Optional list of strings. Environment variables to set,
-  passed as NAME=VALUE strings.
-* ``working_directory``: Optional file path. When provided the current working
-  directory will be set to this location before the Python module or script is
-  run.
-* ``command_launcher``: Optional string. Arguments to prepend to the Python
-  command, e.g. ``'/usr/bin/fakeroot --'`` will run the Python script within a
-  fakeroot environment.
-* ``venv``: Optional gn target of the pw_python_venv that should be used to run
-  this action.
-* ``python_deps``: Extra dependencies that are required for running the Python
-  script for the ``action``. This must be used with ``module`` to specify the
-  build dependency of the ``module`` if it is user-defined code.
-* ``python_metadata_deps``: Extra dependencies that are ensured completed before
-  generating a Python package metadata manifest, not the overall Python script
-  ``action``. This should rarely be used by non-Pigweed code.
-
-.. _module-pw_build-python-action-test:
-
-pw_python_action_test
----------------------
-The ``pw_python_action_test`` extends :ref:`module-pw_build-python-action` to
-create a test that is run by a Python script, and its associated test metadata.
-
-Include action tests in the :ref:`module-pw_unit_test-pw_test_group` to produce
-the JSON metadata that :ref:`module-pw_build-test-info` adds.
-
-This template derives several additional targets:
-
-* ``<target_name>.metadata`` produces the test metadata when included in a
-  ``pw_test_group``. This metadata includes the Ninja target that runs the test.
-* If ``action`` is not provided as a label, ``<target_name>.script`` wraps a
-  ``pw_python_action`` to run the test as a standalone ``pw_python_package``.
-* ``<target_name>.group`` creates a ``pw_python_group`` in order to apply tools,
-  e.g. linters, to the standalone package.
-* ``<target_name>.lib`` is an empty group for compatibility with
-  ``pw_test_group``.
-* ``<target_name>.run`` invokes the test.
-
-Targets defined using this template will produce test metadata with a
-``test_type`` of "action_test" and a ``ninja_target`` value that will invoke the
-test when passed to Ninja, i.e. ``ninja -C out <ninja_target>``.
-
-Arguments
-^^^^^^^^^
-``pw_python_action_test`` accepts the following arguments:
-
-* All of the arguments of :ref:`module-pw_unit_test-pw_test`.
-* ``action``: An optional string or scope. If a string, this should be a label
-  to a ``pw_python_action`` target that performs the test. If a scope, this has
-  the same meaning as for ``pw_python_script``.
-* Optionally, the ``test_type`` and ``extra_metadata`` arguments of the
-  :ref:`module-pw_build-test-info` template.
-* Optionally, all of the arguments of the :ref:`module-pw_build-python-action`
-  template except ``module``, ``capture_output``, ``stamp``, and
-  ``python_metadata_deps``.
-* Optionally, all of the arguments of the ``pw_python_package`` template except
-  ``setup``, ``generate_setup``, ``tests``, ``python_test_deps``, and
-  ``proto_library``.
-
-.. _module-pw_build-test-info:
-
-pw_test_info
-------------
-``pw_test_info`` generates metadata describing tests. To produce a JSON file
-containing this metadata:
-
-#. For new modules, add a :ref:`module-pw_unit_test-pw_test_group` to the
-   BUILD.gn file. All modules are required to have a ``tests`` target.
-#. Include one or more tests or test groups via ``tests`` or ``group_deps``,
-   respectively, in the ``pw_test_group``.
-#. Set ``output_metadata`` to ``true`` in the ``pw_test_group`` definition.
-
-This template does not typically need to be used directly, unless adding new
-types of tests. It is typically used by other templates, such as the
-:ref:`module-pw_unit_test-pw_test` and the
-:ref:`module-pw_unit_test-pw_test_group`.
-
-Arguments
-^^^^^^^^^
-* ``test_type``: One of "test_group", "unit_test", "action_test", or
-  "perf_test".
-* ``test_name``: Name of the test. Defaults to the target name.
-* ``build_label``: GN label for the test. Defaults to the test name.
-* ``extra_metadata``: Additional variables to add to the metadata.
-
-Specific test templates add additional details using ``extra_metadata``. For
-example:
-
-* The :ref:`module-pw_unit_test-pw_test` includes the ``test_directory``
-  that contains the test executable.
-* The :ref:`module-pw_unit_test-pw_test_group` includes its collected list of
-  tests and test groups as ``deps``.
-* The :ref:`module-pw_build-python-action-test` includes the Ninja target that
-  can be used to invoke the Python action and run the test.
-
-Example
-^^^^^^^
-Let ``//my_module/BUILD.gn`` contain the following:
-
-.. code::
-
-   import("$dir_pw_build/python_action_test.gni")
-   import("$dir_pw_perf_test/perf_test.gni")
-   import("$dir_pw_unit_test/test.gni")
-
-   pw_test("my_unit_test") {
-     sources = [ ... ]
-     deps = [ ... ]
-   }
-
-   pw_python_action_test("my_action_test") {
-     script = [ ... ]
-     args = [ ... ]
-     deps = [ ... ]
-   }
-
-   pw_python_action_test("my_integration_test") {
-     script = [ ... ]
-     args = [ ... ]
-     deps = [ ... ]
-     tags = [ "integration" ]
-   }
-
-   pw_perf_test("my_perf_test") {
-     sources = [ ... ]
-     deps = [ ... ]
-   }
-
-   pw_test_group("tests") {
-     tests = [
-      ":my_unit_test",
-      ":my_action_test",
-      ":my_integration_test",
-      ":my_perf_test",
-     ]
-   }
-
-Let ``//BUILD.gn`` contain the following:
-
-.. code::
-
-   import("$dir_pw_unit_test/test.gni")
-
-   group("run_tests") {
-     deps = [ ":my_module_tests(//targets/my_targets:my_toolchain)" ]
-   }
-
-   pw_test_group("my_module_tests") {
-     group_deps = [ "//my_module:tests" ]
-     output_metadata = true
-   }
-
-Then running ``gn gen out`` will produce the following JSON file at
-``out/my_toolchain/my_module_tests.testinfo.json``:
-
-.. code:: json
-
-   [
-     {
-       "build_label": "//my_module:my_unit_test",
-       "test_directory": "my_toolchain/obj/my_module/test",
-       "test_name": "my_unit_test",
-       "test_type": "unit_test"
-     },
-     {
-       "build_label": "//my_module:my_action_test",
-       "ninja_target": "my_toolchain/obj/my_module/my_action_test.run.stamp",
-       "test_name": "my_action_test",
-       "test_type": "action_test"
-     },
-     {
-       "build_label": "//my_module:my_integration_test",
-       "ninja_target": "my_toolchain/obj/my_module/my_integration_test.run.stamp",
-       "tags": [
-         "integration"
-       ],
-       "test_name": "my_integration_test",
-       "test_type": "action_test"
-     },
-     {
-       "build_label": "//my_module:my_perf_test",
-       "test_directory": "my_toolchain/obj/my_module/test",
-       "test_name": "my_perf_test",
-       "test_type": "perf_test"
-     },
-     {
-       "build_label": "//my_module:tests",
-       "deps": [
-         "//my_module:my_unit_test",
-         "//my_module:my_action_test",
-         "//my_module:my_integration_test",
-         "//my_module:my_perf_test"
-       ],
-       "test_name": "my_module/tests",
-       "test_type": "test_group"
-     },
-     {
-       "build_label": "//:my_module_tests",
-       "deps": [
-         "//my_module:tests"
-       ],
-       "test_name": "my_module_tests",
-       "test_type": "test_group"
-     }
-   ]
-
-.. _module-pw_build-python-action-expressions:
-
-Expressions
-^^^^^^^^^^^
-``pw_python_action`` evaluates expressions in ``args``, the arguments passed to
-the script. These expressions function similarly to generator expressions in
-CMake. Expressions may be passed as a standalone argument or as part of another
-argument. A single argument may contain multiple expressions.
-
-Generally, these expressions are used within templates rather than directly in
-BUILD.gn files. This allows build code to use GN labels without having to worry
-about converting them to files.
-
-.. note::
-
-  We intend to replace these expressions with native GN features when possible.
-  See `b/234886742 <http://issuetracker.google.com/234886742>`_.
-
-The following expressions are supported:
-
-.. describe:: <TARGET_FILE(gn_target)>
-
-  Evaluates to the output file of the provided GN target. For example, the
-  expression
-
-  .. code-block::
-
-    "<TARGET_FILE(//foo/bar:static_lib)>"
-
-  might expand to
-
-  .. code-block::
-
-    "/home/User/project_root/out/obj/foo/bar/static_lib.a"
-
-  ``TARGET_FILE`` parses the ``.ninja`` file for the GN target, so it should
-  always find the correct output file, regardless of the toolchain's or target's
-  configuration. Some targets, such as ``source_set`` and ``group`` targets, do
-  not have an output file, and attempting to use ``TARGET_FILE`` with them
-  results in an error.
-
-  ``TARGET_FILE`` only resolves GN target labels to their outputs. To resolve
-  paths generally, use the standard GN approach of applying the
-  ``rebase_path(path, root_build_dir)`` function. This function
-  converts the provided GN path or list of paths to be relative to the build
-  directory, from which all build commands and scripts are executed.
-
-.. describe:: <TARGET_FILE_IF_EXISTS(gn_target)>
-
-  ``TARGET_FILE_IF_EXISTS`` evaluates to the output file of the provided GN
-  target, if the output file exists. If the output file does not exist, the
-  entire argument that includes this expression is omitted, even if there is
-  other text or another expression.
-
-  For example, consider this expression:
-
-  .. code-block::
-
-    "--database=<TARGET_FILE_IF_EXISTS(//alpha/bravo)>"
-
-  If the ``//alpha/bravo`` target file exists, this might expand to the
-  following:
-
-  .. code-block::
-
-    "--database=/home/User/project/out/obj/alpha/bravo/bravo.elf"
-
-  If the ``//alpha/bravo`` target file does not exist, the entire
-  ``--database=`` argument is omitted from the script arguments.
-
-.. describe:: <TARGET_OBJECTS(gn_target)>
-
-  Evaluates to the object files of the provided GN target. Expands to a separate
-  argument for each object file. If the target has no object files, the argument
-  is omitted entirely. Because it does not expand to a single expression, the
-  ``<TARGET_OBJECTS(...)>`` expression may not have leading or trailing text.
-
-  For example, the expression
-
-  .. code-block::
-
-    "<TARGET_OBJECTS(//foo/bar:a_source_set)>"
-
-  might expand to multiple separate arguments:
-
-  .. code-block::
-
-    "/home/User/project_root/out/obj/foo/bar/a_source_set.file_a.cc.o"
-    "/home/User/project_root/out/obj/foo/bar/a_source_set.file_b.cc.o"
-    "/home/User/project_root/out/obj/foo/bar/a_source_set.file_c.cc.o"
-
-Example
-^^^^^^^
-.. code-block::
-
-  import("$dir_pw_build/python_action.gni")
-
-  pw_python_action("postprocess_main_image") {
-    script = "py/postprocess_binary.py"
-    args = [
-      "--database",
-      rebase_path("my/database.csv", root_build_dir),
-      "--binary=<TARGET_FILE(//firmware/images:main)>",
-    ]
-    stamp = true
-  }
-
-.. _module-pw_build-evaluate-path-expressions:
-
-pw_evaluate_path_expressions
-----------------------------
-It is not always feasible to pass information to a script through command line
-arguments. If a script requires a large amount of input data, writing to a file
-is often more convenient. However, doing so bypasses ``pw_python_action``'s GN
-target label resolution, preventing such scripts from working with build
-artifacts in a build system-agnostic manner.
-
-``pw_evaluate_path_expressions`` is designed to address this use case. It takes
-a list of input files and resolves target expressions within them, modifying the
-files in-place.
-
-Refer to ``pw_python_action``'s :ref:`module-pw_build-python-action-expressions`
-section for the list of supported expressions.
-
-.. note::
-
-  ``pw_evaluate_path_expressions`` is typically used as an intermediate
-  sub-target of a larger template, rather than a standalone build target.
-
-Arguments
-^^^^^^^^^
-* ``files``: A list of scopes, each containing a ``source`` file to process and
-  a ``dest`` file to which to write the result.
-
-Example
-^^^^^^^
-The following template defines an executable target which additionally outputs
-the list of object files from which it was compiled, making use of
-``pw_evaluate_path_expressions`` to resolve their paths.
-
-.. code-block::
-
-  import("$dir_pw_build/evaluate_path_expressions.gni")
-
-  template("executable_with_artifacts") {
-    executable("${target_name}.exe") {
-      sources = invoker.sources
-      if defined(invoker.deps) {
-        deps = invoker.deps
-      }
-    }
-
-    _artifacts_input = "$target_gen_dir/${target_name}_artifacts.json.in"
-    _artifacts_output = "$target_gen_dir/${target_name}_artifacts.json"
-    _artifacts = {
-      binary = "<TARGET_FILE(:${target_name}.exe)>"
-      objects = "<TARGET_OBJECTS(:${target_name}.exe)>"
-    }
-    write_file(_artifacts_input, _artifacts, "json")
-
-    pw_evaluate_path_expressions("${target_name}.evaluate") {
-      files = [
-        {
-          source = _artifacts_input
-          dest = _artifacts_output
-        },
-      ]
-    }
-
-    group(target_name) {
-      deps = [
-        ":${target_name}.exe",
-        ":${target_name}.evaluate",
-      ]
-    }
-  }
-
-.. _module-pw_build-pw_exec:
-
-pw_exec
--------
-``pw_exec`` allows for execution of arbitrary programs. It is a wrapper around
-``pw_python_action`` but allows for specifying the program to execute.
-
-.. note::
-
-   Prefer to use ``pw_python_action`` instead of calling out to shell
-   scripts, as the Python will be more portable. ``pw_exec`` should generally
-   only be used for interacting with legacy/existing scripts.
-
-Arguments
-^^^^^^^^^
-* ``program``: The program to run. Can be a full path or just a name (in which
-  case $PATH is searched).
-* ``args``: Optional list of arguments to the program.
-* ``deps``: Dependencies for this target.
-* ``public_deps``: Public dependencies for this target. In addition to outputs
-  from this target, outputs generated by public dependencies can be used as
-  inputs from targets that depend on this one. This is not the case for private
-  deps.
-* ``inputs``: Optional list of build inputs to the program.
-* ``outputs``: Optional list of artifacts produced by the program's execution.
-* ``env``: Optional list of key-value pairs defining environment variables for
-  the program.
-* ``env_file``: Optional path to a file containing a list of newline-separated
-  key-value pairs defining environment variables for the program.
-* ``args_file``: Optional path to a file containing additional positional
-  arguments to the program. Each line of the file is appended to the
-  invocation. Useful for specifying arguments from GN metadata.
-* ``skip_empty_args``: If args_file is provided, boolean indicating whether to
-  skip running the program if the file is empty. Used to avoid running
-  commands which error when called without arguments.
-* ``capture_output``: If true, output from the program is hidden unless the
-  program exits with an error. Defaults to true.
-* ``working_directory``: The working directory to execute the subprocess with.
-  If not specified it will not be set and the subprocess will have whatever
-  the parent current working directory is.
-* ``venv``: Python virtualenv to pass along to the underlying
-  :ref:`module-pw_build-pw_python_action`.
-* ``visibility``: GN visibility to apply to the underlying target.
-
-Example
-^^^^^^^
-.. code-block::
-
-   import("$dir_pw_build/exec.gni")
-
-   pw_exec("hello_world") {
-     program = "/bin/sh"
-     args = [
-       "-c",
-       "echo hello \$WORLD",
-     ]
-     env = [
-       "WORLD=world",
-     ]
-   }
-
-pw_input_group
---------------
-``pw_input_group`` defines a group of input files which are not directly
-processed by the build but are still important dependencies of later build
-steps. This is commonly used alongside metadata to propagate file dependencies
-through the build graph and force rebuilds on file modifications.
-
-For example ``pw_docgen`` defines a ``pw_doc_group`` template which outputs
-metadata from a list of input files. The metadata file is not actually part of
-the build, and so changes to any of the input files do not trigger a rebuild.
-This is problematic, as targets that depend on the metadata should rebuild when
-the inputs are modified but GN cannot express this dependency.
-
-``pw_input_group`` solves this problem by allowing a list of files to be listed
-in a target that does not output any build artifacts, causing all dependent
-targets to correctly rebuild.
-
-Arguments
-^^^^^^^^^
-``pw_input_group`` accepts all arguments that can be passed to a ``group``
-target, as well as requiring one extra:
-
-* ``inputs``: List of input files.
-
-Example
-^^^^^^^
-.. code-block::
-
-  import("$dir_pw_build/input_group.gni")
-
-  pw_input_group("foo_metadata") {
-    metadata = {
-      files = [
-        "x.foo",
-        "y.foo",
-        "z.foo",
-      ]
-    }
-    inputs = metadata.files
-  }
-
-Targets depending on ``foo_metadata`` will rebuild when any of the ``.foo``
-files are modified.
-
-pw_zip
-------
-``pw_zip`` is a target that allows users to zip up a set of input files and
-directories into a single output ``.zip`` file—a simple automation of a
-potentially repetitive task.
-
-Arguments
-^^^^^^^^^
-* ``inputs``: List of source files as well as the desired relative zip
-  destination. See below for the input syntax.
-* ``dirs``: List of entire directories to be zipped as well as the desired
-  relative zip destination. See below for the input syntax.
-* ``output``: Filename of output ``.zip`` file.
-* ``deps``: List of dependencies for the target.
-
-Input Syntax
-^^^^^^^^^^^^
-Inputs all need to follow the correct syntax:
-
-#. Path to source file or directory. Directories must end with a ``/``.
-#. The delimiter (defaults to ``>``).
-#. The desired destination of the contents within the ``.zip``. Must start
-   with ``/`` to indicate the zip root. Any number of subdirectories are
-   allowed. If the source is a file it can be put into any subdirectory of the
-   root. If the source is a file, the zip copy can also be renamed by ending
-   the zip destination with a filename (no trailing ``/``).
-
-Thus, it should look like the following: ``"[source file or dir] > /"``.
-
-Example
-^^^^^^^
-Let's say we have the following structure for a ``//source/`` directory:
-
-.. code-block::
-
-  source/
-  ├── file1.txt
-  ├── file2.txt
-  ├── file3.txt
-  └── some_dir/
-      ├── file4.txt
-      └── some_other_dir/
-          └── file5.txt
-
-And we create the following build target:
-
-.. code-block::
-
-  import("$dir_pw_build/zip.gni")
-
-  pw_zip("target_name") {
-    inputs = [
-      "//source/file1.txt > /",             # Copied to the zip root dir.
-      "//source/file2.txt > /renamed.txt",  # File renamed.
-      "//source/file3.txt > /bar/",         # File moved to the /bar/ dir.
-    ]
-
-    dirs = [
-      "//source/some_dir/ > /bar/some_dir/",  # All /some_dir/ contents copied
-                                              # as /bar/some_dir/.
-    ]
-
-    # Note on output: if the specific output directory isn't defined
-    # (such as output = "zoo.zip") then the .zip will output to the
-    # same directory as the BUILD.gn file that called the target.
-    output = "//$target_out_dir/foo.zip"  # Where the foo.zip will end up
-  }
-
-This will result in a ``.zip`` file called ``foo.zip`` stored in
-``//$target_out_dir`` with the following structure:
-
-.. code-block::
-
-  foo.zip
-  ├── bar/
-  │   ├── file3.txt
-  │   └── some_dir/
-  │       ├── file4.txt
-  │       └── some_other_dir/
-  │           └── file5.txt
-  ├── file1.txt
-  └── renamed.txt
-
-.. _module-pw_build-relative-source-file-names:
-
-pw_relative_source_file_names
------------------------------
-This template recursively walks the listed dependencies and collects the names
-of all the headers and source files required by the targets, and then transforms
-them such that they reflect the ``__FILE__`` when pw_build's ``relative_paths``
-config is applied. This is primarily intended for side-band generation of
-pw_tokenizer tokens so file name tokens can be utilized in places where
-pw_tokenizer is unable to embed token information as part of C/C++ compilation.
-
-This template produces a JSON file containing an array of strings (file paths
-with ``-ffile-prefix-map``-like transformations applied) that can be used to
-:ref:`generate a token database <module-pw_tokenizer-database-creation>`.
-
-Arguments
-^^^^^^^^^
-* ``deps``: A required list of targets to recursively extract file names from.
-* ``outputs``: A required array with a single element: the path to write the
-  final JSON file to.
-
-Example
-^^^^^^^
-Let's say we have the following project structure:
-
-.. code-block::
-
-  project root
-  ├── foo/
-  │   ├── foo.h
-  │   └── foo.cc
-  ├── bar/
-  │   ├── bar.h
-  │   └── bar.cc
-  ├── unused/
-  │   ├── unused.h
-  │   └── unused.cc
-  └── main.cc
-
-And a BUILD.gn at the root:
-
-.. code-block::
-
-  pw_source_set("bar") {
-    public_configs = [ ":bar_headers" ]
-    public = [ "bar/bar.h" ]
-    sources = [ "bar/bar.cc" ]
-  }
-
-  pw_source_set("foo") {
-    public_configs = [ ":foo_headers" ]
-    public = [ "foo/foo.h" ]
-    sources = [ "foo/foo.cc" ]
-    deps = [ ":bar" ]
-  }
-
-
-  pw_source_set("unused") {
-    public_configs = [ ":unused_headers" ]
-    public = [ "unused/unused.h" ]
-    sources = [ "unused/unused.cc" ]
-    deps = [ ":bar" ]
-  }
-
-  pw_executable("main") {
-    sources = [ "main.cc" ]
-    deps = [ ":foo" ]
-  }
-
-  pw_relative_source_file_names("main_source_files") {
-    deps = [ ":main" ]
-    outputs = [ "$target_gen_dir/main_source_files.json" ]
-  }
-
-The JSON file written to ``out/gen/main_source_files.json`` will contain:
-
-.. code-block::
-
-  [
-    "bar/bar.cc",
-    "bar/bar.h",
-    "foo/foo.cc",
-    "foo/foo.h",
-    "main.cc"
-  ]
-
-Since ``unused`` isn't a transitive dependency of ``main``, its source files
-are not included. Similarly, even though ``bar`` is not a direct dependency of
-``main``, its source files *are* included because ``foo`` brings in ``bar`` as
-a transitive dependency.
-
-Note how the file paths in this example are relative to the project root rather
-than being absolute paths (e.g. ``/home/user/ralph/coding/my_proj/main.cc``).
-This is a result of transformations applied to strip absolute pathing prefixes,
-matching the behavior of pw_build's ``$dir_pw_build:relative_paths`` config.
-
-Build time errors: pw_error and pw_build_assert
------------------------------------------------
-In Pigweed's complex, multi-toolchain GN build it is not possible to build every
-target in every configuration. GN's ``assert`` statement is not ideal for
-enforcing the correct configuration because it may prevent the GN build files or
-targets from being referred to at all, even if they aren't used.
-
-The ``pw_error`` GN template results in an error if it is executed during the
-build. These error targets can exist in the build graph, but cannot be depended
-on without an error.
-
-``pw_build_assert`` evaluates to a ``pw_error`` if a condition fails or nothing
-(an empty group) if the condition passes. Targets can add a dependency on a
-``pw_build_assert`` to enforce a condition at build time.
-
-The templates for build time errors are defined in ``pw_build/error.gni``.
-
-Generate code coverage reports: ``pw_coverage_report``
-------------------------------------------------------
-Pigweed supports generating coverage reports, in a variety of formats, for C/C++
-code using the ``pw_coverage_report`` GN template.
-
-Coverage Caveats
-^^^^^^^^^^^^^^^^
-There are currently two code coverage caveats when enabled:
-
-#. Coverage reports are only populated based on host tests that use a ``clang``
-   toolchain.
-
-#. Coverage reports will only show coverage information for headers included in
-   a test binary.
-
-These two caveats mean that all device-specific code that cannot be compiled for
-and run on the host will not be able to have reports generated for them, and
-that the existence of these files will not appear in any coverage report.
-
-Try to ensure that your code can be written in a way that it can be compiled
-into a host test for the purpose of coverage reporting, although this is
-sometimes impossible due to requiring hardware-specific APIs to be available.
-
-Coverage Instrumentation
-^^^^^^^^^^^^^^^^^^^^^^^^
-For the ``pw_coverage_report`` to generate meaningful output, you must ensure
-that it is invoked by a toolchain that instruments tests for code coverage
-collection and output.
-
-Instrumentation is controlled by two GN build arguments:
-
-- ``pw_toolchain_COVERAGE_ENABLED`` being set to ``true``.
-- ``pw_toolchain_PROFILE_SOURCE_FILES`` is an optional argument that provides a
-  list of source files to selectively collect coverage.
-
-.. note::
-
-  It is possible to also instrument binaries for UBSAN, ASAN, or TSAN at the
-  same time as coverage. However, TSAN will find issues in the coverage
-  instrumentation code and fail to properly build.
-
-This can most easily be done by using the ``host_clang_coverage`` toolchain
-provided in :ref:`module-pw_toolchain`, but you can also create custom
-toolchains that manually set these GN build arguments as well.
-
-``pw_coverage_report``
-^^^^^^^^^^^^^^^^^^^^^^
-``pw_coverage_report`` is basically a GN frontend to the ``llvm-cov``
-`tool <https://llvm.org/docs/CommandGuide/llvm-cov.html>`_ that can be
-integrated into the normal build.
-
-It can be found at ``pw_build/coverage_report.gni`` and is available through
-``import("$dir_pw_build/coverage_report.gni")``.
-
-The supported report formats are:
-
-- ``text``: A text representation of the code coverage report. This
-  format is not suitable for further machine manipulation and is instead only
-  useful for cases where a human needs to interpret the report. The text format
-  provides a nice summary, but if you desire to drill down into the coverage
-  details more, please consider using ``html`` instead.
-
-  - This is equivalent to ``llvm-cov show --format text`` and similar to
-    ``llvm-cov report``.
-
-- ``html``: A static HTML site that provides an overall coverage summary and
-  per-file information. This format is not suitable for further machine
-  manipulation and is instead only useful for cases where a human needs to
-  interpret the report.
-
-  - This is equivalent to ``llvm-cov show --format html``.
-
-- ``lcov``: A machine-friendly coverage report format. This format is not human-
-  friendly. If that is necessary, use ``text`` or ``html`` instead.
-
-  - This is equivalent to ``llvm-cov export --format lcov``.
-
-- ``json``: A machine-friendly coverage report format. This format is not human-
-  friendly. If that is necessary, use ``text`` or ``html`` instead.
-
-  - This is equivalent to ``llvm-cov export --format json``.
-
-Arguments
-"""""""""
-There are three classes of ``template`` arguments: build, coverage, and test.
-
-**Build Arguments:**
-
-- ``enable_if`` (optional): Conditionally activates coverage report generation when set to
-  a boolean expression that evaluates to ``true``. This can be used to allow
-  project builds to conditionally enable or disable coverage reports to minimize
-  work needed for certain build configurations.
-
-- ``failure_mode`` (optional/unstable): Specify the failure mode for
-  ``llvm-profdata`` (used to merge individual profraw files from ``pw_test``
-  runs). Available options are ``"any"`` (default) or ``"all"``.
-
-  - This should be considered an unstable/deprecated argument that should only
-    be used as a last resort to get a build working again. Using
-    ``failure_mode = "all"`` usually indicates that there are underlying
-    problems in the build or test infrastructure that should be independently
-    resolved. Please reach out to the Pigweed team for assistance.
-
-**Coverage Arguments:**
-
-- ``filter_paths`` (optional): List of file paths to include when generating the
-  coverage report. These cannot be regular expressions, but can be concrete file
-  or folder paths. Folder paths will allow all files in that directory or any
-  recursive child directory.
-
-  - These are passed to ``llvm-cov`` by the optional trailing positional
-    ``[SOURCES]`` arguments.
-
-- ``ignore_filename_patterns`` (optional): List of file path regular expressions
-  to ignore when generating the coverage report.
-
-  - These are passed to ``llvm-cov`` via ``--ignore-filename-regex`` named
-    parameters.
-
-**Test Arguments (one of these is required to be provided):**
-
-- ``tests``: A list of ``pw_test`` :ref:`targets<module-pw_unit_test-pw_test>`.
-
-- ``group_deps``: A list of ``pw_test_group``
-  :ref:`targets<module-pw_unit_test-pw_test_group>`.
-
-.. note::
-
-  ``tests`` and ``group_deps`` are treated exactly the same by
-  ``pw_coverage_report``, so it is not that important to ensure they are used
-  properly.
-
-Target Expansion
-""""""""""""""""
-``pw_coverage_report(<target_name>)`` expands to one concrete target for each
-report format.
-
-- ``<target_name>.text``: Generates the ``text`` coverage report.
-
-- ``<target_name>.html``: Generates the ``html`` coverage report.
-
-- ``<target_name>.lcov``: Generates the ``lcov`` coverage report.
-
-- ``<target_name>.json``: Generates the ``json`` coverage report.
-
-To use any of these targets, you need only to add a dependency on the desired
-target somewhere in your build.
-
-There is also a ``<target_name>`` target generated that is a ``group`` that adds
-a dependency on all of the format-specific targets listed above.
-
-.. note::
-  These targets are always available, even when the toolchain executing the
-  target does not support coverage or coverage is not enabled. In these cases,
-  the targets are set to empty groups.
-
-Coverage Output
-^^^^^^^^^^^^^^^
-Coverage reports are currently generated and placed into the build output
-directory associated with the path to the GN file where the
-``pw_coverage_report`` is used in a subfolder named
-``<target_name>.<report_type>``.
-
-.. note::
-
-  Due to limitations with telling GN the entire output of coverage reports
-  (stemming from per-source-file generation for HTML and text representations),
-  it is not as simple as using GN's built-in ``copy`` to be able to move these
-  coverage reports to another output location. However, it seems possible to add
-  a target that can use Python to copy the entire output directory.
-
-Improved Ninja interface
-------------------------
-Ninja includes a basic progress display, showing in a single line the number of
-targets finished, the total number of targets, and the name of the most recent
-target it has either started or finished.
-
-For additional insight into the status of the build, Pigweed includes a Ninja
-wrapper, ``pw-wrap-ninja``, that displays additional real-time information about
-the progress of the build. The wrapper is invoked the same way you'd normally
-invoke Ninja:
-
-.. code-block:: sh
-
-  pw-wrap-ninja -C out
-
-The script lists the progress of the build, as well as the list of targets that
-Ninja is currently building, along with a timer that measures how long each
-target has been building for:
-
-.. code-block::
-
-  [51.3s] Building [8924/10690] ...
-    [10.4s] c++ pw_strict_host_clang_debug/obj/pw_string/string_test.lib.string_test.cc.o
-    [ 9.5s] ACTION //pw_console/py:py.lint.mypy(//pw_build/python_toolchain:python)
-    [ 9.4s] ACTION //pw_console/py:py.lint.pylint(//pw_build/python_toolchain:python)
-    [ 6.1s] clang-tidy ../pw_log_rpc/log_service.cc
-    [ 6.1s] clang-tidy ../pw_log_rpc/log_service_test.cc
-    [ 6.1s] clang-tidy ../pw_log_rpc/rpc_log_drain.cc
-    [ 6.1s] clang-tidy ../pw_log_rpc/rpc_log_drain_test.cc
-    [ 5.4s] c++ pw_strict_host_clang_debug/obj/BUILD_DIR/pw_strict_host_clang_debug/gen/pw...
-    ... and 109 more
-
-This allows you to, at a glance, know what Ninja's currently building, which
-targets are bottlenecking the rest of the build, and which targets are taking
-an unusually long time to complete.
-
-``pw-wrap-ninja`` includes other useful functionality as well. The
-``--write-trace`` option writes a build trace to the specified path, which can
-be viewed in the `Perfetto UI <https://ui.perfetto.dev/>`_, or via Chrome's
-built-in ``chrome://tracing`` tool.
-
-CMake
-=====
-Pigweed's `CMake`_ support is provided primarily for projects that have an
-existing CMake build and wish to integrate Pigweed without switching to a new
-build system.
-
-The following command generates Ninja build files for a host build in the
-``out/cmake_host`` directory:
-
-.. code-block:: sh
-
-  cmake -B out/cmake_host -S "$PW_ROOT" -G Ninja -DCMAKE_TOOLCHAIN_FILE=$PW_ROOT/pw_toolchain/host_clang/toolchain.cmake
-
-The ``PW_ROOT`` environment variable must point to the root of the Pigweed
-directory. This variable is set by Pigweed's environment setup.
-
-Tests can be executed with the ``pw_run_tests.GROUP`` targets. To run Pigweed
-module tests, execute ``pw_run_tests.modules``:
-
-.. code-block:: sh
-
-  ninja -C out/cmake_host pw_run_tests.modules
-
-:ref:`module-pw_watch` supports CMake, so you can also run
-
-.. code-block:: sh
-
-  pw watch -C out/cmake_host pw_run_tests.modules
-
-CMake functions
----------------
-CMake convenience functions are defined in ``pw_build/pigweed.cmake``.
-
-* ``pw_add_library_generic`` -- The base helper used to instantiate CMake
-  libraries. This is meant for use in downstream projects as upstream Pigweed
-  modules are expected to use ``pw_add_library``.
-* ``pw_add_library`` -- Add an upstream Pigweed library.
-* ``pw_add_facade_generic`` -- The base helper used to instantiate facade
-  libraries. This is meant for use in downstream projects as upstream Pigweed
-  modules are expected to use ``pw_add_facade``.
-* ``pw_add_facade`` -- Declare an upstream Pigweed facade.
-* ``pw_set_backend`` -- Set the backend library to use for a facade.
-* ``pw_add_test_generic`` -- The base helper used to instantiate test targets.
-  This is meant for use in downstream projects as upstream Pigweed modules are
-  expected to use ``pw_add_test``.
-* ``pw_add_test`` -- Declare an upstream Pigweed test target.
-* ``pw_add_test_group`` -- Declare a target to group and bundle test targets.
-* ``pw_target_link_targets`` -- Helper wrapper around ``target_link_libraries``
-  which only supports CMake targets and detects when the target does not exist.
-  Note that generator expressions are not supported.
-* ``pw_add_global_compile_options`` -- Applies compilation options to all
-  targets in the build. This should only be used to add essential compilation
-  options, such as those that affect the ABI. Use ``pw_add_library`` or
-  ``target_compile_options`` to apply other compile options.
-* ``pw_add_error_target`` -- Declares target which reports a message and causes
-  a build failure only when compiled. This is useful when ``FATAL_ERROR``
-  messages cannot be used to catch problems during the CMake configuration
-  phase.
-* ``pw_parse_arguments`` -- Helper to parse CMake function arguments.
-
-See ``pw_build/pigweed.cmake`` for the complete documentation of these
-functions.
-
-Special libraries that do not fit well with these functions are created with the
-standard CMake functions, such as ``add_library`` and ``target_link_libraries``.
-
-Facades and backends
---------------------
-The CMake build uses CMake cache variables for configuring
-:ref:`facades<docs-module-structure-facades>` and backends. Cache variables are
-similar to GN's build args set with ``gn args``. Unlike GN, CMake does not
-support multi-toolchain builds, so these variables have a single global value
-per build directory.
-
-The ``pw_add_module_facade`` function declares a cache variable named
-``<module_name>_BACKEND`` for each facade. Cache variables can be awkward to
-work with, since their values only change when they're assigned, but then
-persist across CMake invocations. These variables should be set in one of the
-following ways:
-
-* Prior to setting a backend, your application should include
-  ``$ENV{PW_ROOT}/backends.cmake``. This file will setup all the backend targets
-  such that any misspelling of a facade or backend will yield a warning.
-
-  .. note::
-    Zephyr developers do not need to do this, backends can be set automatically
-    by enabling the appropriate Kconfig options.
-
-* Call ``pw_set_backend`` to set backends appropriate for the target in the
-  target's toolchain file. The toolchain file is provided to ``cmake`` with
-  ``-DCMAKE_TOOLCHAIN_FILE=<toolchain file>``.
-* Call ``pw_set_backend`` in the top-level ``CMakeLists.txt`` before other
-  CMake code executes.
-* Set the backend variable at the command line with the ``-D`` option.
-
-  .. code-block:: sh
-
-    cmake -B out/cmake_host -S "$PW_ROOT" -G Ninja \
-        -DCMAKE_TOOLCHAIN_FILE=$PW_ROOT/pw_toolchain/host_clang/toolchain.cmake \
-        -Dpw_log_BACKEND=pw_log_basic
-
-* Temporarily override a backend by setting it interactively with ``ccmake`` or
-  ``cmake-gui``.
-
-If the backend is set to a build target that does not exist, there will be an
-error message like the following:
-
-.. code-block::
-
-  CMake Error at pw_build/pigweed.cmake:257 (message):
-    my_module.my_facade's INTERFACE dep "my_nonexistent_backend" is not
-    a target.
-  Call Stack (most recent call first):
-    pw_build/pigweed.cmake:238:EVAL:1 (_pw_target_link_targets_deferred_check)
-    CMakeLists.txt:DEFERRED
-
-
-Toolchain setup
----------------
-In CMake, the toolchain is configured by setting CMake variables, as described
-in the `CMake documentation <https://cmake.org/cmake/help/latest/manual/cmake-toolchains.7.html>`_.
-These variables are typically set in a toolchain CMake file passed to ``cmake``
-with the ``-D`` option (``-DCMAKE_TOOLCHAIN_FILE=path/to/file.cmake``).
-For Pigweed embedded builds, set ``CMAKE_SYSTEM_NAME`` to the empty string
-(``""``).
-
-Toolchains may set the ``pw_build_WARNINGS`` variable to a list of ``INTERFACE``
-libraries with compilation options for Pigweed's upstream libraries. This
-defaults to a strict set of warnings. Projects may need to use less strict
-compilation warnings to compile backends exposed to Pigweed code (such as
-``pw_log``) that cannot compile with Pigweed's flags. If desired, projects can
-access these warnings by depending on ``pw_build.warnings``.
-
-Third party libraries
----------------------
-The CMake build includes third-party libraries similarly to the GN build. A
-``dir_pw_third_party_<library>`` cache variable is defined for each third-party
-dependency. The variable must be set to the absolute path of the library in
-order to use it. If the variable is empty
-(``if("${dir_pw_third_party_<library>}" STREQUAL "")``), the dependency is not
-available.
-
-Third-party dependencies are not automatically added to the build. They can be
-manually added with ``add_subdirectory`` or by setting the
-``pw_third_party_<library>_ADD_SUBDIRECTORY`` option to ``ON``.
-
-Third party variables are set like any other cache global variable in CMake. It
-is recommended to set these in one of the following ways:
-
-* Set with the CMake ``set`` function in the toolchain file or a
-  ``CMakeLists.txt`` before other CMake code executes.
-
-  .. code-block:: cmake
-
-    set(dir_pw_third_party_nanopb ${CMAKE_CURRENT_SOURCE_DIR}/external/nanopb CACHE PATH "" FORCE)
-
-* Set the variable at the command line with the ``-D`` option.
-
-  .. code-block:: sh
-
-    cmake -B out/cmake_host -S "$PW_ROOT" -G Ninja \
-        -DCMAKE_TOOLCHAIN_FILE=$PW_ROOT/pw_toolchain/host_clang/toolchain.cmake \
-        -Ddir_pw_third_party_nanopb=/path/to/nanopb
-
-* Set the variable interactively with ``ccmake`` or ``cmake-gui``.
-
-Use Pigweed from an existing CMake project
-------------------------------------------
-To use Pigweed libraries from a CMake-based project, simply include the Pigweed
-repository from a ``CMakeLists.txt``.
-
-.. code-block:: cmake
-
-  add_subdirectory(path/to/pigweed pigweed)
-
-All module libraries will be available as ``module_name`` or
-``module_name.sublibrary``.
-
-If desired, modules can be included individually.
-
-.. code-block:: cmake
-
-  add_subdirectory(path/to/pigweed/pw_some_module pw_some_module)
-  add_subdirectory(path/to/pigweed/pw_another_module pw_another_module)
-
-Bazel
-=====
-Bazel is currently very experimental, and only builds for host and ARM Cortex-M
-microcontrollers.
-
-Wrapper rules
--------------
-The common configuration for Bazel for all modules is in the ``pigweed.bzl``
-file. The built-in Bazel rules ``cc_binary``, ``cc_library``, and ``cc_test``
-are wrapped with ``pw_cc_binary``, ``pw_cc_library``, and ``pw_cc_test``.
-These wrappers add parameters to calls to the compiler and linker.
-
-pw_linker_script
-----------------
-In addition to wrapping the built-in rules, Pigweed also provides a custom
-rule for handling linker scripts with Bazel. e.g.
-
-.. code-block:: python
-
-  pw_linker_script(
-    name = "some_linker_script",
-    linker_script = ":some_configurable_linker_script.ld",
-    defines = [
-        "PW_BOOT_FLASH_BEGIN=0x08000200",
-        "PW_BOOT_FLASH_SIZE=1024K",
-        "PW_BOOT_HEAP_SIZE=112K",
-        "PW_BOOT_MIN_STACK_SIZE=1K",
-        "PW_BOOT_RAM_BEGIN=0x20000000",
-        "PW_BOOT_RAM_SIZE=192K",
-        "PW_BOOT_VECTOR_TABLE_BEGIN=0x08000000",
-        "PW_BOOT_VECTOR_TABLE_SIZE=512",
-    ],
-  )
-
-  pw_cc_binary(
-    name = "some_binary",
-    srcs = ["some_source.c"],
-    additional_linker_inputs = [":some_linker_script"],
-    linkopts = ["-T $(location :some_linker_script)"],
-  )
-
-pw_cc_facade
-------------
-In Bazel, a :ref:`facade <docs-module-structure-facades>` module has a few
-components:
-
-#. The **facade target**, i.e. the interface to the module. This is what
-   *backend implementations* depend on to know what interface they're supposed
-   to implement.  The facade is declared by creating a ``pw_cc_facade`` target,
-   which is just a thin wrapper for ``cc_library``. For example,
-
-   .. code-block:: python
-
-     pw_cc_facade(
-         name = "binary_semaphore_facade",
-         # The header that constitutes the facade.
-         hdrs = [
-             "public/pw_sync/binary_semaphore.h",
-         ],
-         includes = ["public"],
-         # Dependencies of this header.
-         deps = [
-             "//pw_chrono:system_clock",
-             "//pw_preprocessor",
-         ],
-     )
-
-   .. note::
-     As pure interfaces, ``pw_cc_facade`` targets should not include any source
-     files. Backend-independent source files should be placed in the "library
-     target" instead.
-
-#. The **library target**, i.e. both the facade (interface) and backend
-   (implementation). This is what *users of the module* depend on. It's a
-   regular ``pw_cc_library`` that exposes the same headers as the facade, but
-   has a dependency on the "backend label flag" (discussed next). It may also
-   include some source files (if these are backend-independent). For example,
-
-   .. code-block:: python
-
-     pw_cc_library(
-         name = "binary_semaphore",
-         # A backend-independent source file.
-         srcs = [
-             "binary_semaphore.cc",
-         ],
-         # The same header as exposed by the facade.
-         hdrs = [
-             "public/pw_sync/binary_semaphore.h",
-         ],
-         deps = [
-             # Dependencies of this header
-             "//pw_chrono:system_clock",
-             "//pw_preprocessor",
-             # The backend, hidden behind a label_flag.
-             "@pigweed_config//:pw_sync_binary_semaphore_backend",
-         ],
-     )
-
-   .. note::
-     You may be tempted to reduce duplication in the BUILD.bazel files and
-     simply add the facade target to the ``deps`` of the library target,
-     instead of re-declaring the facade's ``hdrs`` and ``deps``. *Do not do
-     this!* It's a layering check violation: the facade headers provide the
-     module's interface, and should be directly exposed by the target the users
-     depend on.
-
-#. The **backend label flag**. This is a `label_flag
-   <https://bazel.build/extending/config#label-typed-build-settings>`_: a
-   dependency edge in the build graph that can be overridden by downstream projects.
-   For facades defined in upstream Pigweed, the ``label_flags`` are collected in
-   the :ref:`pigweed_config <docs-build_system-bazel_configuration>`.
-
-#. The **backend target** implements a particular backend for a facade. It's
-   just a plain ``pw_cc_library``, with a dependency on the facade target. For example,
-
-   .. code-block:: python
-
-     pw_cc_library(
-         name = "binary_semaphore",
-         srcs = [
-             "binary_semaphore.cc",
-         ],
-         hdrs = [
-             "public/pw_sync_stl/binary_semaphore_inline.h",
-             "public/pw_sync_stl/binary_semaphore_native.h",
-             "public_overrides/pw_sync_backend/binary_semaphore_inline.h",
-             "public_overrides/pw_sync_backend/binary_semaphore_native.h",
-         ],
-         includes = [
-             "public",
-             "public_overrides",
-         ],
-         deps = [
-             # Dependencies of the backend's headers and sources.
-             "//pw_assert",
-             "//pw_chrono:system_clock",
-             # A dependency on the facade target, which defines the interface
-             # this backend target implements.
-             "//pw_sync:binary_semaphore_facade",
-         ],
-     )
-
-   If a project uses only one backend for a given facade, the backend label
-   flag should point at that backend target.
-
-#. The **facade constraint setting** and **backend constraint values**. Every
-   facade has an associated `constraint setting
-   <https://bazel.build/concepts/platforms#api-review>`_ (enum used in platform
-   definition), and each backend for this facade has an associated
-   ``constraint_value`` (enum value). Example:
-
-   .. code-block:: python
-
-     # //pw_sync/BUILD.bazel
-     constraint_setting(
-       name = "binary_semaphore_backend_constraint_setting",
-     )
-
-     # //pw_sync_stl/BUILD.bazel
-     constraint_value(
-       name = "binary_semaphore_backend",
-       constraint_setting = "//pw_sync:binary_semaphore_backend_constraint_setting",
-     )
-
-     # //pw_sync_freertos/BUILD.bazel
-     constraint_value(
-       name = "binary_semaphore_backend",
-       constraint_setting = "//pw_sync:binary_semaphore_backend_constraint_setting",
-     )
-
-   `Target platforms <https://bazel.build/extending/platforms>`_ for Pigweed
-   projects should indicate which backend they select for each facade by
-   listing the corresponding ``constraint_value`` in their definition. This can
-   be used in a couple of ways:
-
-   #.  It allows projects to switch between multiple backends based only on the
-       `target platform <https://bazel.build/extending/platforms>`_ using a
-       *backend multiplexer* (see below) instead of setting label flags in
-       their ``.bazelrc``.
-
-   #.  It allows tests or libraries that only support a particular backend to
-       express this through the `target_compatible_with
-       <https://bazel.build/reference/be/common-definitions#common.target_compatible_with>`_
-       attribute. Bazel will use this to `automatically skip incompatible
-       targets in wildcard builds
-       <https://bazel.build/extending/platforms#skipping-incompatible-targets>`_.
-
-#. The **backend multiplexer**. If a project uses more than one backend for a
-   given facade (e.g., it uses different backends for host and embedded target
-   builds), the backend label flag will point to a target that resolves to the
-   correct backend based on the target platform. This will typically be an
-   `alias <https://bazel.build/reference/be/general#alias>`_ with a ``select``
-   statement mapping constraint values to the appropriate backend targets. For
-   example,
-
-   .. code-block:: python
-
-     alias(
-         name = "pw_sync_binary_semaphore_backend_multiplexer",
-         actual = select({
-             "//pw_sync_stl:binary_semaphore_backend": "@pigweed//pw_sync_stl:binary_semaphore",
-             "//pw_sync_freertos:binary_semaphore_backend": "@pigweed//pw_sync_freertos:binary_semaphore_backend",
-             # If we're building for a host OS, use the STL backend.
-             "@platforms//os:macos": "@pigweed//pw_sync_stl:binary_semaphore",
-             "@platforms//os:linux": "@pigweed//pw_sync_stl:binary_semaphore",
-             "@platforms//os:windows": "@pigweed//pw_sync_stl:binary_semaphore",
-             # Unless the target platform is the host platform, it must
-             # explicitly specify which backend to use. The unspecified_backend
-             # is not compatible with any platform; taking this branch will produce
-             # an informative error.
-             "//conditions:default": "@pigweed//pw_build:unspecified_backend",
-         }),
-     )
-
-Toolchains and platforms
-------------------------
-Currently Pigweed is making use of a set of
-`open source <https://github.com/silvergasp/bazel-embedded>`_ toolchains. The
-host builds are only supported on Linux/Mac based systems. Additionally the
-host builds are not entirely hermetic, and will make use of system
-libraries and headers. This is close to the default configuration for Bazel,
-though slightly more hermetic. The host toolchain is based around clang-11 which
-has a system dependency on 'libtinfo.so.5' which is often included as part of
-the libncurses packages. On Debian based systems this can be installed using the
-command below:
-
-.. code-block:: sh
-
-  sudo apt install libncurses5
-
-The host toolchain does not currently support native Windows, though using WSL
-is a viable alternative.
-
-The ARM Cortex-M Bazel toolchains are based around gcc-arm-none-eabi and are
-entirely hermetic. You can target Cortex-M, by using the platforms command line
-option. This set of toolchains is supported from hosts; Windows, Mac and Linux.
-The platforms that are currently supported are listed below:
-
-.. code-block:: sh
-
-  bazel build //:your_target --platforms=@pigweed//pw_build/platforms:cortex_m0
-  bazel build //:your_target --platforms=@pigweed//pw_build/platforms:cortex_m1
-  bazel build //:your_target --platforms=@pigweed//pw_build/platforms:cortex_m3
-  bazel build //:your_target --platforms=@pigweed//pw_build/platforms:cortex_m4
-  bazel build //:your_target --platforms=@pigweed//pw_build/platforms:cortex_m7
-  bazel build //:your_target \
-    --platforms=@pigweed//pw_build/platforms:cortex_m4_fpu
-  bazel build //:your_target \
-    --platforms=@pigweed//pw_build/platforms:cortex_m7_fpu
-
-
-The above examples are cpu/fpu oriented platforms and can be used where
-applicable for your application. There are some more specific platforms for the
-types of boards that are included as examples in Pigweed. It is strongly
-encouraged that you create your own set of platforms specific for your project,
-that implement the constraint_settings in this repository. e.g.
-
-New board constraint_value:
-
-.. code-block:: python
-
-  #your_repo/build_settings/constraints/board/BUILD
-  constraint_value(
-    name = "nucleo_l432kc",
-    constraint_setting = "@pigweed//pw_build/constraints/board",
-  )
-
-New chipset constraint_value:
-
-.. code-block:: python
-
-  # your_repo/build_settings/constraints/chipset/BUILD
-  constraint_value(
-    name = "stm32l432kc",
-    constraint_setting = "@pigweed//pw_build/constraints/chipset",
-  )
-
-New platforms for chipset and board:
-
-.. code-block:: python
-
-  #your_repo/build_settings/platforms/BUILD
-  # Works with all stm32l432kc
-  platforms(
-    name = "stm32l432kc",
-    parents = ["@pigweed//pw_build/platforms:cortex_m4"],
-    constraint_values =
-      ["@your_repo//build_settings/constraints/chipset:stm32l432kc"],
-  )
-
-  # Works with only the nucleo_l432kc
-  platforms(
-    name = "nucleo_l432kc",
-    parents = [":stm32l432kc"],
-    constraint_values =
-      ["@your_repo//build_settings/constraints/board:nucleo_l432kc"],
-  )
-
-In the above example you can build your code with the command line:
-
-.. code-block:: python
-
-  bazel build //:your_target_for_nucleo_l432kc \
-    --platforms=@your_repo//build_settings:nucleo_l432kc
-
-
-You can also specify that a specific target is only compatible with one
-platform:
-
-.. code-block:: python
-
-  cc_library(
-    name = "compatible_with_all_stm32l432kc",
-    srcs = ["tomato_src.c"],
-    target_compatible_with =
-      ["@your_repo//build_settings/constraints/chipset:stm32l432kc"],
-  )
-
-  cc_library(
-    name = "compatible_with_only_nucleo_l432kc",
-    srcs = ["bbq_src.c"],
-    target_compatible_with =
-      ["@your_repo//build_settings/constraints/board:nucleo_l432kc"],
-  )
-
+   gn
+   cmake
+   bazel
diff --git a/pw_build/generated_pigweed_modules_lists.gni b/pw_build/generated_pigweed_modules_lists.gni
index c9ebaab..f4e33a6 100644
--- a/pw_build/generated_pigweed_modules_lists.gni
+++ b/pw_build/generated_pigweed_modules_lists.gni
@@ -54,6 +54,7 @@
   dir_pw_build_mcuxpresso = get_path_info("../pw_build_mcuxpresso", "abspath")
   dir_pw_bytes = get_path_info("../pw_bytes", "abspath")
   dir_pw_checksum = get_path_info("../pw_checksum", "abspath")
+  dir_pw_chre = get_path_info("../pw_chre", "abspath")
   dir_pw_chrono = get_path_info("../pw_chrono", "abspath")
   dir_pw_chrono_embos = get_path_info("../pw_chrono_embos", "abspath")
   dir_pw_chrono_freertos = get_path_info("../pw_chrono_freertos", "abspath")
@@ -145,6 +146,7 @@
   dir_pw_sync_threadx = get_path_info("../pw_sync_threadx", "abspath")
   dir_pw_sync_zephyr = get_path_info("../pw_sync_zephyr", "abspath")
   dir_pw_sys_io = get_path_info("../pw_sys_io", "abspath")
+  dir_pw_sys_io_ambiq_sdk = get_path_info("../pw_sys_io_ambiq_sdk", "abspath")
   dir_pw_sys_io_arduino = get_path_info("../pw_sys_io_arduino", "abspath")
   dir_pw_sys_io_baremetal_lm3s6965evb =
       get_path_info("../pw_sys_io_baremetal_lm3s6965evb", "abspath")
@@ -216,6 +218,7 @@
     dir_pw_build_mcuxpresso,
     dir_pw_bytes,
     dir_pw_checksum,
+    dir_pw_chre,
     dir_pw_chrono,
     dir_pw_chrono_embos,
     dir_pw_chrono_freertos,
@@ -300,6 +303,7 @@
     dir_pw_sync_threadx,
     dir_pw_sync_zephyr,
     dir_pw_sys_io,
+    dir_pw_sys_io_ambiq_sdk,
     dir_pw_sys_io_arduino,
     dir_pw_sys_io_baremetal_lm3s6965evb,
     dir_pw_sys_io_baremetal_stm32f429,
@@ -363,6 +367,7 @@
     "$dir_pw_build_mcuxpresso:tests",
     "$dir_pw_bytes:tests",
     "$dir_pw_checksum:tests",
+    "$dir_pw_chre:tests",
     "$dir_pw_chrono:tests",
     "$dir_pw_chrono_embos:tests",
     "$dir_pw_chrono_freertos:tests",
@@ -447,6 +452,7 @@
     "$dir_pw_sync_threadx:tests",
     "$dir_pw_sync_zephyr:tests",
     "$dir_pw_sys_io:tests",
+    "$dir_pw_sys_io_ambiq_sdk:tests",
     "$dir_pw_sys_io_arduino:tests",
     "$dir_pw_sys_io_baremetal_lm3s6965evb:tests",
     "$dir_pw_sys_io_baremetal_stm32f429:tests",
@@ -510,6 +516,7 @@
     "$dir_pw_build_mcuxpresso:docs",
     "$dir_pw_bytes:docs",
     "$dir_pw_checksum:docs",
+    "$dir_pw_chre:docs",
     "$dir_pw_chrono:docs",
     "$dir_pw_chrono_embos:docs",
     "$dir_pw_chrono_freertos:docs",
@@ -594,6 +601,7 @@
     "$dir_pw_sync_threadx:docs",
     "$dir_pw_sync_zephyr:docs",
     "$dir_pw_sys_io:docs",
+    "$dir_pw_sys_io_ambiq_sdk:docs",
     "$dir_pw_sys_io_arduino:docs",
     "$dir_pw_sys_io_baremetal_lm3s6965evb:docs",
     "$dir_pw_sys_io_baremetal_stm32f429:docs",
diff --git a/pw_build/gn.rst b/pw_build/gn.rst
new file mode 100644
index 0000000..997bcbd
--- /dev/null
+++ b/pw_build/gn.rst
@@ -0,0 +1,1315 @@
+GN / Ninja
+==========
+The GN / Ninja build system is the primary build system used for upstream
+Pigweed development, and is the most tested and feature-rich build system
+Pigweed offers.
+
+This module's ``build.gn`` file contains a number of C/C++ ``config``
+declarations that are used by upstream Pigweed to set some architecture-agnostic
+compiler defaults. (See Pigweed's ``//BUILDCONFIG.gn``)
+
+``pw_build`` also provides several useful GN templates that are used throughout
+Pigweed.
+
+Build system philosophies
+-------------------------
+While Pigweed's GN build is not hermetic, it strives to adhere to principles of
+`hermeticity <https://bazel.build/concepts/hermeticity>`_. Some guidelines to
+move towards the ideal of hermeticity include:
+
+* Only rely on pre-compiled tools provided by CIPD (or some other versioned,
+  pre-compiled binary distribution mechanism). This eliminates build artifact
+  differences caused by different tool versions or variations (e.g. same tool
+  version built with slightly different compilation flags).
+* Do not use absolute paths in Ninja commands. Typically, these appear when
+  using ``rebase_path("//path/to/my_script.py")``. Most of the time, Ninja
+  steps should be passed paths rebased relative to the build directory (i.e.
+  ``rebase_path("//path/to/my_script.py", root_build_dir)``). This ensures build
+  commands are the same across different machines.
+* Prevent produced artifacts from relying on or referencing system state. This
+  includes time stamps, writing absolute paths to generated artifacts, or
+  producing artifacts that reference system state in a way that prevents them
+  from working the same way on a different machine.
+* Isolate build actions to the build directory. In general, the build system
+  should not add or modify files outside of the build directory. This can cause
+  confusion to users, and makes the concept of a clean build more ambiguous.
+
+Target types
+------------
+.. code-block::
+
+   import("$dir_pw_build/target_types.gni")
+
+   pw_source_set("my_library") {
+     sources = [ "lib.cc" ]
+   }
+
+Pigweed defines wrappers around the four basic GN binary types ``source_set``,
+``executable``, ``static_library``, and ``shared_library``. These templates
+do several things:
+
+#. **Add default configs/deps**
+
+   Rather than binding the majority of compiler flags related to C++ standard,
+   cross-compilation, warning/error policy, etc.  directly to toolchain
+   invocations, these flags are applied as configs to all ``pw_*`` C/C++ target
+   types. The primary motivations for this are to allow some targets to modify
+   the default set of flags when needed by specifying ``remove_configs``, and to
+   reduce the complexity of building novel toolchains.
+
+   Pigweed's global default configs are set in ``pw_build/default.gni``, and
+   individual platform-specific toolchains extend the list by appending to the
+   ``default_configs`` build argument.
+
+   Default deps were added to support polyfill, which has since been deprecated.
+   Default dependency functionality continues to exist for backwards
+   compatibility.
+
+#. **Optionally add link-time binding**
+
+   Some libraries like pw_assert and pw_log are borderline impossible to
+   implement well without introducing circular dependencies. One solution for
+   addressing this is to break apart the libraries into an interface with
+   minimal dependencies, and an implementation with the bulk of the
+   dependencies that would typically create dependency cycles. In order for the
+   implementation to be linked in, it must be added to the dependency tree of
+   linked artifacts (e.g. ``pw_executable``, ``pw_static_library``). Since
+   there's no way for the libraries themselves to just happily pull in the
+   implementation if someone depends on the interface, the implementation is
+   instead late-bound by adding it as a direct dependency of the final linked
+   artifact. This is all managed through ``pw_build_LINK_DEPS``, which is global
+   for each toolchain and applied to every ``pw_executable``,
+   ``pw_static_library``, and ``pw_shared_library``.
+
+#. **Apply a default visibility policy**
+
+   Projects can globally control the default visibility of pw_* target types by
+   specifying ``pw_build_DEFAULT_VISIBILITY``. This template applies that as the
+   default visibility for any pw_* targets that do not explicitly specify a
+   visibility.
+
+#. **Add source file names as metadata**
+
+   All source file names are collected as
+   `GN metadata <https://gn.googlesource.com/gn/+/main/docs/reference.md#metadata_collection>`_.
+   This list can be written to a file at build time using ``generated_file``. The
+   primary use case for this is to generate a token database containing all the
+   source files. This allows :c:macro:`PW_ASSERT` to emit filename tokens even
+   though it can't add them to the elf file because of the reasons described at
+   :ref:`module-pw_assert-assert-api`.
+
+   .. note::
+      ``pw_source_files``, if not rebased, will default to outputting module
+      relative paths from a ``generated_file`` target.  This is likely not
+      useful. Adding a ``rebase`` argument to ``generated_file`` such as
+      ``rebase = root_build_dir`` will result in usable paths.  For an example,
+      see ``//pw_tokenizer/database.gni``'s ``pw_tokenizer_filename_database``
+      template.
+
+The ``pw_executable`` template provides additional functionality around building
+complete binaries. As Pigweed is a collection of libraries, it does not know how
+its final targets are built. ``pw_executable`` solves this by letting each user
+of Pigweed specify a global executable template for their target, and have
+Pigweed build against it. This is controlled by the build variable
+``pw_executable_config.target_type``, specifying the name of the executable
+template for a project.
+
+In some uncommon cases, a project's ``pw_executable`` template definition may
+need to stamp out some ``pw_source_set``\s. Since a pw_executable template can't
+import ``$dir_pw_build/target_types.gni`` due to circular imports, it should
+import ``$dir_pw_build/cc_library.gni`` instead.
+
+.. tip::
+
+  Prefer to use ``pw_executable`` over plain ``executable`` targets to allow
+  cleanly building the same code for multiple target configs.
+
+Arguments
+^^^^^^^^^
+All of the ``pw_*`` target type overrides accept any arguments supported by
+the underlying native types, as they simply forward them through to the
+underlying target.
+
+Additionally, the following arguments are also supported:
+
+* **remove_configs**: (optional) A list of configs / config patterns to remove
+  from the set of default configs specified by the current toolchain
+  configuration.
+* **remove_public_deps**: (optional) A list of targets to remove from the set of
+  default public_deps specified by the current toolchain configuration.
+
+.. _module-pw_build-link-deps:
+
+Link-only deps
+--------------
+It may be necessary to specify additional link-time dependencies that may not be
+explicitly depended on elsewhere in the build. One example of this is a
+``pw_assert`` backend, which may need to leave out dependencies to avoid
+circular dependencies. Its dependencies need to be linked for executables and
+libraries, even if they aren't pulled in elsewhere.
+
+The ``pw_build_LINK_DEPS`` build arg is a list of dependencies to add to all
+``pw_executable``, ``pw_static_library``, and ``pw_shared_library`` targets.
+This should only be used as a last resort when dependencies cannot be properly
+expressed in the build.
+
+.. _module-pw_build-third-party:
+
+Third party libraries
+---------------------
+Pigweed includes build files for a selection of third-party libraries. For a
+given library, these include:
+
+* ``third_party/<library>/library.gni``: Declares build arguments like
+  ``dir_pw_third_party_<library>`` that default to ``""`` but can be set to the
+  absolute path of the library in order to use it.
+* ``third_party/<library>/BUILD.gn``: Describes how to build the library. This
+  should import ``third_party/<library>/library.gni`` and refer to source paths
+  relative to ``dir_pw_third_party_<library>``.
+
+To add or update GN build files for libraries that only offer Bazel build files,
+the Python script at ``pw_build/py/pw_build/generate_3p_gn.py`` may be used.
+
+.. note::
+   The ``generate_3p_gn.py`` script is experimental, and may not work on an
+   arbitrary Bazel library.
+
+To generate or update the GN build files offered by Pigweed from a Bazel upstream
+first create a ``third_party/<library>/repo.json`` file. This file should
+describe a single JSON object, with the following fields:
+
+* ``name``: String containing the project name.
+
+  .. code-block::
+
+     "name": "FuzzTest"
+
+* ``repos``: Object mapping Bazel repositories to library names.
+
+  .. code-block::
+
+     "repos": { "com_google_absl": "abseil-cpp" }
+
+* ``aliases``: Object mapping GN labels to other GN labels. In some cases, a
+  third party library may have a dependency on another library already supported
+  by Pigweed, but with a label that differs from what the script would generate.
+  This field allows those labels to be rewritten.
+
+  .. code-block::
+
+     "aliases": {
+       "$dir_pw_third_party/googletest:gtest": "$dir_pw_third_party/googletest"
+     }
+
+* ``add``: List of labels of existing GN configs. These will be added to every
+  target in the library.
+
+  .. code-block::
+
+     "add": [ "$dir_pw_third_party/re2/configs:disabled_warnings" ]
+
+* ``remove``: List of labels of default GN configs. These will be removed from
+  every target.
+
+  .. code-block::
+
+     "remove" = [ "$dir_pw_fuzzer:instrumentation" ]
+
+* ``allow_testonly``: Boolean indicating whether to generate GN for Bazel
+  targets marked test-only. Defaults to false.
+
+  .. code-block::
+
+     "allow_testonly": true
+
+* ``no_gn_check``: List of Bazel targets that violate ``gn check``'s
+  `rules`__. Third-party targets that do not conform can be excluded.
+
+  .. code-block::
+
+     "no_gn_check": [ "//fuzztest:regexp_dfa" ]
+
+* ``extra_files``: Object mapping additional files to create to Bazel targets
+  that create them. These targets will be passed to ``bazel run`` and their
+  output saved to the named file within ``third_party/<library>``. For example:
+
+  .. code-block::
+
+     "extra_files": {
+       "fuzztest.bazelrc": "@com_google_fuzztest//bazel:setup_configs"
+     }
+
+.. __: https://gn.googlesource.com/gn/+/main/docs/reference.md#cmd_check
+
+Python packages
+---------------
+GN templates for :ref:`Python build automation <docs-python-build>` are
+described in :ref:`module-pw_build-python`.
+
+.. toctree::
+  :hidden:
+
+  python
+
+
+.. _module-pw_build-cc_blob_library:
+
+pw_cc_blob_library
+------------------
+The ``pw_cc_blob_library`` template is useful for embedding binary data into a
+program. The template takes in a mapping of symbol names to file paths, and
+generates a set of C++ source and header files that embed the contents of the
+passed-in files as arrays of ``std::byte``.
+
+The blob byte arrays are constant initialized and are safe to access at any
+time, including before ``main()``.
+
+``pw_cc_blob_library`` is also available in the CMake build. It is provided by
+``pw_build/cc_blob_library.cmake``.
+
+Arguments
+^^^^^^^^^
+* ``blobs``: A list of GN scopes, where each scope corresponds to a binary blob
+  to be transformed from file to byte array. This is a required field. Blob
+  fields include:
+
+  * ``symbol_name``: The C++ symbol for the byte array.
+  * ``file_path``: The file path for the binary blob.
+  * ``linker_section``: If present, places the byte array in the specified
+    linker section.
+  * ``alignas``: If present, uses the specified string or integer verbatim in
+    the ``alignas()`` specifier for the byte array.
+
+* ``out_header``: The header file to generate. Users will include this file
+  exactly as it is written here to reference the byte arrays.
+* ``namespace``: An optional (but highly recommended!) C++ namespace to place
+  the generated blobs within.
+
+Example
+^^^^^^^
+**BUILD.gn**
+
+.. code-block::
+
+   pw_cc_blob_library("foo_bar_blobs") {
+     blobs = [
+       {
+         symbol_name = "kFooBlob"
+         file_path = "${target_out_dir}/stuff/bin/foo.bin"
+       },
+       {
+         symbol_name = "kBarBlob"
+         file_path = "//stuff/bin/bar.bin"
+         linker_section = ".bar_section"
+       },
+     ]
+     out_header = "my/stuff/foo_bar_blobs.h"
+     namespace = "my::stuff"
+     deps = [ ":generate_foo_bin" ]
+   }
+
+.. note:: If the binary blobs are generated as part of the build, be sure to
+          list them as deps to the pw_cc_blob_library target.
+
+**Generated Header**
+
+.. code-block::
+
+   #pragma once
+
+   #include <array>
+   #include <cstddef>
+
+   namespace my::stuff {
+
+   extern const std::array<std::byte, 100> kFooBlob;
+
+   extern const std::array<std::byte, 50> kBarBlob;
+
+   }  // namespace my::stuff
+
+**Generated Source**
+
+.. code-block::
+
+   #include "my/stuff/foo_bar_blobs.h"
+
+   #include <array>
+   #include <cstddef>
+
+   #include "pw_preprocessor/compiler.h"
+
+   namespace my::stuff {
+
+   const std::array<std::byte, 100> kFooBlob = { ... };
+
+   PW_PLACE_IN_SECTION(".bar_section")
+   const std::array<std::byte, 50> kBarBlob = { ... };
+
+   }  // namespace my::stuff
+
+.. _module-pw_build-facade:
+
+pw_facade
+---------
+In their simplest form, a :ref:`facade<docs-module-structure-facades>` is a GN
+build arg used to change a dependency at compile time. Pigweed targets configure
+these facades as needed.
+
+The ``pw_facade`` template bundles a ``pw_source_set`` with a facade build arg.
+This allows the facade to provide header files, compilation options or anything
+else a GN ``source_set`` provides.
+
+The ``pw_facade`` template declares two targets:
+
+* ``$target_name``: the public-facing ``pw_source_set``, with a ``public_dep``
+  on the backend
+* ``$target_name.facade``: target used by the backend to avoid circular
+  dependencies
+
+.. code-block::
+
+   # Declares ":foo" and ":foo.facade" GN targets
+   pw_facade("foo") {
+     backend = pw_log_BACKEND
+     public_configs = [ ":public_include_path" ]
+     public = [ "public/pw_foo/foo.h" ]
+   }
+
+Low-level facades like ``pw_assert`` cannot express all of their dependencies
+due to the potential for dependency cycles. Facades with this issue may require
+backends to place their implementations in a separate build target to be listed
+in ``pw_build_LINK_DEPS`` (see :ref:`module-pw_build-link-deps`). The
+``require_link_deps`` variable in ``pw_facade`` asserts that all specified build
+targets are present in ``pw_build_LINK_DEPS`` if the facade's backend variable
+is set.
+
+.. _module-pw_build-python-action:
+
+pw_python_action
+----------------
+.. seealso::
+   - :ref:`module-pw_build-python` for all of Pigweed's Python build GN templates.
+   - :ref:`docs-python-build` for details on how the GN Python build works.
+
+The ``pw_python_action`` template is a convenience wrapper around GN's `action
+function <https://gn.googlesource.com/gn/+/main/docs/reference.md#func_action>`_
+for running Python scripts. The main benefit it provides is resolution of GN
+target labels to compiled binary files. This allows Python scripts to be written
+independently of GN, taking only filesystem paths as arguments.
+
+Another convenience provided by the template is to allow running scripts without
+any outputs. Sometimes scripts run in a build do not directly produce output
+files, but GN requires that all actions have an output. ``pw_python_action``
+solves this by accepting a boolean ``stamp`` argument which tells it to create a
+placeholder output file for the action.
+
+Arguments
+^^^^^^^^^
+``pw_python_action`` accepts all of the arguments of a regular ``action``
+target. Additionally, it has some of its own arguments:
+
+* ``module``: Run the specified Python module instead of a script. Either
+  ``script`` or ``module`` must be specified, but not both.
+* ``capture_output``: Optional boolean. If true, script output is hidden unless
+  the script fails with an error. Defaults to true.
+* ``stamp``: Optional variable indicating whether to automatically create a
+  placeholder output file for the script. This allows running scripts without
+  specifying ``outputs``. If ``stamp`` is true, a generic output file is
+  used. If ``stamp`` is a file path, that file is used as a stamp file. Like any
+  output file, ``stamp`` must be in the build directory. Defaults to false.
+* ``environment``: Optional list of strings. Environment variables to set,
+  passed as NAME=VALUE strings.
+* ``working_directory``: Optional file path. When provided the current working
+  directory will be set to this location before the Python module or script is
+  run.
+* ``command_launcher``: Optional string. Arguments to prepend to the Python
+  command, e.g. ``'/usr/bin/fakeroot --'`` will run the Python script within a
+  fakeroot environment.
+* ``venv``: Optional gn target of the pw_python_venv that should be used to run
+  this action.
+* ``python_deps``: Extra dependencies that are required for running the Python
+  script for the ``action``. This must be used with ``module`` to specify the
+  build dependency of the ``module`` if it is user-defined code.
+* ``python_metadata_deps``: Extra dependencies that are ensured completed before
+  generating a Python package metadata manifest, not the overall Python script
+  ``action``. This should rarely be used by non-Pigweed code.
+
+.. _module-pw_build-python-action-test:
+
+pw_python_action_test
+---------------------
+The ``pw_python_action_test`` extends :ref:`module-pw_build-python-action` to
+create a test that is run by a Python script, and its associated test metadata.
+
+Include action tests in the :ref:`module-pw_unit_test-pw_test_group` to produce
+the JSON metadata that :ref:`module-pw_build-test-info` adds.
+
+This template derives several additional targets:
+
+* ``<target_name>.metadata`` produces the test metadata when included in a
+  ``pw_test_group``. This metadata includes the Ninja target that runs the test.
+* If ``action`` is not provided as a label, ``<target_name>.script`` wraps a
+  ``pw_python_action`` to run the test as a standalone ``pw_python_package``.
+* ``<target_name>.group`` creates a ``pw_python_group`` in order to apply tools,
+  e.g. linters, to the standalone package.
+* ``<target_name>.lib`` is an empty group for compatibility with
+  ``pw_test_group``.
+* ``<target_name>.run`` invokes the test.
+
+Targets defined using this template will produce test metadata with a
+``test_type`` of "action_test" and a ``ninja_target`` value that will invoke the
+test when passed to Ninja, i.e. ``ninja -C out <ninja_target>``.
+
+Arguments
+^^^^^^^^^
+``pw_python_action_test`` accepts the following arguments:
+
+* All of the arguments of :ref:`module-pw_unit_test-pw_test`.
+* ``action``: An optional string or scope. If a string, this should be a label
+  to a ``pw_python_action`` target that performs the test. If a scope, this has
+  the same meaning as for ``pw_python_script``.
+* Optionally, the ``test_type`` and ``extra_metadata`` arguments of the
+  :ref:`module-pw_build-test-info` template.
+* Optionally, all of the arguments of the :ref:`module-pw_build-python-action`
+  template except ``module``, ``capture_output``, ``stamp``, and
+  ``python_metadata_deps``.
+* Optionally, all of the arguments of the ``pw_python_package`` template except
+  ``setup``, ``generate_setup``, ``tests``, ``python_test_deps``, and
+  ``proto_library``.
+
+.. _module-pw_build-test-info:
+
+pw_test_info
+------------
+``pw_test_info`` generates metadata describing tests. To produce a JSON file
+containing this metadata:
+
+#. For new modules, add a :ref:`module-pw_unit_test-pw_test_group` to the
+   BUILD.gn file. All modules are required to have a ``tests`` target.
+#. Include one or more tests or test groups via ``tests`` or ``group_deps``,
+   respectively, in the ``pw_test_group``.
+#. Set ``output_metadata`` to ``true`` in the ``pw_test_group`` definition.
+
+This template does not typically need to be used directly, unless adding new
+types of tests. It is typically used by other templates, such as the
+:ref:`module-pw_unit_test-pw_test` and the
+:ref:`module-pw_unit_test-pw_test_group`.
+
+Arguments
+^^^^^^^^^
+* ``test_type``: One of "test_group", "unit_test", "action_test", "perf_test",
+  or "fuzz_test".
+* ``test_name``: Name of the test. Defaults to the target name.
+* ``build_label``: GN label for the test. Defaults to the test name.
+* ``extra_metadata``: Additional variables to add to the metadata.
+
+Specific test templates add additional details using ``extra_metadata``. For
+example:
+
+* The :ref:`module-pw_unit_test-pw_test_group` includes its collected list of
+  tests and test groups as ``deps``.
+* The :ref:`module-pw_unit_test-pw_test` and the
+  :ref:`module-pw_perf_test-pw_perf_test` includes the ``test_directory``
+  that contains the test executable.
+* The :ref:`module-pw_build-python-action-test` includes the Ninja target that
+  can be used to invoke the Python action and run the test.
+
+Example
+^^^^^^^
+Let ``//my_module/BUILD.gn`` contain the following:
+
+.. code-block::
+
+   import("$dir_pw_build/python_action_test.gni")
+   import("$dir_pw_perf_test/perf_test.gni")
+   import("$dir_pw_unit_test/test.gni")
+
+   pw_test("my_unit_test") {
+     sources = [ ... ]
+     deps = [ ... ]
+   }
+
+   pw_python_action_test("my_action_test") {
+     script = [ ... ]
+     args = [ ... ]
+     deps = [ ... ]
+   }
+
+   pw_python_action_test("my_integration_test") {
+     script = [ ... ]
+     args = [ ... ]
+     deps = [ ... ]
+     tags = [ "integration" ]
+   }
+
+   pw_perf_test("my_perf_test") {
+     sources = [ ... ]
+     deps = [ ... ]
+   }
+
+   pw_test_group("tests") {
+     tests = [
+      ":my_unit_test",
+      ":my_action_test",
+      ":my_integration_test",
+     ]
+   }
+
+Let ``//BUILD.gn`` contain the following:
+
+.. code-block::
+
+   import("$dir_pw_unit_test/test.gni")
+
+   group("run_tests") {
+     deps = [ ":my_module_tests(//targets/my_targets:my_toolchain)" ]
+   }
+
+   pw_test_group("my_module_tests") {
+     group_deps = [ "//my_module:tests" ]
+     output_metadata = true
+   }
+
+Then running ``gn gen out`` will produce the following JSON file at
+``out/my_toolchain/my_module_tests.testinfo.json``:
+
+.. code-block:: json
+
+   [
+     {
+       "build_label": "//my_module:my_unit_test",
+       "test_directory": "my_toolchain/obj/my_module/test",
+       "test_name": "my_unit_test",
+       "test_type": "unit_test"
+     },
+     {
+       "build_label": "//my_module:my_action_test",
+       "ninja_target": "my_toolchain/obj/my_module/my_action_test.run.stamp",
+       "test_name": "my_action_test",
+       "test_type": "action_test"
+     },
+     {
+       "build_label": "//my_module:my_integration_test",
+       "ninja_target": "my_toolchain/obj/my_module/my_integration_test.run.stamp",
+       "tags": [
+         "integration"
+       ],
+       "test_name": "my_integration_test",
+       "test_type": "action_test"
+     },
+     {
+       "build_label": "//my_module:my_perf_test",
+       "test_directory": "my_toolchain/obj/my_module/test",
+       "test_name": "my_perf_test",
+       "test_type": "perf_test"
+     },
+     {
+       "build_label": "//my_module:tests",
+       "deps": [
+         "//my_module:my_unit_test",
+         "//my_module:my_action_test",
+         "//my_module:my_integration_test",
+       ],
+       "test_name": "my_module/tests",
+       "test_type": "test_group"
+     },
+     {
+       "build_label": "//:my_module_tests",
+       "deps": [
+         "//my_module:tests",
+       ],
+       "test_name": "my_module_tests",
+       "test_type": "test_group"
+     }
+   ]
+
+.. _module-pw_build-python-action-expressions:
+
+Expressions
+^^^^^^^^^^^
+``pw_python_action`` evaluates expressions in ``args``, the arguments passed to
+the script. These expressions function similarly to generator expressions in
+CMake. Expressions may be passed as a standalone argument or as part of another
+argument. A single argument may contain multiple expressions.
+
+Generally, these expressions are used within templates rather than directly in
+BUILD.gn files. This allows build code to use GN labels without having to worry
+about converting them to files.
+
+.. note::
+
+  We intend to replace these expressions with native GN features when possible.
+  See `b/234886742 <http://issuetracker.google.com/234886742>`_.
+
+The following expressions are supported:
+
+.. describe:: <TARGET_FILE(gn_target)>
+
+  Evaluates to the output file of the provided GN target. For example, the
+  expression
+
+  .. code-block::
+
+     "<TARGET_FILE(//foo/bar:static_lib)>"
+
+  might expand to
+
+  .. code-block::
+
+     "/home/User/project_root/out/obj/foo/bar/static_lib.a"
+
+  ``TARGET_FILE`` parses the ``.ninja`` file for the GN target, so it should
+  always find the correct output file, regardless of the toolchain's or target's
+  configuration. Some targets, such as ``source_set`` and ``group`` targets, do
+  not have an output file, and attempting to use ``TARGET_FILE`` with them
+  results in an error.
+
+  ``TARGET_FILE`` only resolves GN target labels to their outputs. To resolve
+  paths generally, use the standard GN approach of applying the
+  ``rebase_path(path, root_build_dir)`` function. This function
+  converts the provided GN path or list of paths to be relative to the build
+  directory, from which all build commands and scripts are executed.
+
+.. describe:: <TARGET_FILE_IF_EXISTS(gn_target)>
+
+  ``TARGET_FILE_IF_EXISTS`` evaluates to the output file of the provided GN
+  target, if the output file exists. If the output file does not exist, the
+  entire argument that includes this expression is omitted, even if there is
+  other text or another expression.
+
+  For example, consider this expression:
+
+  .. code-block::
+
+     "--database=<TARGET_FILE_IF_EXISTS(//alpha/bravo)>"
+
+  If the ``//alpha/bravo`` target file exists, this might expand to the
+  following:
+
+  .. code-block::
+
+     "--database=/home/User/project/out/obj/alpha/bravo/bravo.elf"
+
+  If the ``//alpha/bravo`` target file does not exist, the entire
+  ``--database=`` argument is omitted from the script arguments.
+
+.. describe:: <TARGET_OBJECTS(gn_target)>
+
+  Evaluates to the object files of the provided GN target. Expands to a separate
+  argument for each object file. If the target has no object files, the argument
+  is omitted entirely. Because it does not expand to a single expression, the
+  ``<TARGET_OBJECTS(...)>`` expression may not have leading or trailing text.
+
+  For example, the expression
+
+  .. code-block::
+
+     "<TARGET_OBJECTS(//foo/bar:a_source_set)>"
+
+  might expand to multiple separate arguments:
+
+  .. code-block::
+
+     "/home/User/project_root/out/obj/foo/bar/a_source_set.file_a.cc.o"
+     "/home/User/project_root/out/obj/foo/bar/a_source_set.file_b.cc.o"
+     "/home/User/project_root/out/obj/foo/bar/a_source_set.file_c.cc.o"
+
+Example
+^^^^^^^
+.. code-block::
+
+   import("$dir_pw_build/python_action.gni")
+
+   pw_python_action("postprocess_main_image") {
+     script = "py/postprocess_binary.py"
+     args = [
+       "--database",
+       rebase_path("my/database.csv", root_build_dir),
+       "--binary=<TARGET_FILE(//firmware/images:main)>",
+     ]
+     stamp = true
+   }
+
+.. _module-pw_build-evaluate-path-expressions:
+
+pw_evaluate_path_expressions
+----------------------------
+It is not always feasible to pass information to a script through command line
+arguments. If a script requires a large amount of input data, writing to a file
+is often more convenient. However, doing so bypasses ``pw_python_action``'s GN
+target label resolution, preventing such scripts from working with build
+artifacts in a build system-agnostic manner.
+
+``pw_evaluate_path_expressions`` is designed to address this use case. It takes
+a list of input files and resolves target expressions within them, modifying the
+files in-place.
+
+Refer to ``pw_python_action``'s :ref:`module-pw_build-python-action-expressions`
+section for the list of supported expressions.
+
+.. note::
+
+  ``pw_evaluate_path_expressions`` is typically used as an intermediate
+  sub-target of a larger template, rather than a standalone build target.
+
+Arguments
+^^^^^^^^^
+* ``files``: A list of scopes, each containing a ``source`` file to process and
+  a ``dest`` file to which to write the result.
+
+Example
+^^^^^^^
+The following template defines an executable target which additionally outputs
+the list of object files from which it was compiled, making use of
+``pw_evaluate_path_expressions`` to resolve their paths.
+
+.. code-block::
+
+   import("$dir_pw_build/evaluate_path_expressions.gni")
+
+   template("executable_with_artifacts") {
+     executable("${target_name}.exe") {
+       sources = invoker.sources
+       if (defined(invoker.deps)) {
+         deps = invoker.deps
+       }
+     }
+
+     _artifacts_input = "$target_gen_dir/${target_name}_artifacts.json.in"
+     _artifacts_output = "$target_gen_dir/${target_name}_artifacts.json"
+     _artifacts = {
+       binary = "<TARGET_FILE(:${target_name}.exe)>"
+       objects = "<TARGET_OBJECTS(:${target_name}.exe)>"
+     }
+     write_file(_artifacts_input, _artifacts, "json")
+
+     pw_evaluate_path_expressions("${target_name}.evaluate") {
+       files = [
+         {
+           source = _artifacts_input
+           dest = _artifacts_output
+         },
+       ]
+     }
+
+     group(target_name) {
+       deps = [
+         ":${target_name}.exe",
+         ":${target_name}.evaluate",
+       ]
+     }
+   }
+
+.. _module-pw_build-pw_exec:
+
+pw_exec
+-------
+``pw_exec`` allows for execution of arbitrary programs. It is a wrapper around
+``pw_python_action`` but allows for specifying the program to execute.
+
+.. note::
+
+   Prefer to use ``pw_python_action`` instead of calling out to shell
+   scripts, as the Python will be more portable. ``pw_exec`` should generally
+   only be used for interacting with legacy/existing scripts.
+
+Arguments
+^^^^^^^^^
+* ``program``: The program to run. Can be a full path or just a name (in which
+  case $PATH is searched).
+* ``args``: Optional list of arguments to the program.
+* ``deps``: Dependencies for this target.
+* ``public_deps``: Public dependencies for this target. In addition to outputs
+  from this target, outputs generated by public dependencies can be used as
+  inputs from targets that depend on this one. This is not the case for private
+  deps.
+* ``inputs``: Optional list of build inputs to the program.
+* ``outputs``: Optional list of artifacts produced by the program's execution.
+* ``env``: Optional list of key-value pairs defining environment variables for
+  the program.
+* ``env_file``: Optional path to a file containing a list of newline-separated
+  key-value pairs defining environment variables for the program.
+* ``args_file``: Optional path to a file containing additional positional
+  arguments to the program. Each line of the file is appended to the
+  invocation. Useful for specifying arguments from GN metadata.
+* ``skip_empty_args``: If args_file is provided, boolean indicating whether to
+  skip running the program if the file is empty. Used to avoid running
+  commands which error when called without arguments.
+* ``capture_output``: If true, output from the program is hidden unless the
+  program exits with an error. Defaults to true.
+* ``working_directory``: The working directory to execute the subprocess with.
+  If not specified, it will not be set and the subprocess will inherit the
+  parent process's current working directory.
+* ``venv``: Python virtualenv to pass along to the underlying
+  :ref:`module-pw_build-pw_python_action`.
+* ``visibility``: GN visibility to apply to the underlying target.
+
+Example
+^^^^^^^
+.. code-block::
+
+   import("$dir_pw_build/exec.gni")
+
+   pw_exec("hello_world") {
+     program = "/bin/sh"
+     args = [
+       "-c",
+       "echo hello \$WORLD",
+     ]
+     env = [
+       "WORLD=world",
+     ]
+   }
+
+pw_input_group
+--------------
+``pw_input_group`` defines a group of input files which are not directly
+processed by the build but are still important dependencies of later build
+steps. This is commonly used alongside metadata to propagate file dependencies
+through the build graph and force rebuilds on file modifications.
+
+For example ``pw_docgen`` defines a ``pw_doc_group`` template which outputs
+metadata from a list of input files. The metadata file is not actually part of
+the build, and so changes to any of the input files do not trigger a rebuild.
+This is problematic, as targets that depend on the metadata should rebuild when
+the inputs are modified but GN cannot express this dependency.
+
+``pw_input_group`` solves this problem by allowing a list of files to be listed
+in a target that does not output any build artifacts, causing all dependent
+targets to correctly rebuild.
+
+Arguments
+^^^^^^^^^
+``pw_input_group`` accepts all arguments that can be passed to a ``group``
+target, as well as requiring one extra:
+
+* ``inputs``: List of input files.
+
+Example
+^^^^^^^
+.. code-block::
+
+   import("$dir_pw_build/input_group.gni")
+
+   pw_input_group("foo_metadata") {
+     metadata = {
+       files = [
+         "x.foo",
+         "y.foo",
+         "z.foo",
+       ]
+     }
+     inputs = metadata.files
+   }
+
+Targets depending on ``foo_metadata`` will rebuild when any of the ``.foo``
+files are modified.
+
+pw_zip
+------
+``pw_zip`` is a target that allows users to zip up a set of input files and
+directories into a single output ``.zip`` file—a simple automation of a
+potentially repetitive task.
+
+Arguments
+^^^^^^^^^
+* ``inputs``: List of source files as well as the desired relative zip
+  destination. See below for the input syntax.
+* ``dirs``: List of entire directories to be zipped as well as the desired
+  relative zip destination. See below for the input syntax.
+* ``output``: Filename of output ``.zip`` file.
+* ``deps``: List of dependencies for the target.
+
+Input Syntax
+^^^^^^^^^^^^
+Inputs all need to follow the correct syntax:
+
+#. Path to source file or directory. Directories must end with a ``/``.
+#. The delimiter (defaults to ``>``).
+#. The desired destination of the contents within the ``.zip``. Must start
+   with ``/`` to indicate the zip root. Any number of subdirectories are
+   allowed. If the source is a file it can be put into any subdirectory of the
+   root. If the source is a file, the zip copy can also be renamed by ending
+   the zip destination with a filename (no trailing ``/``).
+
+Thus, it should look like the following: ``"[source file or dir] > /"``.
+
+Example
+^^^^^^^
+Let's say we have the following structure for a ``//source/`` directory:
+
+.. code-block::
+
+   source/
+   ├── file1.txt
+   ├── file2.txt
+   ├── file3.txt
+   └── some_dir/
+       ├── file4.txt
+       └── some_other_dir/
+           └── file5.txt
+
+And we create the following build target:
+
+.. code-block::
+
+   import("$dir_pw_build/zip.gni")
+
+   pw_zip("target_name") {
+     inputs = [
+       "//source/file1.txt > /",             # Copied to the zip root dir.
+       "//source/file2.txt > /renamed.txt",  # File renamed.
+       "//source/file3.txt > /bar/",         # File moved to the /bar/ dir.
+     ]
+
+     dirs = [
+       "//source/some_dir/ > /bar/some_dir/",  # All /some_dir/ contents copied
+                                               # as /bar/some_dir/.
+     ]
+
+     # Note on output: if the specific output directory isn't defined
+     # (such as output = "zoo.zip") then the .zip will output to the
+     # same directory as the BUILD.gn file that called the target.
+     output = "//$target_out_dir/foo.zip"  # Where the foo.zip will end up
+   }
+
+This will result in a ``.zip`` file called ``foo.zip`` stored in
+``//$target_out_dir`` with the following structure:
+
+.. code-block::
+
+   foo.zip
+   ├── bar/
+   │   ├── file3.txt
+   │   └── some_dir/
+   │       ├── file4.txt
+   │       └── some_other_dir/
+   │           └── file5.txt
+   ├── file1.txt
+   └── renamed.txt
+
+.. _module-pw_build-relative-source-file-names:
+
+pw_relative_source_file_names
+-----------------------------
+This template recursively walks the listed dependencies and collects the names
+of all the headers and source files required by the targets, and then transforms
+them such that they reflect the ``__FILE__`` when pw_build's ``relative_paths``
+config is applied. This is primarily intended for side-band generation of
+pw_tokenizer tokens so file name tokens can be utilized in places where
+pw_tokenizer is unable to embed token information as part of C/C++ compilation.
+
+This template produces a JSON file containing an array of strings (file paths
+with ``-ffile-prefix-map``-like transformations applied) that can be used to
+:ref:`generate a token database <module-pw_tokenizer-database-creation>`.
+
+Arguments
+^^^^^^^^^
+* ``deps``: A required list of targets to recursively extract file names from.
+* ``outputs``: A required array with a single element: the path to write the
+  final JSON file to.
+
+Example
+^^^^^^^
+Let's say we have the following project structure:
+
+.. code-block::
+
+   project root
+   ├── foo/
+   │   ├── foo.h
+   │   └── foo.cc
+   ├── bar/
+   │   ├── bar.h
+   │   └── bar.cc
+   ├── unused/
+   │   ├── unused.h
+   │   └── unused.cc
+   └── main.cc
+
+And a BUILD.gn at the root:
+
+.. code-block::
+
+   pw_source_set("bar") {
+     public_configs = [ ":bar_headers" ]
+     public = [ "bar/bar.h" ]
+     sources = [ "bar/bar.cc" ]
+   }
+
+   pw_source_set("foo") {
+     public_configs = [ ":foo_headers" ]
+     public = [ "foo/foo.h" ]
+     sources = [ "foo/foo.cc" ]
+     deps = [ ":bar" ]
+   }
+
+
+   pw_source_set("unused") {
+     public_configs = [ ":unused_headers" ]
+     public = [ "unused/unused.h" ]
+     sources = [ "unused/unused.cc" ]
+     deps = [ ":bar" ]
+   }
+
+   pw_executable("main") {
+     sources = [ "main.cc" ]
+     deps = [ ":foo" ]
+   }
+
+   pw_relative_source_file_names("main_source_files") {
+     deps = [ ":main" ]
+     outputs = [ "$target_gen_dir/main_source_files.json" ]
+   }
+
+The JSON file written to ``out/gen/main_source_files.json`` will contain:
+
+.. code-block::
+
+   [
+     "bar/bar.cc",
+     "bar/bar.h",
+     "foo/foo.cc",
+     "foo/foo.h",
+     "main.cc"
+   ]
+
+Since ``unused`` isn't a transitive dependency of ``main``, its source files
+are not included. Similarly, even though ``bar`` is not a direct dependency of
+``main``, its source files *are* included because ``foo`` brings in ``bar`` as
+a transitive dependency.
+
+Note how the file paths in this example are relative to the project root rather
+than being absolute paths (e.g. ``/home/user/ralph/coding/my_proj/main.cc``).
+This is a result of transformations applied to strip absolute pathing prefixes,
+matching the behavior of pw_build's ``$dir_pw_build:relative_paths`` config.
+
+Build time errors: pw_error and pw_build_assert
+-----------------------------------------------
+In Pigweed's complex, multi-toolchain GN build it is not possible to build every
+target in every configuration. GN's ``assert`` statement is not ideal for
+enforcing the correct configuration because it may prevent the GN build files or
+targets from being referred to at all, even if they aren't used.
+
+The ``pw_error`` GN template results in an error if it is executed during the
+build. These error targets can exist in the build graph, but cannot be depended
+on without an error.
+
+``pw_build_assert`` evaluates to a ``pw_error`` if a condition fails or nothing
+(an empty group) if the condition passes. Targets can add a dependency on a
+``pw_build_assert`` to enforce a condition at build time.
+
+The templates for build time errors are defined in ``pw_build/error.gni``.
+
+Generate code coverage reports: ``pw_coverage_report``
+------------------------------------------------------
+Pigweed supports generating coverage reports, in a variety of formats, for C/C++
+code using the ``pw_coverage_report`` GN template.
+
+Coverage Caveats
+^^^^^^^^^^^^^^^^
+There are currently two code coverage caveats when enabled:
+
+#. Coverage reports are only populated based on host tests that use a ``clang``
+   toolchain.
+
+#. Coverage reports will only show coverage information for headers included in
+   a test binary.
+
+These two caveats mean that all device-specific code that cannot be compiled for
+and run on the host will not be able to have reports generated for them, and
+that the existence of these files will not appear in any coverage report.
+
+Try to ensure that your code can be written in a way that it can be compiled
+into a host test for the purpose of coverage reporting, although this is
+sometimes impossible due to requiring hardware-specific APIs to be available.
+
+Coverage Instrumentation
+^^^^^^^^^^^^^^^^^^^^^^^^
+For the ``pw_coverage_report`` to generate meaningful output, you must ensure
+that it is invoked by a toolchain that instruments tests for code coverage
+collection and output.
+
+Instrumentation is controlled by two GN build arguments:
+
+- ``pw_toolchain_COVERAGE_ENABLED`` being set to ``true``.
+- ``pw_toolchain_PROFILE_SOURCE_FILES`` is an optional argument that provides a
+  list of source files to selectively collect coverage.
+
+.. note::
+
+  It is possible to also instrument binaries for UBSAN, ASAN, or TSAN at the
+  same time as coverage. However, TSAN will find issues in the coverage
+  instrumentation code and fail to properly build.
+
+This can most easily be done by using the ``host_clang_coverage`` toolchain
+provided in :ref:`module-pw_toolchain`, but you can also create custom
+toolchains that manually set these GN build arguments as well.
+
+``pw_coverage_report``
+^^^^^^^^^^^^^^^^^^^^^^
+``pw_coverage_report`` is basically a GN frontend to the ``llvm-cov``
+`tool <https://llvm.org/docs/CommandGuide/llvm-cov.html>`_ that can be
+integrated into the normal build.
+
+It can be found at ``pw_build/coverage_report.gni`` and is available through
+``import("$dir_pw_build/coverage_report.gni")``.
+
+The supported report formats are:
+
+- ``text``: A text representation of the code coverage report. This
+  format is not suitable for further machine manipulation and is instead only
+  useful for cases where a human needs to interpret the report. The text format
+  provides a nice summary, but if you desire to drill down into the coverage
+  details more, please consider using ``html`` instead.
+
+  - This is equivalent to ``llvm-cov show --format text`` and similar to
+    ``llvm-cov report``.
+
+- ``html``: A static HTML site that provides an overall coverage summary and
+  per-file information. This format is not suitable for further machine
+  manipulation and is instead only useful for cases where a human needs to
+  interpret the report.
+
+  - This is equivalent to ``llvm-cov show --format html``.
+
+- ``lcov``: A machine-friendly coverage report format. This format is not human-
+  friendly. If that is necessary, use ``text`` or ``html`` instead.
+
+  - This is equivalent to ``llvm-cov export --format lcov``.
+
+- ``json``: A machine-friendly coverage report format. This format is not human-
+  friendly. If that is necessary, use ``text`` or ``html`` instead.
+
+  - This is equivalent to ``llvm-cov export --format json``.
+
+Arguments
+"""""""""
+There are three classes of ``template`` arguments: build, coverage, and test.
+
+**Build Arguments:**
+
+- ``enable_if`` (optional): Conditionally activates coverage report generation when set to
+  a boolean expression that evaluates to ``true``. This can be used to allow
+  project builds to conditionally enable or disable coverage reports to minimize
+  work needed for certain build configurations.
+
+- ``failure_mode`` (optional/unstable): Specify the failure mode for
+  ``llvm-profdata`` (used to merge individual profraw files from ``pw_test``
+  runs). Available options are ``"any"`` (default) or ``"all"``.
+
+  - This should be considered an unstable/deprecated argument that should only
+    be used as a last resort to get a build working again. Using
+    ``failure_mode = "all"`` usually indicates that there are underlying
+    problems in the build or test infrastructure that should be independently
+    resolved. Please reach out to the Pigweed team for assistance.
+
+**Coverage Arguments:**
+
+- ``filter_paths`` (optional): List of file paths to include when generating the
+  coverage report. These cannot be regular expressions, but can be concrete file
+  or folder paths. Folder paths will allow all files in that directory or any
+  recursive child directory.
+
+  - These are passed to ``llvm-cov`` by the optional trailing positional
+    ``[SOURCES]`` arguments.
+
+- ``ignore_filename_patterns`` (optional): List of file path regular expressions
+  to ignore when generating the coverage report.
+
+  - These are passed to ``llvm-cov`` via ``--ignore-filename-regex`` named
+    parameters.
+
+**Test Arguments (one of these is required to be provided):**
+
+- ``tests``: A list of ``pw_test`` :ref:`targets<module-pw_unit_test-pw_test>`.
+
+- ``group_deps``: A list of ``pw_test_group``
+  :ref:`targets<module-pw_unit_test-pw_test_group>`.
+
+.. note::
+
+  ``tests`` and ``group_deps`` are treated exactly the same by
+  ``pw_coverage_report``, so it is not that important to ensure they are used
+  properly.
+
+Target Expansion
+""""""""""""""""
+``pw_coverage_report(<target_name>)`` expands to one concrete target for each
+report format.
+
+- ``<target_name>.text``: Generates the ``text`` coverage report.
+
+- ``<target_name>.html``: Generates the ``html`` coverage report.
+
+- ``<target_name>.lcov``: Generates the ``lcov`` coverage report.
+
+- ``<target_name>.json``: Generates the ``json`` coverage report.
+
+To use any of these targets, you need only to add a dependency on the desired
+target somewhere in your build.
+
+There is also a ``<target_name>`` target generated that is a ``group`` that adds
+a dependency on all of the format-specific targets listed above.
+
+.. note::
+  These targets are always available, even when the toolchain executing the
+  target does not support coverage or coverage is not enabled. In these cases,
+  the targets are set to empty groups.
+
+Coverage Output
+^^^^^^^^^^^^^^^
+Coverage reports are currently generated and placed into the build output
+directory associated with the path to the GN file where the
+``pw_coverage_report`` is used in a subfolder named
+``<target_name>.<report_type>``.
+
+.. note::
+
+  Due to limitations with telling GN the entire output of coverage reports
+  (stemming from per-source-file generation for HTML and text representations),
+  it is not as simple as using GN's built-in ``copy`` to be able to move these
+  coverage reports to another output location. However, it seems possible to add
+  a target that can use Python to copy the entire output directory.
+
+Improved Ninja interface
+------------------------
+Ninja includes a basic progress display, showing in a single line the number of
+targets finished, the total number of targets, and the name of the most recent
+target it has either started or finished.
+
+For additional insight into the status of the build, Pigweed includes a Ninja
+wrapper, ``pw-wrap-ninja``, that displays additional real-time information about
+the progress of the build. The wrapper is invoked the same way you'd normally
+invoke Ninja:
+
+.. code-block:: sh
+
+  pw-wrap-ninja -C out
+
+The script lists the progress of the build, as well as the list of targets that
+Ninja is currently building, along with a timer that measures how long each
+target has been building for:
+
+.. code-block::
+
+   [51.3s] Building [8924/10690] ...
+     [10.4s] c++ pw_strict_host_clang_debug/obj/pw_string/string_test.lib.string_test.cc.o
+     [ 9.5s] ACTION //pw_console/py:py.lint.mypy(//pw_build/python_toolchain:python)
+     [ 9.4s] ACTION //pw_console/py:py.lint.pylint(//pw_build/python_toolchain:python)
+     [ 6.1s] clang-tidy ../pw_log_rpc/log_service.cc
+     [ 6.1s] clang-tidy ../pw_log_rpc/log_service_test.cc
+     [ 6.1s] clang-tidy ../pw_log_rpc/rpc_log_drain.cc
+     [ 6.1s] clang-tidy ../pw_log_rpc/rpc_log_drain_test.cc
+     [ 5.4s] c++ pw_strict_host_clang_debug/obj/BUILD_DIR/pw_strict_host_clang_debug/gen/pw...
+     ... and 109 more
+
+This allows you to, at a glance, know what Ninja's currently building, which
+targets are bottlenecking the rest of the build, and which targets are taking
+an unusually long time to complete.
+
+``pw-wrap-ninja`` includes other useful functionality as well. The
+``--write-trace`` option writes a build trace to the specified path, which can
+be viewed in the `Perfetto UI <https://ui.perfetto.dev/>`_, or via Chrome's
+built-in ``chrome://tracing`` tool.
diff --git a/pw_build/pigweed.bzl b/pw_build/pigweed.bzl
index c0813fa..e5feab6 100644
--- a/pw_build/pigweed.bzl
+++ b/pw_build/pigweed.bzl
@@ -43,7 +43,7 @@
     # TODO(b/234877642): Remove this implicit dependency once we have a better
     # way to handle the facades without introducing a circular dependency into
     # the build.
-    kwargs["deps"] = kwargs["deps"] + ["@pigweed_config//:pw_assert_backend"]
+    kwargs["deps"] = kwargs["deps"] + ["@pigweed_config//:pw_assert_backend_impl"]
     _add_defaults(kwargs)
     native.cc_binary(**kwargs)
 
@@ -89,7 +89,7 @@
     # TODO(b/234877642): Remove this implicit dependency once we have a better
     # way to handle the facades without introducing a circular dependency into
     # the build.
-    kwargs["deps"] = kwargs["deps"] + ["@pigweed_config//:pw_assert_backend"]
+    kwargs["deps"] = kwargs["deps"] + ["@pigweed_config//:pw_assert_backend_impl"]
     _add_defaults(kwargs)
 
     # Some tests may include FuzzTest, which includes headers that trigger
@@ -142,7 +142,7 @@
     """
     kwargs["deps"] = kwargs.get("deps", []) + \
                      ["@pigweed//pw_perf_test:logging_main"]
-    kwargs["deps"] = kwargs["deps"] + ["@pigweed_config//:pw_assert_backend"]
+    kwargs["deps"] = kwargs["deps"] + ["@pigweed_config//:pw_assert_backend_impl"]
     _add_defaults(kwargs)
     native.cc_binary(**kwargs)
 
diff --git a/pw_build/platforms/BUILD.bazel b/pw_build/platforms/BUILD.bazel
index 744cc48..a34e7d4 100644
--- a/pw_build/platforms/BUILD.bazel
+++ b/pw_build/platforms/BUILD.bazel
@@ -96,17 +96,21 @@
 platform(
     name = "lm3s6965evb",
     constraint_values = [
+        "@pw_toolchain//constraints/arm_mcpu:cortex-m3",
         "//pw_build/constraints/chipset:lm3s6965evb",
-        "@rust_crates//:no_std",
         "//pw_sys_io_baremetal_lm3s6965evb:backend",
+        "@platforms//cpu:armv7-m",
+        "@rust_crates//:no_std",
     ],
-    parents = [":cortex_m3"],
 )
 
 platform(
     name = "nrf52833",
-    constraint_values = ["//pw_build/constraints/chipset:nrf52833"],
-    parents = [":cortex_m0"],
+    constraint_values = [
+        "@pw_toolchain//constraints/arm_mcpu:cortex-m0",
+        "//pw_build/constraints/chipset:nrf52833",
+        "@platforms//cpu:armv6-m",
+    ],
 )
 
 # --- Boards ---
@@ -144,11 +148,16 @@
         # Use the baremetal pw_sys_io backend (because the default
         # pw_sys_io_stdio backend is not compatible with FreeRTOS).
         "//pw_sys_io_baremetal_stm32f429:backend",
+        # Select cortex-m backends
+        "//pw_cpu_exception:basic_handler_backend",
+        "//pw_cpu_exception_cortex_m:entry_backend",
+        "//pw_cpu_exception_cortex_m:support_backend",
+        "//pw_interrupt_cortex_m:backend",
         # os:none means, we're not building for any host platform (Windows,
         # Linux, or Mac). The pw_sys_io_baremetal_stm32f429 backend is only
         # compatible with os:none.
         "@platforms//os:none",
+        # For toolchain resolution.
+        "@pw_toolchain//constraints/arm_mcpu:cortex-m4",
     ],
-    # Inherit from cortex_m4_fpu to use the appropriate Arm toolchain.
-    parents = [":cortex_m4_fpu"],
 )
diff --git a/pw_build/py/pw_build/create_gn_venv.py b/pw_build/py/pw_build/create_gn_venv.py
index af84ac6..f03665d 100644
--- a/pw_build/py/pw_build/create_gn_venv.py
+++ b/pw_build/py/pw_build/create_gn_venv.py
@@ -16,7 +16,9 @@
 import argparse
 import os
 import pathlib
+import platform
 import shutil
+import stat
 import sys
 import venv
 
@@ -44,6 +46,22 @@
     return parser.parse_args()
 
 
+def _rm_dir(path_to_delete: pathlib.Path) -> None:
+    """Delete a directory recursively.
+
+    On Windows if a file can't be deleted, mark it as writable then delete.
+    """
+
+    def make_writable_and_delete(_func, path, _exc_info):
+        os.chmod(path, stat.S_IWRITE)
+        os.unlink(path)
+
+    on_rm_error = None
+    if platform.system() == 'Windows':
+        on_rm_error = make_writable_and_delete
+    shutil.rmtree(path_to_delete, onerror=on_rm_error)
+
+
 def main(
     depfile: pathlib.Path,
     destination_dir: pathlib.Path,
@@ -51,7 +69,7 @@
 ) -> None:
     # Create the virtualenv.
     if destination_dir.exists():
-        shutil.rmtree(destination_dir)
+        _rm_dir(destination_dir)
     venv.create(destination_dir, symlinks=True, with_pip=True)
 
     # Write out the depfile, making sure the Python path is
diff --git a/pw_build/py/pw_build/project_builder_presubmit_runner.py b/pw_build/py/pw_build/project_builder_presubmit_runner.py
index 51e6ed3..1afc1ab 100644
--- a/pw_build/py/pw_build/project_builder_presubmit_runner.py
+++ b/pw_build/py/pw_build/project_builder_presubmit_runner.py
@@ -651,10 +651,12 @@
     presubmit_programs: Optional[Programs] = None,
     default_presubmit_step_names: Optional[List[str]] = None,
     build_recipes: Optional[List[BuildRecipe]] = None,
+    default_build_recipe_names: Optional[List[str]] = None,
     repo_root: Optional[Path] = None,
     presubmit_out_dir: Optional[Path] = None,
     package_root: Optional[Path] = None,
     default_root_logfile: Path = Path('out/build.txt'),
+    force_pw_watch: bool = False,
 ) -> int:
     """Build upstream Pigweed presubmit steps."""
     # pylint: disable=too-many-locals
@@ -735,8 +737,15 @@
         _LOG.info('')
 
     selected_build_recipes: List[BuildRecipe] = []
-    if build_recipes and hasattr(args, 'recipe'):
-        selected_build_recipes = args.recipe
+    if build_recipes:
+        if hasattr(args, 'recipe'):
+            selected_build_recipes = args.recipe
+        if not selected_build_recipes and default_build_recipe_names:
+            selected_build_recipes = [
+                recipe
+                for recipe in build_recipes
+                if recipe.display_name in default_build_recipe_names
+            ]
 
     selected_presubmit_recipes: List[BuildRecipe] = []
     if presubmit_programs and hasattr(args, 'step'):
@@ -811,7 +820,9 @@
     if project_builder.should_use_progress_bars():
         project_builder.use_stdout_proxy()
 
-    if PW_WATCH_AVAILABLE and (args.watch or args.fullscreen):
+    if PW_WATCH_AVAILABLE and (
+        force_pw_watch or (args.watch or args.fullscreen)
+    ):
         event_handler, exclude_list = watch_setup(
             project_builder,
             parallel=args.parallel,
diff --git a/pw_build/python.rst b/pw_build/python.rst
index 945d2f4..473d2ed 100644
--- a/pw_build/python.rst
+++ b/pw_build/python.rst
@@ -279,6 +279,7 @@
      setup.cfg files
      <https://setuptools.pypa.io/en/latest/userguide/declarative_config.html>`_
 
+- ``output_logs``: (Default: true) If true, pip install commands for the virtual environment write their output to log files.
 
 .. _module-pw_build-pw_python_pip_install:
 
diff --git a/pw_build/python_venv.gni b/pw_build/python_venv.gni
index 3f3abf4..6a5ae77 100644
--- a/pw_build/python_venv.gni
+++ b/pw_build/python_venv.gni
@@ -57,6 +57,8 @@
 #     this page for a setup.cfg example:
 #     https://setuptools.pypa.io/en/latest/userguide/declarative_config.html
 #
+#   output_logs: (Default: true) Commands will output logs.
+#
 template("pw_python_venv") {
   assert(defined(invoker.path), "pw_python_venv requires a 'path'")
 
@@ -77,6 +79,14 @@
                  "_generated_requirements_file",
                ])
   }
+  _output_logs = true
+  if (defined(invoker.output_logs)) {
+    _output_logs = invoker.output_logs
+  }
+  if (!defined(invoker.output_logs) ||
+      current_toolchain != pw_build_PYTHON_TOOLCHAIN) {
+    not_needed([ "_output_logs" ])
+  }
 
   _source_package_labels = []
   foreach(pkg, _source_packages) {
@@ -279,23 +289,25 @@
         _skip_installing_external_python_deps = true
         venv = get_label_info(":${invoker.target_name}", "label_no_toolchain")
 
-        _pip_install_log_file =
-            "$target_gen_dir/$target_name/pip_install_log.txt"
-
         _base_requirement_file = "$dir_pw_env_setup/py/pw_env_setup/virtualenv_setup/python_base_requirements.txt"
 
         args = [
-          "--log",
-          rebase_path(_pip_install_log_file, root_build_dir),
           "install",
           "--requirement",
           rebase_path(_base_requirement_file, root_build_dir),
         ]
+        if (_output_logs) {
+          _pip_install_log_file =
+              "$target_gen_dir/$target_name/pip_install_log.txt"
+          args += [
+            "--log",
+            rebase_path(_pip_install_log_file, root_build_dir),
+          ]
+          outputs = [ _pip_install_log_file ]
+        }
 
         # NOTE: Constraints should be ignored for this step.
 
-        outputs = [ _pip_install_log_file ]
-
         if (pw_build_PYTHON_PIP_INSTALL_OFFLINE) {
           args += [ "--no-index" ]
         }
@@ -325,19 +337,21 @@
         _skip_installing_external_python_deps = true
         venv = get_label_info(":${invoker.target_name}", "label_no_toolchain")
 
-        _pip_install_log_file =
-            "$target_gen_dir/$target_name/pip_install_log.txt"
-
-        args = [
-          "--log",
-          rebase_path(_pip_install_log_file, root_build_dir),
-        ]
-        args += pw_build_PYTHON_PIP_DEFAULT_OPTIONS
+        args = pw_build_PYTHON_PIP_DEFAULT_OPTIONS
         args += [
           "install",
           "--upgrade",
         ]
 
+        if (_output_logs) {
+          _pip_install_log_file =
+              "$target_gen_dir/$target_name/pip_install_log.txt"
+          args += [
+            "--log",
+            rebase_path(_pip_install_log_file, root_build_dir),
+          ]
+        }
+
         if (_pip_generate_hashes) {
           args += [ "--require-hashes" ]
         }
diff --git a/pw_build/test_info.gni b/pw_build/test_info.gni
index 783e070..9f3fc46 100644
--- a/pw_build/test_info.gni
+++ b/pw_build/test_info.gni
@@ -32,6 +32,7 @@
 #         "unit_test"
 #         "action_test"
 #         "perf_test"
+#         "fuzz_test"
 #   - test_name (optional): Name of the test as a string. Defaults to the
 #         target name.
 #   - build_label (optional): GN label for the test being described. Defaults to
@@ -86,6 +87,10 @@
       metadata = {
         perf_tests = [ _metadata ]
       }
+    } else if (_type == "fuzz_test") {
+      metadata = {
+        fuzz_tests = [ _metadata ]
+      }
     } else {
       assert(
           false,
diff --git a/pw_build_mcuxpresso/py/pw_build_mcuxpresso/bazel.py b/pw_build_mcuxpresso/py/pw_build_mcuxpresso/bazel.py
index bc8a845..3d6d3a9 100644
--- a/pw_build_mcuxpresso/py/pw_build_mcuxpresso/bazel.py
+++ b/pw_build_mcuxpresso/py/pw_build_mcuxpresso/bazel.py
@@ -72,17 +72,18 @@
     """
     print('cc_library(')
     _bazel_str_out('name', name, indent=1)
-    _bazel_str_list_out('defines', project.defines, indent=1)
-    _bazel_path_list_out(
-        'includes', project.include_dirs, path_prefix=path_prefix, indent=1
-    )
-    _bazel_path_list_out(
-        'hdrs', project.headers, path_prefix=path_prefix, indent=1
-    )
     _bazel_path_list_out(
         'srcs',
         project.sources + project.libs,
         path_prefix=path_prefix,
         indent=1,
     )
+    _bazel_path_list_out(
+        'hdrs', project.headers, path_prefix=path_prefix, indent=1
+    )
+    _bazel_str_list_out('defines', project.defines, indent=1)
+    _bazel_path_list_out(
+        'includes', project.include_dirs, path_prefix=path_prefix, indent=1
+    )
+
     print(')')
diff --git a/pw_checksum/docs.rst b/pw_checksum/docs.rst
index aa9a0a8..fcec3be 100644
--- a/pw_checksum/docs.rst
+++ b/pw_checksum/docs.rst
@@ -124,7 +124,7 @@
 
 Dependencies
 ============
-* ``pw_span``
+- :ref:`module-pw_span`
 
 .. _Module Configuration Options:
 
diff --git a/pw_chre/BUILD.bazel b/pw_chre/BUILD.bazel
new file mode 100644
index 0000000..e420014
--- /dev/null
+++ b/pw_chre/BUILD.bazel
@@ -0,0 +1,60 @@
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])
+
+# TODO(b/298660977): Add bazel support for CHRE.
+filegroup(
+    name = "chre",
+    srcs = [
+        "chre.cc",
+        "chre_api_re.cc",
+        "chre_empty_host_link.cc",
+        "context.cc",
+        "docs.rst",
+        "example_init.cc",
+        "host_link.cc",
+        "include",
+        "include/chre/target_platform/atomic_base.h",
+        "include/chre/target_platform/atomic_base_impl.h",
+        "include/chre/target_platform/condition_variable_base.h",
+        "include/chre/target_platform/condition_variable_impl.h",
+        "include/chre/target_platform/fatal_error.h",
+        "include/chre/target_platform/host_link_base.h",
+        "include/chre/target_platform/log.h",
+        "include/chre/target_platform/mutex_base.h",
+        "include/chre/target_platform/mutex_base_impl.h",
+        "include/chre/target_platform/platform_nanoapp_base.h",
+        "include/chre/target_platform/platform_sensor_base.h",
+        "include/chre/target_platform/platform_sensor_manager_base.h",
+        "include/chre/target_platform/platform_sensor_type_helpers_base.h",
+        "include/chre/target_platform/power_control_manager_base.h",
+        "include/chre/target_platform/static_nanoapp_init.h",
+        "include/chre/target_platform/system_timer_base.h",
+        "memory.cc",
+        "memory_manager.cc",
+        "platform_debug_dump_manager.cc",
+        "platform_nanoapp.cc",
+        "platform_pal.cc",
+        "power_control_manager.cc",
+        "public",
+        "public/pw_chre/chre.h",
+        "public/pw_chre/host_link.h",
+        "static_nanoapps.cc",
+        "system_time.cc",
+        "system_timer.cc",
+    ],
+)
diff --git a/pw_chre/BUILD.gn b/pw_chre/BUILD.gn
new file mode 100644
index 0000000..aab25a9
--- /dev/null
+++ b/pw_chre/BUILD.gn
@@ -0,0 +1,135 @@
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/module_config.gni")
+import("$dir_pw_build/target_types.gni")
+import("$dir_pw_docgen/docs.gni")
+import("$dir_pw_third_party/chre/chre.gni")
+import("$dir_pw_unit_test/test.gni")
+
+config("disable_warnings") {
+  cflags = [
+    "-Wno-nested-anon-types",
+    "-Wno-gnu-anonymous-struct",
+    "-Wno-thread-safety-analysis",
+  ]
+  visibility = [ ":*" ]
+}
+
+config("public_overrides") {
+  include_dirs = [ "include" ]
+}
+
+pw_source_set("chre_empty_host_link") {
+  sources = [ "chre_empty_host_link.cc" ]
+  deps = [ ":chre" ]
+}
+
+pw_source_set("chre_backend") {
+  sources = [
+    "chre_api_re.cc",
+    "context.cc",
+    "host_link.cc",
+    "memory.cc",
+    "memory_manager.cc",
+    "platform_debug_dump_manager.cc",
+    "platform_nanoapp.cc",
+    "platform_pal.cc",
+    "power_control_manager.cc",
+    "system_time.cc",
+    "system_timer.cc",
+  ]
+  public = [
+    "include/chre/target_platform/atomic_base.h",
+    "include/chre/target_platform/atomic_base_impl.h",
+    "include/chre/target_platform/condition_variable_base.h",
+    "include/chre/target_platform/condition_variable_impl.h",
+    "include/chre/target_platform/fatal_error.h",
+    "include/chre/target_platform/host_link_base.h",
+    "include/chre/target_platform/log.h",
+    "include/chre/target_platform/mutex_base.h",
+    "include/chre/target_platform/mutex_base_impl.h",
+    "include/chre/target_platform/platform_nanoapp_base.h",
+    "include/chre/target_platform/platform_sensor_base.h",
+    "include/chre/target_platform/platform_sensor_manager_base.h",
+    "include/chre/target_platform/platform_sensor_type_helpers_base.h",
+    "include/chre/target_platform/power_control_manager_base.h",
+    "include/chre/target_platform/static_nanoapp_init.h",
+    "include/chre/target_platform/system_timer_base.h",
+  ]
+  deps = [
+    "$dir_pw_string:format",
+    "$dir_pw_third_party/chre:chre_headers",
+  ]
+  public_deps = [
+    "$dir_pw_chrono:system_timer",
+    "$dir_pw_log",
+    "$dir_pw_sync:mutex",
+    "$dir_pw_sync:timed_thread_notification",
+  ]
+
+  public_configs = [
+    ":public_include_path",
+    ":public_overrides",
+    ":disable_warnings",
+  ]
+  remove_configs = [ "$dir_pw_build:internal_strict_warnings" ]
+}
+
+config("public_include_path") {
+  include_dirs = [ "public" ]
+  visibility = [ ":*" ]
+}
+
+pw_source_set("chre") {
+  public_configs = [ ":public_include_path" ]
+  public = [
+    "public/pw_chre/chre.h",
+    "public/pw_chre/host_link.h",
+  ]
+  sources = [ "chre.cc" ]
+  deps = [ "$dir_pw_third_party/chre" ]
+}
+
+pw_executable("chre_example") {
+  sources = [
+    "example_init.cc",
+    "static_nanoapps.cc",
+  ]
+
+  deps = [
+    ":chre",
+    ":chre_empty_host_link",
+    "$dir_pw_system",
+    "$dir_pw_third_party/chre:example_apps",
+  ]
+}
+
+group("host_example") {
+  deps = [ ":chre_example($dir_pigweed/targets/host_device_simulator:host_device_simulator.speed_optimized)" ]
+}
+
+pw_test_group("tests") {
+  enable_if = dir_pw_third_party_chre != ""
+  tests = [
+    "//third_party/chre:unit_tests",
+    "//third_party/chre:integration_tests",
+  ]
+}
+
+pw_doc_group("docs") {
+  sources = [ "docs.rst" ]
+}
diff --git a/pw_chre/chre.cc b/pw_chre/chre.cc
new file mode 100644
index 0000000..7ed7ea1
--- /dev/null
+++ b/pw_chre/chre.cc
@@ -0,0 +1,64 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_chre/chre.h"
+
+#include "chre/core/event_loop.h"
+#include "chre/core/event_loop_manager.h"
+#include "chre/core/host_comms_manager.h"
+#include "chre/core/init.h"
+#include "chre/core/static_nanoapps.h"
+
+namespace pw::chre {
+
+void Init() {
+  ::chre::init();
+  ::chre::EventLoopManagerSingleton::get()->lateInit();
+  ::chre::loadStaticNanoapps();
+  // Note: Init() does not start the event loop; call RunEventLoop().
+}
+
+void Deinit() { ::chre::deinit(); }
+
+void RunEventLoop() {
+  ::chre::EventLoopManagerSingleton::get()->getEventLoop().run();
+}
+
+void StopEventLoop() {
+  ::chre::EventLoopManagerSingleton::get()->getEventLoop().stop();
+}
+
+void SendMessageToNanoapp(uint64_t nano_app_id,
+                          uint32_t message_type,
+                          uint16_t host_endpoint,
+                          const uint8_t* data,
+                          size_t len) {
+  ::chre::HostCommsManager& manager =
+      ::chre::EventLoopManagerSingleton::get()->getHostCommsManager();
+  manager.sendMessageToNanoappFromHost(
+      nano_app_id, message_type, host_endpoint, data, len);
+}
+
+void FreeMessageToAp(MessageToApContext context) {
+  auto& hostCommsManager =
+      ::chre::EventLoopManagerSingleton::get()->getHostCommsManager();
+  hostCommsManager.onMessageToHostComplete(
+      static_cast<const ::chre::MessageToHost*>(context));
+}
+
+void SetEstimatedHostTimeOffset(int64_t offset) {
+  ::chre::SystemTime::setEstimatedHostTimeOffset(offset);
+}
+
+}  // namespace pw::chre
diff --git a/pw_chre/chre_api_re.cc b/pw_chre/chre_api_re.cc
new file mode 100644
index 0000000..5e1ff84
--- /dev/null
+++ b/pw_chre/chre_api_re.cc
@@ -0,0 +1,48 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "chre/platform/log.h"
+#include "chre/util/macros.h"
+#include "chre_api/chre/re.h"
+#include "pw_log/log.h"
+#include "pw_string/format.h"
+
+namespace {
+int ToPigweedLogLevel(enum chreLogLevel level) {
+  switch (level) {
+    case CHRE_LOG_ERROR:
+      return PW_LOG_LEVEL_ERROR;
+    case CHRE_LOG_WARN:
+      return PW_LOG_LEVEL_WARN;
+    case CHRE_LOG_INFO:
+      return PW_LOG_LEVEL_INFO;
+    default:  // CHRE_LOG_DEBUG or out-of-range values.
+      return PW_LOG_LEVEL_DEBUG;
+  }
+}
+}  // namespace
+
+DLL_EXPORT void chreLog(enum chreLogLevel level,
+                        const char* format_string,
+                        ...) {
+  char log[512];
+
+  va_list args;
+  va_start(args, format_string);
+  pw::StatusWithSize status = pw::string::FormatVaList(log, format_string, args);
+  PW_ASSERT(status.ok());
+  va_end(args);
+
+  PW_LOG(ToPigweedLogLevel(level), "CHRE", PW_LOG_FLAGS, "%s", log);
+}
diff --git a/pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h b/pw_chre/chre_empty_host_link.cc
similarity index 66%
copy from pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h
copy to pw_chre/chre_empty_host_link.cc
index af31532..31a9676 100644
--- a/pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h
+++ b/pw_chre/chre_empty_host_link.cc
@@ -1,4 +1,4 @@
-// Copyright 2020 The Pigweed Authors
+// Copyright 2023 The Pigweed Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License"); you may not
 // use this file except in compliance with the License. You may obtain a copy of
@@ -11,9 +11,15 @@
 // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 // License for the specific language governing permissions and limitations under
 // the License.
-#pragma once
 
-// The HDLC address to which to write Base64-encoded tokenized logs.
-#ifndef PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS
-#define PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS 1
-#endif  // PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS
+#include "pw_chre/chre.h"
+#include "pw_chre/host_link.h"
+
+namespace pw::chre {
+
+bool SendMessageToAp(MessageToAp message) {
+  FreeMessageToAp(message.chre_context);
+  return true;
+}
+
+}  // namespace pw::chre
diff --git a/pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h b/pw_chre/context.cc
similarity index 66%
copy from pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h
copy to pw_chre/context.cc
index af31532..aeaf7b9 100644
--- a/pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h
+++ b/pw_chre/context.cc
@@ -1,4 +1,4 @@
-// Copyright 2020 The Pigweed Authors
+// Copyright 2023 The Pigweed Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License"); you may not
 // use this file except in compliance with the License. You may obtain a copy of
@@ -11,9 +11,11 @@
 // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 // License for the specific language governing permissions and limitations under
 // the License.
-#pragma once
 
-// The HDLC address to which to write Base64-encoded tokenized logs.
-#ifndef PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS
-#define PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS 1
-#endif  // PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS
+#include "chre/platform/context.h"
+
+namespace chre {
+
+bool inEventLoopThread() { return true; }
+
+}  // namespace chre
diff --git a/pw_chre/docs.rst b/pw_chre/docs.rst
new file mode 100644
index 0000000..0fe2af3
--- /dev/null
+++ b/pw_chre/docs.rst
@@ -0,0 +1,138 @@
+.. _module-pw_chre:
+
+=======
+pw_chre
+=======
+
+.. warning::
+
+  This module is extremely experimental. Parts of this module might be broken,
+  and the module does not provide a stable API.
+
+The `Context Hub Runtime Environment <https://source.android.com/docs/core/interaction/contexthub>`_
+(CHRE) is Android's platform for developing always-on applications called
+nanoapps. These nanoapps run on a vendor-specific processor which is more power
+efficient. Nanoapps use the CHRE API, which is standardized across platforms,
+allowing them to be code-compatible across devices.
+
+This module implements a Pigweed backend to CHRE. In order to use this module,
+``dir_pw_third_party_chre`` must point to the directory to the CHRE library.
+
+-----------
+Get started
+-----------
+
+To integrate ``pw_chre`` with your project:
+
+- Call the initialization functions and start the event loop.
+- Handle messages from the application processor and connect them through to
+  the CHRE runtime.
+- Implement the functions in ``pw_chre/host_link``. This is how CHRE sends
+  messages to the application processor.
+
+
+``$pw_chre:chre_example`` runs the CHRE environment using ``pw_system``.
+This also loads several static example nanoapps from the CHRE codebase by
+compiling them into the executable. This can be a helpful reference.
+
+CHRE is implemented using the following Pigweed modules for functionality:
+
+- ``pw_chrono:system_timer``: implements getting monotonic time
+- ``pw_log``: implements logging to the application processor
+- ``pw_assert``: implements crash handling
+- ``pw_sync``:  implements mutual exclusion primitives
+- ``malloc/free``: implements virtual memory allocation
+  (This may be eventually replaced with a pigweed module)
+
+----------------------
+Current implementation
+----------------------
+
+As mentioned at the top of this document, ``pw_chre`` is extremely experimental.
+Only a few parts of CHRE have been tested. There are likely to be bugs and
+unimplemented behavior. The lists below track the current state and will
+be updated as things change.
+
+Supported and tested behavior:
+
+- Loading static nanoapps.
+- The following sample nanoapps have been run:
+  - hello_world
+  - debug_dump_world
+  - timer_world
+  - unload_tester
+  - message_world
+  - spammer
+- Logging from a nanoapp.
+- Allocating memory (although it uses malloc/free).
+- Sending messages to/from the AP.
+
+Features not implemented, but likely to be implemented in the future:
+
+- Context Hub Qualification Test Suite (CHQTS).
+- Some simulated PALS for testing (based off of CHRE's linux simulated PALs).
+- Power Management APIs, e.g.: waking the host AP and flushing messages.
+- Instructions around implementing a PAL.
+- Instructions around building and testing a nanoapp.
+- Dynamically loading nanoapps.
+
+Features that would be nice to have:
+
+- A plug-and-play implementation of AP <-> MCU flatbuffer message communication.
+- Pigweed defined facades for each PAL.
+- PAL implementations using Pigweed functionality (i.e: implementing bluetooth
+  via ``pw_bluetooth``).
+- Pigweed defined facades for core CHRE functionality, such as clock selection,
+  memory management, cache management.
+- A protobuf implementation of CHRE's flatbuffer API.
+- Cmake and Bazel build system integration.
+
+-------------
+API reference
+-------------
+.. doxygennamespace:: pw::chre
+   :members:
+
+-------------
+Porting Guide
+-------------
+The ``pw_chre`` module has completed the steps outlined for `creating a new CHRE platform`_ .
+
+.. _Creating a new CHRE platform: https://android.googlesource.com/platform/system/chre/+/refs/heads/main/doc/porting_guide.md#recommended-steps-for-porting-chre
+
+The ``pw_chre`` module still needs to be configured correctly on a new platform.
+A ``pw_chre`` user is responsible for:
+
+- Starting a thread for CHRE's event loop and calling the correct APIs.
+- Forwarding messages to/from the Application Processor (AP).
+
+-----------------------------
+Adding Optional Feature Areas
+-----------------------------
+However, ``pw_chre`` users will likely want to implement their own
+Platform Abstraction Layers (PALs). For more information see this
+`implementation guide <https://android.googlesource.com/platform/system/chre/+/refs/heads/main/doc/porting_guide.md#implementing-optional-feature-areas-e_g_pals>`_.
+
+.. list-table:: List of PALs
+   :widths: 1 1
+   :header-rows: 1
+
+   * - PAL Name
+     - Pigweed implementation available
+   * - Audio
+     - ❌
+   * - Bluetooth
+     - ❌
+   * - GNSS
+     - ❌
+   * - Sensor
+     - ❌
+   * - Wifi
+     - ❌
+   * - WWAN
+     - ❌
+
+
+For more information on a specific PAL see
+`the PAL headers <https://cs.android.com/android/platform/superproject/+/main:system/chre/pal/include/chre/pal/>`_
+or the `Linux reference PAL implementations <https://cs.android.com/android/platform/superproject/+/main:system/chre/platform/linux/>`_.
diff --git a/pw_chre/example_init.cc b/pw_chre/example_init.cc
new file mode 100644
index 0000000..5bd435c
--- /dev/null
+++ b/pw_chre/example_init.cc
@@ -0,0 +1,39 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#include "pw_chre/chre.h"
+#include "pw_system/target_hooks.h"
+#include "pw_thread/thread.h"
+
+namespace pw::system {
+namespace {
+
+pw::thread::Thread chre_thread;
+
+}  // namespace
+
+// This will run once after pw::system::Init() completes. This callback must
+// return or it will block the work queue.
+void UserAppInit() {
+  // Start the thread that is running CHRE.
+  chre_thread = pw::thread::Thread(
+      pw::system::LogThreadOptions(),
+      [](void*) {
+        pw::chre::Init();
+        pw::chre::RunEventLoop();
+        pw::chre::Deinit();
+      },
+      nullptr);
+}
+
+}  // namespace pw::system
diff --git a/pw_chre/host_link.cc b/pw_chre/host_link.cc
new file mode 100644
index 0000000..5a22d8e
--- /dev/null
+++ b/pw_chre/host_link.cc
@@ -0,0 +1,44 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "chre/platform/host_link.h"
+
+#include <utility>
+
+#include "chre/core/host_comms_manager.h"
+
+#include "pw_chre/host_link.h"
+
+namespace chre {
+
+// TODO(b/294106526): Implement these, possibly by adding a facade.
+void HostLink::flushMessagesSentByNanoapp(uint64_t) {}
+
+bool HostLink::sendMessage(const MessageToHost* message) {
+  pw::chre::MessageToAp pw_message{
+      .nanoapp_id = message->appId,
+      .message_type = message->toHostData.messageType,
+      .app_permissions = message->toHostData.appPermissions,
+      .message_permissions = message->toHostData.messagePermissions,
+      .woke_host = message->toHostData.wokeHost,
+      .data = message->message.data(),
+      .length = message->message.size(),
+      .chre_context = message,
+  };
+  return pw::chre::SendMessageToAp(std::move(pw_message));
+}
+
+void HostLinkBase::sendNanConfiguration(bool) {}
+
+}  // namespace chre
diff --git a/pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h b/pw_chre/include/chre/target_platform/atomic_base.h
similarity index 66%
copy from pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h
copy to pw_chre/include/chre/target_platform/atomic_base.h
index af31532..3462671 100644
--- a/pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h
+++ b/pw_chre/include/chre/target_platform/atomic_base.h
@@ -1,4 +1,4 @@
-// Copyright 2020 The Pigweed Authors
+// Copyright 2023 The Pigweed Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License"); you may not
 // use this file except in compliance with the License. You may obtain a copy of
@@ -11,9 +11,20 @@
 // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 // License for the specific language governing permissions and limitations under
 // the License.
+
 #pragma once
 
-// The HDLC address to which to write Base64-encoded tokenized logs.
-#ifndef PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS
-#define PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS 1
-#endif  // PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS
+#include <atomic>
+
+namespace chre {
+
+template <typename AtomicType>
+class AtomicBase {
+ protected:
+  std::atomic<AtomicType> atomic_;
+};
+
+typedef AtomicBase<bool> AtomicBoolBase;
+typedef AtomicBase<uint32_t> AtomicUint32Base;
+
+}  // namespace chre
diff --git a/pw_chre/include/chre/target_platform/atomic_base_impl.h b/pw_chre/include/chre/target_platform/atomic_base_impl.h
new file mode 100644
index 0000000..3979a52
--- /dev/null
+++ b/pw_chre/include/chre/target_platform/atomic_base_impl.h
@@ -0,0 +1,63 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#pragma once
+
+#include "chre/platform/atomic.h"
+
+namespace chre {
+
+inline AtomicBool::AtomicBool(bool starting_value) {
+  std::atomic_init(&atomic_, starting_value);
+}
+
+inline bool AtomicBool::operator=(bool desired) { return atomic_ = desired; }
+
+inline bool AtomicBool::load() const { return atomic_.load(); }
+
+inline void AtomicBool::store(bool desired) { atomic_.store(desired); }
+
+inline bool AtomicBool::exchange(bool desired) {
+  return atomic_.exchange(desired);
+}
+
+inline AtomicUint32::AtomicUint32(uint32_t startingValue) {
+  std::atomic_init(&atomic_, startingValue);
+}
+
+inline uint32_t AtomicUint32::operator=(uint32_t desired) {
+  return atomic_ = desired;
+}
+
+inline uint32_t AtomicUint32::load() const { return atomic_.load(); }
+
+inline void AtomicUint32::store(uint32_t desired) { atomic_.store(desired); }
+
+inline uint32_t AtomicUint32::exchange(uint32_t desired) {
+  return atomic_.exchange(desired);
+}
+
+inline uint32_t AtomicUint32::fetch_add(uint32_t arg) {
+  return atomic_.fetch_add(arg);
+}
+
+inline uint32_t AtomicUint32::fetch_increment() { return atomic_.fetch_add(1); }
+
+inline uint32_t AtomicUint32::fetch_sub(uint32_t arg) {
+  return atomic_.fetch_sub(arg);
+}
+
+inline uint32_t AtomicUint32::fetch_decrement() { return atomic_.fetch_sub(1); }
+
+}  // namespace chre
diff --git a/pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h b/pw_chre/include/chre/target_platform/condition_variable_base.h
similarity index 68%
copy from pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h
copy to pw_chre/include/chre/target_platform/condition_variable_base.h
index af31532..f40ab30 100644
--- a/pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h
+++ b/pw_chre/include/chre/target_platform/condition_variable_base.h
@@ -1,4 +1,4 @@
-// Copyright 2020 The Pigweed Authors
+// Copyright 2023 The Pigweed Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License"); you may not
 // use this file except in compliance with the License. You may obtain a copy of
@@ -11,9 +11,16 @@
 // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 // License for the specific language governing permissions and limitations under
 // the License.
+
 #pragma once
 
-// The HDLC address to which to write Base64-encoded tokenized logs.
-#ifndef PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS
-#define PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS 1
-#endif  // PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS
+#include "pw_sync/timed_thread_notification.h"
+
+namespace chre {
+
+class ConditionVariableBase {
+ protected:
+  pw::sync::TimedThreadNotification notification_;
+};
+
+}  // namespace chre
diff --git a/pw_chre/include/chre/target_platform/condition_variable_impl.h b/pw_chre/include/chre/target_platform/condition_variable_impl.h
new file mode 100644
index 0000000..2d826ec
--- /dev/null
+++ b/pw_chre/include/chre/target_platform/condition_variable_impl.h
@@ -0,0 +1,42 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#pragma once
+#include "chre/platform/condition_variable.h"
+#include "pw_chrono/system_clock.h"
+
+namespace chre {
+
+inline ConditionVariable::ConditionVariable() {}
+
+inline ConditionVariable::~ConditionVariable() {}
+
+inline void ConditionVariable::notify_one() { notification_.release(); }
+
+inline void ConditionVariable::wait(Mutex& mutex) {
+  mutex.unlock();
+  notification_.acquire();
+  mutex.lock();
+}
+
+inline bool ConditionVariable::wait_for(Mutex& mutex, Nanoseconds timeout) {
+  mutex.unlock();
+  auto pw_timeout = pw::chrono::SystemClock::for_at_least(
+      std::chrono::nanoseconds(timeout.toRawNanoseconds()));
+  bool did_acquire = notification_.try_acquire_for(pw_timeout);
+  mutex.lock();
+  return did_acquire;
+}
+
+}  // namespace chre
diff --git a/pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h b/pw_chre/include/chre/target_platform/fatal_error.h
similarity index 68%
copy from pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h
copy to pw_chre/include/chre/target_platform/fatal_error.h
index af31532..5b236f6 100644
--- a/pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h
+++ b/pw_chre/include/chre/target_platform/fatal_error.h
@@ -1,4 +1,4 @@
-// Copyright 2020 The Pigweed Authors
+// Copyright 2023 The Pigweed Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License"); you may not
 // use this file except in compliance with the License. You may obtain a copy of
@@ -11,9 +11,11 @@
 // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 // License for the specific language governing permissions and limitations under
 // the License.
-#pragma once
 
-// The HDLC address to which to write Base64-encoded tokenized logs.
-#ifndef PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS
-#define PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS 1
-#endif  // PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS
+#pragma once
+#include "pw_assert/assert.h"
+
+#define FATAL_ERROR_QUIT() \
+  do {                     \
+    PW_ASSERT(false);      \
+  } while (0)
diff --git a/pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h b/pw_chre/include/chre/target_platform/host_link_base.h
similarity index 67%
copy from pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h
copy to pw_chre/include/chre/target_platform/host_link_base.h
index af31532..1062161 100644
--- a/pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h
+++ b/pw_chre/include/chre/target_platform/host_link_base.h
@@ -1,4 +1,4 @@
-// Copyright 2020 The Pigweed Authors
+// Copyright 2023 The Pigweed Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License"); you may not
 // use this file except in compliance with the License. You may obtain a copy of
@@ -11,9 +11,15 @@
 // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 // License for the specific language governing permissions and limitations under
 // the License.
-#pragma once
 
-// The HDLC address to which to write Base64-encoded tokenized logs.
-#ifndef PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS
-#define PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS 1
-#endif  // PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS
+#pragma once
+namespace chre {
+
+// TODO(b/294106526): This class will likely need a facade since this will be
+// implemented in downstream products.
+class HostLinkBase {
+ public:
+  void sendNanConfiguration(bool enable);
+};
+
+}  // namespace chre
diff --git a/pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h b/pw_chre/include/chre/target_platform/log.h
similarity index 68%
copy from pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h
copy to pw_chre/include/chre/target_platform/log.h
index af31532..58aad29 100644
--- a/pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h
+++ b/pw_chre/include/chre/target_platform/log.h
@@ -1,4 +1,4 @@
-// Copyright 2020 The Pigweed Authors
+// Copyright 2023 The Pigweed Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License"); you may not
 // use this file except in compliance with the License. You may obtain a copy of
@@ -11,9 +11,14 @@
 // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 // License for the specific language governing permissions and limitations under
 // the License.
+
 #pragma once
 
-// The HDLC address to which to write Base64-encoded tokenized logs.
-#ifndef PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS
-#define PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS 1
-#endif  // PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS
+#include <cstddef>
+
+#include "pw_log/log.h"
+
+#define LOGW(...) PW_LOG_WARN(__VA_ARGS__)
+#define LOGE(...) PW_LOG_ERROR(__VA_ARGS__)
+#define LOGI(...) PW_LOG_INFO(__VA_ARGS__)
+#define LOGD(...) PW_LOG_DEBUG(__VA_ARGS__)
diff --git a/pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h b/pw_chre/include/chre/target_platform/mutex_base.h
similarity index 68%
copy from pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h
copy to pw_chre/include/chre/target_platform/mutex_base.h
index af31532..b5e42b0 100644
--- a/pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h
+++ b/pw_chre/include/chre/target_platform/mutex_base.h
@@ -1,4 +1,4 @@
-// Copyright 2020 The Pigweed Authors
+// Copyright 2023 The Pigweed Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License"); you may not
 // use this file except in compliance with the License. You may obtain a copy of
@@ -11,9 +11,10 @@
 // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 // License for the specific language governing permissions and limitations under
 // the License.
-#pragma once
 
-// The HDLC address to which to write Base64-encoded tokenized logs.
-#ifndef PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS
-#define PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS 1
-#endif  // PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS
+#pragma once
+#include "pw_sync/mutex.h"
+
+struct MutexBase {
+  pw::sync::Mutex mutex_;
+};
diff --git a/pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h b/pw_chre/include/chre/target_platform/mutex_base_impl.h
similarity index 63%
copy from pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h
copy to pw_chre/include/chre/target_platform/mutex_base_impl.h
index af31532..2158138 100644
--- a/pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h
+++ b/pw_chre/include/chre/target_platform/mutex_base_impl.h
@@ -1,4 +1,4 @@
-// Copyright 2020 The Pigweed Authors
+// Copyright 2023 The Pigweed Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License"); you may not
 // use this file except in compliance with the License. You may obtain a copy of
@@ -11,9 +11,21 @@
 // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 // License for the specific language governing permissions and limitations under
 // the License.
+
 #pragma once
 
-// The HDLC address to which to write Base64-encoded tokenized logs.
-#ifndef PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS
-#define PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS 1
-#endif  // PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS
+#include "chre/platform/mutex.h"
+
+namespace chre {
+
+inline Mutex::Mutex() {}
+
+inline Mutex::~Mutex() {}
+
+inline void Mutex::lock() { mutex_.lock(); }
+
+inline bool Mutex::try_lock() { return mutex_.try_lock(); }
+
+inline void Mutex::unlock() { mutex_.unlock(); }
+
+}  // namespace chre
diff --git a/pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h b/pw_chre/include/chre/target_platform/platform_nanoapp_base.h
similarity index 64%
copy from pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h
copy to pw_chre/include/chre/target_platform/platform_nanoapp_base.h
index af31532..6f12d1e 100644
--- a/pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h
+++ b/pw_chre/include/chre/target_platform/platform_nanoapp_base.h
@@ -1,4 +1,4 @@
-// Copyright 2020 The Pigweed Authors
+// Copyright 2023 The Pigweed Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License"); you may not
 // use this file except in compliance with the License. You may obtain a copy of
@@ -11,9 +11,19 @@
 // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 // License for the specific language governing permissions and limitations under
 // the License.
+
 #pragma once
 
-// The HDLC address to which to write Base64-encoded tokenized logs.
-#ifndef PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS
-#define PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS 1
-#endif  // PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS
+#include "chre/platform/shared/nanoapp_support_lib_dso.h"
+
+namespace chre {
+
+class PlatformNanoappBase {
+ public:
+  void loadStatic(const struct chreNslNanoappInfo* app_info);
+
+ protected:
+  const struct chreNslNanoappInfo* app_info_ = nullptr;
+};
+
+}  // namespace chre
diff --git a/pw_chre/include/chre/target_platform/platform_sensor_base.h b/pw_chre/include/chre/target_platform/platform_sensor_base.h
new file mode 100644
index 0000000..d87095d
--- /dev/null
+++ b/pw_chre/include/chre/target_platform/platform_sensor_base.h
@@ -0,0 +1,43 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#pragma once
+#include "chre_api/chre/sensor.h"
+
+namespace chre {
+
+class PlatformSensorBase {
+ public:
+  void initBase(const struct chreSensorInfo* sensor_info,
+                uint32_t sensor_handle) {
+    sensor_info_ = sensor_info;
+    sensor_handle_ = sensor_handle;
+  }
+
+  void setSensorInfo(const struct chreSensorInfo* sensor_info) {
+    sensor_info_ = sensor_info;
+  }
+
+  void setSensorHandle(uint32_t sensor_handle) {
+    sensor_handle_ = sensor_handle;
+  }
+
+  uint32_t getSensorHandle() const { return sensor_handle_; }
+
+ protected:
+  const struct chreSensorInfo* sensor_info_;
+  uint32_t sensor_handle_;
+};
+
+}  // namespace chre
diff --git a/pw_chre/include/chre/target_platform/platform_sensor_manager_base.h b/pw_chre/include/chre/target_platform/platform_sensor_manager_base.h
new file mode 100644
index 0000000..1c425bd
--- /dev/null
+++ b/pw_chre/include/chre/target_platform/platform_sensor_manager_base.h
@@ -0,0 +1,38 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#pragma once
+#include <cstdint>
+
+#include "chre/pal/sensor.h"
+
+namespace chre {
+
+class PlatformSensorManagerBase {
+ public:
+  // Note: these are load bearing names
+  static const chrePalSensorCallbacks sSensorCallbacks;
+  const chrePalSensorApi* mSensorApi;
+
+ private:
+  static void samplingStatusUpdateCallback(
+      uint32_t sensor_handle, struct chreSensorSamplingStatus* status);
+  static void dataEventCallback(uint32_t sensor_handle, void* data);
+  static void biasEventCallback(uint32_t sensor_handle, void* bias_data);
+  static void flushCompleteCallback(uint32_t sensor_handle,
+                                    uint32_t flush_request_id,
+                                    uint8_t error_code);
+};
+
+}  // namespace chre
diff --git a/pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h b/pw_chre/include/chre/target_platform/platform_sensor_type_helpers_base.h
similarity index 68%
copy from pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h
copy to pw_chre/include/chre/target_platform/platform_sensor_type_helpers_base.h
index af31532..072f886 100644
--- a/pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h
+++ b/pw_chre/include/chre/target_platform/platform_sensor_type_helpers_base.h
@@ -1,4 +1,4 @@
-// Copyright 2020 The Pigweed Authors
+// Copyright 2023 The Pigweed Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License"); you may not
 // use this file except in compliance with the License. You may obtain a copy of
@@ -11,9 +11,10 @@
 // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 // License for the specific language governing permissions and limitations under
 // the License.
-#pragma once
 
-// The HDLC address to which to write Base64-encoded tokenized logs.
-#ifndef PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS
-#define PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS 1
-#endif  // PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS
+#pragma once
+namespace chre {
+
+class PlatformSensorTypeHelpersBase {};
+
+}  // namespace chre
diff --git a/pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h b/pw_chre/include/chre/target_platform/power_control_manager_base.h
similarity index 68%
copy from pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h
copy to pw_chre/include/chre/target_platform/power_control_manager_base.h
index af31532..c1d00d3 100644
--- a/pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h
+++ b/pw_chre/include/chre/target_platform/power_control_manager_base.h
@@ -1,4 +1,4 @@
-// Copyright 2020 The Pigweed Authors
+// Copyright 2023 The Pigweed Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License"); you may not
 // use this file except in compliance with the License. You may obtain a copy of
@@ -11,9 +11,11 @@
 // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 // License for the specific language governing permissions and limitations under
 // the License.
+
 #pragma once
 
-// The HDLC address to which to write Base64-encoded tokenized logs.
-#ifndef PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS
-#define PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS 1
-#endif  // PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS
+namespace chre {
+
+class PowerControlManagerBase {};
+
+}  // namespace chre
diff --git a/pw_chre/include/chre/target_platform/static_nanoapp_init.h b/pw_chre/include/chre/target_platform/static_nanoapp_init.h
new file mode 100644
index 0000000..6f2c091
--- /dev/null
+++ b/pw_chre/include/chre/target_platform/static_nanoapp_init.h
@@ -0,0 +1,50 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#pragma once
+
+#include "chre/core/static_nanoapps.h"
+#include "chre/platform/fatal_error.h"
+#include "chre/platform/shared/nanoapp_support_lib_dso.h"
+
+#define CHRE_STATIC_NANOAPP_INIT(appName, appId_, appVersion_, appPerms)     \
+  namespace chre {                                                           \
+                                                                             \
+  UniquePtr<Nanoapp> initializeStaticNanoapp##appName() {                    \
+    UniquePtr<Nanoapp> nanoapp = MakeUnique<Nanoapp>();                      \
+    static struct chreNslNanoappInfo appInfo;                                \
+    appInfo.magic = CHRE_NSL_NANOAPP_INFO_MAGIC;                             \
+    appInfo.structMinorVersion = CHRE_NSL_NANOAPP_INFO_STRUCT_MINOR_VERSION; \
+    appInfo.targetApiVersion = CHRE_API_VERSION;                             \
+    appInfo.vendor = "Google";                                               \
+    appInfo.name = #appName;                                                 \
+    appInfo.isSystemNanoapp = true;                                          \
+    appInfo.isTcmNanoapp = false;                                            \
+    appInfo.appId = appId_;                                                  \
+    appInfo.appVersion = appVersion_;                                        \
+    appInfo.entryPoints.start = nanoappStart;                                \
+    appInfo.entryPoints.handleEvent = nanoappHandleEvent;                    \
+    appInfo.entryPoints.end = nanoappEnd;                                    \
+    appInfo.appVersionString = "<undefined>";                                \
+    appInfo.appPermissions = appPerms;                                       \
+    if (nanoapp.isNull()) {                                                  \
+      FATAL_ERROR("Failed to allocate nanoapp " #appName);                   \
+    } else {                                                                 \
+      nanoapp->loadStatic(&appInfo);                                         \
+    }                                                                        \
+                                                                             \
+    return nanoapp;                                                          \
+  }                                                                          \
+                                                                             \
+  }  // namespace chre
diff --git a/pw_chre/include/chre/target_platform/system_timer_base.h b/pw_chre/include/chre/target_platform/system_timer_base.h
new file mode 100644
index 0000000..e8cb288
--- /dev/null
+++ b/pw_chre/include/chre/target_platform/system_timer_base.h
@@ -0,0 +1,38 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#pragma once
+
+#include <cinttypes>
+
+#include "pw_chrono/system_timer.h"
+
+namespace chre {
+
+class SystemTimerBase {
+ public:
+  SystemTimerBase()
+      : timer_([this](pw::chrono::SystemClock::time_point) {
+          this->OnExpired();
+        }) {}
+
+ protected:
+  void OnExpired();
+
+  bool is_active_ = false;
+  bool initialized_ = false;
+  pw::chrono::SystemTimer timer_;
+};
+
+}  // namespace chre
diff --git a/pw_chre/memory.cc b/pw_chre/memory.cc
new file mode 100644
index 0000000..e5241c9
--- /dev/null
+++ b/pw_chre/memory.cc
@@ -0,0 +1,31 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "chre/platform/memory.h"
+
+#include <cstdlib>
+
+namespace chre {
+
+// TODO(b/294106526): Today these APIs call the system malloc and free, but they
+// should be updated to use pw_allocator.
+void* memoryAlloc(size_t size) { return malloc(size); }
+
+void* palSystemApiMemoryAlloc(size_t size) { return malloc(size); }
+
+void memoryFree(void* pointer) { free(pointer); }
+
+void palSystemApiMemoryFree(void* pointer) { free(pointer); }
+
+}  // namespace chre
diff --git a/pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h b/pw_chre/memory_manager.cc
similarity index 63%
copy from pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h
copy to pw_chre/memory_manager.cc
index af31532..0d3786c 100644
--- a/pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h
+++ b/pw_chre/memory_manager.cc
@@ -1,4 +1,4 @@
-// Copyright 2020 The Pigweed Authors
+// Copyright 2023 The Pigweed Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License"); you may not
 // use this file except in compliance with the License. You may obtain a copy of
@@ -11,9 +11,17 @@
 // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 // License for the specific language governing permissions and limitations under
 // the License.
-#pragma once
 
-// The HDLC address to which to write Base64-encoded tokenized logs.
-#ifndef PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS
-#define PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS 1
-#endif  // PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS
+#include "chre/platform/memory_manager.h"
+
+#include "chre/util/memory.h"
+
+namespace chre {
+
+void* MemoryManager::doAlloc(Nanoapp*, uint32_t size) {
+  return chre::memoryAlloc(size);
+}
+
+void MemoryManager::doFree(Nanoapp*, void* ptr) { chre::memoryFree(ptr); }
+
+}  // namespace chre
diff --git a/pw_chre/platform_debug_dump_manager.cc b/pw_chre/platform_debug_dump_manager.cc
new file mode 100644
index 0000000..c7aa93c
--- /dev/null
+++ b/pw_chre/platform_debug_dump_manager.cc
@@ -0,0 +1,27 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#include "chre/platform/platform_debug_dump_manager.h"
+
+namespace chre {
+
+PlatformDebugDumpManagerBase::PlatformDebugDumpManagerBase() {}
+
+PlatformDebugDumpManagerBase::~PlatformDebugDumpManagerBase() {}
+
+// TODO(b/294106526): Implement these.
+void PlatformDebugDumpManager::sendDebugDump(const char*, bool) {}
+
+void PlatformDebugDumpManager::logStateToBuffer(DebugDumpWrapper&) {}
+
+}  // namespace chre
diff --git a/pw_chre/platform_nanoapp.cc b/pw_chre/platform_nanoapp.cc
new file mode 100644
index 0000000..5c90769
--- /dev/null
+++ b/pw_chre/platform_nanoapp.cc
@@ -0,0 +1,77 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#include "chre/platform/platform_nanoapp.h"
+
+#include "chre/util/system/napp_permissions.h"
+#include "chre_api/chre/version.h"
+
+namespace chre {
+
+PlatformNanoapp::~PlatformNanoapp() {}
+
+bool PlatformNanoapp::start() { return app_info_->entryPoints.start(); }
+
+void PlatformNanoapp::handleEvent(uint32_t SenderInstanceId,
+                                  uint16_t eventType,
+                                  const void* eventData) {
+  app_info_->entryPoints.handleEvent(SenderInstanceId, eventType, eventData);
+}
+
+void PlatformNanoapp::end() { app_info_->entryPoints.end(); }
+
+uint64_t PlatformNanoapp::getAppId() const {
+  return (app_info_ == nullptr) ? 0 : app_info_->appId;
+}
+
+uint32_t PlatformNanoapp::getAppVersion() const {
+  return app_info_->appVersion;
+}
+
+uint32_t PlatformNanoapp::getTargetApiVersion() const {
+  return CHRE_API_VERSION;
+}
+
+const char* PlatformNanoapp::getAppName() const {
+  return (app_info_ != nullptr) ? app_info_->name : "Unknown";
+}
+
+bool PlatformNanoapp::supportsAppPermissions() const {
+  return (app_info_ != nullptr) ? (app_info_->structMinorVersion >=
+                                   CHRE_NSL_NANOAPP_INFO_STRUCT_MINOR_VERSION)
+                                : false;
+}
+
+uint32_t PlatformNanoapp::getAppPermissions() const {
+  return (supportsAppPermissions())
+             ? app_info_->appPermissions
+             : static_cast<uint32_t>(chre::NanoappPermissions::CHRE_PERMS_NONE);
+}
+
+bool PlatformNanoapp::isSystemNanoapp() const {
+  return (app_info_ != nullptr && app_info_->isSystemNanoapp);
+}
+
+void PlatformNanoapp::logStateToBuffer(DebugDumpWrapper& debugDump) const {
+  if (!app_info_) {
+    return;
+  }
+  debugDump.print("%s: %s", app_info_->name, app_info_->vendor);
+}
+
+void PlatformNanoappBase::loadStatic(
+    const struct chreNslNanoappInfo* app_info) {
+  app_info_ = app_info;
+}
+
+}  // namespace chre
diff --git a/pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h b/pw_chre/platform_pal.cc
similarity index 66%
copy from pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h
copy to pw_chre/platform_pal.cc
index af31532..04fffef 100644
--- a/pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h
+++ b/pw_chre/platform_pal.cc
@@ -1,4 +1,4 @@
-// Copyright 2020 The Pigweed Authors
+// Copyright 2023 The Pigweed Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License"); you may not
 // use this file except in compliance with the License. You may obtain a copy of
@@ -11,9 +11,12 @@
 // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 // License for the specific language governing permissions and limitations under
 // the License.
-#pragma once
 
-// The HDLC address to which to write Base64-encoded tokenized logs.
-#ifndef PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS
-#define PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS 1
-#endif  // PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS
+#include "chre/platform/shared/platform_pal.h"
+
+namespace chre {
+
+// TODO(b/294106526): Implement this.
+void PlatformPal::prePalApiCall(PalType) const {}
+
+}  // namespace chre
diff --git a/pw_chre/power_control_manager.cc b/pw_chre/power_control_manager.cc
new file mode 100644
index 0000000..b2d229e
--- /dev/null
+++ b/pw_chre/power_control_manager.cc
@@ -0,0 +1,25 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "chre/platform/power_control_manager.h"
+
+namespace chre {
+
+// TODO(b/294106526): Implement these, possibly by adding a facade.
+void PowerControlManager::preEventLoopProcess(size_t) {}
+void PowerControlManager::postEventLoopProcess(size_t) {}
+
+bool PowerControlManager::hostIsAwake() { return true; }
+
+}  // namespace chre
diff --git a/pw_chre/public/pw_chre/chre.h b/pw_chre/public/pw_chre/chre.h
new file mode 100644
index 0000000..69f6f81
--- /dev/null
+++ b/pw_chre/public/pw_chre/chre.h
@@ -0,0 +1,71 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#pragma once
+
+#include <cstddef>
+#include <cstdint>
+
+#include "pw_chre/host_link.h"
+
+namespace pw::chre {
+
+/// A message to be sent to a CHRE nanoapp.
+/// This message originated from the Application Processor (AP).
+struct NanoappMessage {
+  /// The id of the nanoapp this message is sent to.
+  uint64_t nano_app_id;
+  /// The type of message this is.
+  uint32_t message_type;
+  /// The id of the host on the AP that sent this request.
+  uint16_t host_endpoint;
+  /// The actual message data.
+  const uint8_t* data;
+  /// The size in bytes of the message data.
+  size_t length;
+};
+
+/// Initialize the CHRE environment and load any static nanoapps that exist.
+/// This must be called before the event loop has been started.
+void Init();
+
+/// Teardown the CHRE environment.
+/// This must be called after Init and after the event loop has been stopped.
+void Deinit();
+
+/// Run the CHRE event loop.
+/// This function will not return until `StopEventLoop` is called.
+void RunEventLoop();
+
+/// Stop the CHRE event loop.
+/// This can be called from any thread.
+void StopEventLoop();
+
+/// Send a message to a nano app.
+/// This can be called from any thread.
+/// @param[in] message The message being sent to the nano app.
+void SendMessageToNanoapp(NanoappMessage message);
+
+/// Free a message that CHRE created to send to the AP (via `SendMessageToAp`).
+/// This function must be called after the message has finished being used.
+/// After this function is called, the message data must not be accessed.
+/// This can be called from any thread.
+/// @param[in] context The message being freed.
+void FreeMessageToAp(MessageToApContext context);
+
+/// Set the estimated offset between the AP time and CHRE's time.
+/// @param[in] offset The offset time in nanoseconds.
+void SetEstimatedHostTimeOffset(int64_t offset);
+
+}  // namespace pw::chre
diff --git a/pw_chre/public/pw_chre/host_link.h b/pw_chre/public/pw_chre/host_link.h
new file mode 100644
index 0000000..ca4e2cc
--- /dev/null
+++ b/pw_chre/public/pw_chre/host_link.h
@@ -0,0 +1,67 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#pragma once
+
+#include <cstddef>
+#include <cstdint>
+
+// These host link functions should be implemented by the system integrator.
+namespace pw::chre {
+
+/// This is a token representing a message that CHRE allocated.
+/// It must be passed to `FreeMessageToAp` when the message is finished.
+typedef const void* MessageToApContext;
+
+/// This is a message that should be sent to the AP.
+/// It was allocated by CHRE, so pw::chre::FreeMessageToAp should be called
+/// in order to free it.
+struct MessageToAp {
+  /// The id of the nanoapp sending the message.
+  uint64_t nanoapp_id;
+
+  /// The type of the message.
+  uint32_t message_type;
+
+  uint32_t app_permissions;
+  uint32_t message_permissions;
+
+  /// The id of the client that this message should be delivered to on the host.
+  uint16_t host_endpoint;
+
+  /// Whether CHRE is responsible for waking the AP.
+  /// If this is true, then the client must wake the AP in
+  /// `SendMessageToAp` before sending this message.
+  bool woke_host;
+
+  /// The underlying data of the message. This is owned by `chre_context` and
+  /// should not be accessed after the message has been freed.
+  const uint8_t* data;
+
+  /// The length of `data` in bytes.
+  size_t length;
+
+  /// The context of the message, used to free the message when the client is
+  /// finished sending it.
+  MessageToApContext chre_context;
+};
+
+/// CHRE calls this method to send a message to the Application Processor (AP).
+/// The client must implement this method, and the client is responsible for
+/// calling `FreeMessageToAp` once they are finished with the message.
+/// @param[in] message The message to be sent.
+/// @return Whether this method was successful.
+bool SendMessageToAp(MessageToAp message);
+
+}  // namespace pw::chre
diff --git a/pw_chre/static_nanoapps.cc b/pw_chre/static_nanoapps.cc
new file mode 100644
index 0000000..8612b75
--- /dev/null
+++ b/pw_chre/static_nanoapps.cc
@@ -0,0 +1,34 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#include "chre/core/static_nanoapps.h"
+
+#include "chre/apps/apps.h"
+#include "chre/util/macros.h"
+
+namespace chre {
+
+const StaticNanoappInitFunction kStaticNanoappList[] = {
+    initializeStaticNanoappHelloWorld,
+    initializeStaticNanoappMessageWorld,
+#if defined(INCLUDE_SENSOR_APP)
+    initializeStaticNanoappSensorWorld,
+#endif
+    initializeStaticNanoappSpammer,
+    initializeStaticNanoappTimerWorld,
+    initializeStaticNanoappUnloadTester,
+};
+
+const size_t kStaticNanoappCount = ARRAY_SIZE(kStaticNanoappList);
+
+}  // namespace chre
diff --git a/pw_chre/system_time.cc b/pw_chre/system_time.cc
new file mode 100644
index 0000000..9a9df9a
--- /dev/null
+++ b/pw_chre/system_time.cc
@@ -0,0 +1,45 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#include "chre/platform/system_time.h"
+
+#include "chre/platform/assert.h"
+#include "chre/platform/log.h"
+#include "pw_chrono/system_clock.h"
+
+namespace chre {
+
+namespace {
+
+int64_t estimated_host_time_offset = 0;
+
+}
+
+Nanoseconds SystemTime::getMonotonicTime() {
+  const pw::chrono::SystemClock::time_point now =
+      pw::chrono::SystemClock::now();
+  auto nsecs = std::chrono::duration_cast<std::chrono::nanoseconds>(
+                   now.time_since_epoch())
+                   .count();
+  return Nanoseconds(static_cast<uint64_t>(nsecs));
+}
+
+int64_t SystemTime::getEstimatedHostTimeOffset() {
+  return estimated_host_time_offset;
+}
+
+void SystemTime::setEstimatedHostTimeOffset(int64_t offset) {
+  estimated_host_time_offset = offset;
+}
+
+}  // namespace chre
diff --git a/pw_chre/system_timer.cc b/pw_chre/system_timer.cc
new file mode 100644
index 0000000..b178815
--- /dev/null
+++ b/pw_chre/system_timer.cc
@@ -0,0 +1,67 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#include "chre/platform/system_timer.h"
+
+#include "chre/platform/log.h"
+#include "chre/util/time.h"
+
+namespace chre {
+
+void SystemTimerBase::OnExpired() {
+  SystemTimer* timer = static_cast<SystemTimer*>(this);
+  timer->mCallback(timer->mData);
+}
+
+SystemTimer::SystemTimer() {}
+
+SystemTimer::~SystemTimer() {
+  if (!initialized_) {
+    return;
+  }
+  cancel();
+  initialized_ = false;
+}
+
+bool SystemTimer::init() {
+  initialized_ = true;
+  return initialized_;
+}
+
+bool SystemTimer::set(SystemTimerCallback* callback,
+                      void* data,
+                      Nanoseconds delay) {
+  if (!initialized_) {
+    return false;
+  }
+  mCallback = callback;
+  mData = data;
+  pw::chrono::SystemClock::duration interval =
+      std::chrono::nanoseconds(delay.toRawNanoseconds());
+  const pw::chrono::SystemClock::time_point now =
+      pw::chrono::SystemClock::now();
+  timer_.InvokeAt(now + interval);
+  return true;
+}
+
+bool SystemTimer::cancel() {
+  if (!initialized_) {
+    return false;
+  }
+  timer_.Cancel();
+  return true;
+}
+
+bool SystemTimer::isActive() { return is_active_; }
+
+}  // namespace chre
diff --git a/pw_chrono/BUILD.bazel b/pw_chrono/BUILD.bazel
index 2329241..9479e65 100644
--- a/pw_chrono/BUILD.bazel
+++ b/pw_chrono/BUILD.bazel
@@ -12,6 +12,7 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+load("@com_google_protobuf//:protobuf.bzl", "py_proto_library")
 load(
     "//pw_build:pigweed.bzl",
     "pw_cc_facade",
@@ -19,7 +20,6 @@
     "pw_cc_test",
 )
 load("//pw_protobuf_compiler:pw_proto_library.bzl", "pw_proto_library")
-load("@com_google_protobuf//:protobuf.bzl", "py_proto_library")
 
 package(default_visibility = ["//visibility:public"])
 
diff --git a/pw_chrono_zephyr/docs.rst b/pw_chrono_zephyr/docs.rst
index 06760fd..9d3cafd 100644
--- a/pw_chrono_zephyr/docs.rst
+++ b/pw_chrono_zephyr/docs.rst
@@ -13,4 +13,4 @@
 pw_chrono.system_clock
 ----------------------
 To enable the ``system_clock`` facade, it is also required to add
-``CONFIG_PIGWEED_CHRONO_SYSTEM_CLOCK=y``.
\ No newline at end of file
+``CONFIG_PIGWEED_CHRONO_SYSTEM_CLOCK=y``.
diff --git a/pw_cli/docs.rst b/pw_cli/docs.rst
index 4ab1715..c4aa8ac 100644
--- a/pw_cli/docs.rst
+++ b/pw_cli/docs.rst
@@ -61,44 +61,44 @@
 
 .. code-block::
 
-  {
-    "pw": {
-      "pw_cli": {
-        "plugins": {
-          "<plugin name>": {
-            "module": "<module containing plugin>",
-            "function": "<entry point for plugin>"
-          },
-          ...
-        }
-      }
-    }
-  }
+   {
+     "pw": {
+       "pw_cli": {
+         "plugins": {
+           "<plugin name>": {
+             "module": "<module containing plugin>",
+             "function": "<entry point for plugin>"
+           },
+           ...
+         }
+       }
+     }
+   }
 
 The following example registers three commands:
 
 .. code-block::
 
-  {
-    "pw": {
-      "pw_cli": {
-        "plugins": {
-          "presubmit": {
-            "module": "my_cool_project.tools",
-            "function": "run_presubmit"
-          },
-          "test": {
-            "module": "my_cool_project.testing",
-            "function": "run_test"
-          },
-          "flash": {
-            "module": "my_cool_project.flash",
-            "function": "main"
-          }
-        }
-      }
-    }
-  }
+   {
+     "pw": {
+       "pw_cli": {
+         "plugins": {
+           "presubmit": {
+             "module": "my_cool_project.tools",
+             "function": "run_presubmit"
+           },
+           "test": {
+             "module": "my_cool_project.testing",
+             "function": "run_test"
+           },
+           "flash": {
+             "module": "my_cool_project.flash",
+             "function": "main"
+           }
+         }
+       }
+     }
+   }
 
 Defining a plugin function
 --------------------------
@@ -293,50 +293,50 @@
 
 Plugins may be registered in a few different ways.
 
- * **Direct function call.** Register plugins by calling
-   :py:meth:`pw_cli.plugins.Registry.register` or
-   :py:meth:`pw_cli.plugins.Registry.register_by_name`.
+* **Direct function call.** Register plugins by calling
+  :py:meth:`pw_cli.plugins.Registry.register` or
+  :py:meth:`pw_cli.plugins.Registry.register_by_name`.
 
-   .. code-block:: python
+  .. code-block:: python
 
-     registry = pw_cli.plugins.Registry()
+    registry = pw_cli.plugins.Registry()
 
-     registry.register('plugin_name', my_plugin)
-     registry.register_by_name('plugin_name', 'module_name', 'function_name')
+    registry.register('plugin_name', my_plugin)
+    registry.register_by_name('plugin_name', 'module_name', 'function_name')
 
- * **Decorator.** Register using the :py:meth:`pw_cli.plugins.Registry.plugin`
-   decorator.
+* **Decorator.** Register using the :py:meth:`pw_cli.plugins.Registry.plugin`
+  decorator.
 
-   .. code-block:: python
+  .. code-block:: python
 
-     _REGISTRY = pw_cli.plugins.Registry()
+    _REGISTRY = pw_cli.plugins.Registry()
 
-     # This function is registered as the "my_plugin" plugin.
-     @_REGISTRY.plugin
-     def my_plugin():
-         pass
+    # This function is registered as the "my_plugin" plugin.
+    @_REGISTRY.plugin
+    def my_plugin():
+        pass
 
-     # This function is registered as the "input" plugin.
-     @_REGISTRY.plugin(name='input')
-     def read_something():
-         pass
+    # This function is registered as the "input" plugin.
+    @_REGISTRY.plugin(name='input')
+    def read_something():
+        pass
 
-   The decorator may be aliased to give a cleaner syntax (e.g. ``register =
-   my_registry.plugin``).
+  The decorator may be aliased to give a cleaner syntax (e.g. ``register =
+  my_registry.plugin``).
 
- * **Plugins files.** Plugins files use a simple format:
+* **Plugins files.** Plugins files use a simple format:
 
-   .. code-block::
+  .. code-block::
 
      # Comments start with "#". Blank lines are ignored.
      name_of_the_plugin module.name module_member
 
      another_plugin some_module some_function
 
-   These files are placed in the file system and apply similarly to Git's
-   ``.gitignore`` files. From Python, these files are registered using
-   :py:meth:`pw_cli.plugins.Registry.register_file` and
-   :py:meth:`pw_cli.plugins.Registry.register_directory`.
+  These files are placed in the file system and apply similarly to Git's
+  ``.gitignore`` files. From Python, these files are registered using
+  :py:meth:`pw_cli.plugins.Registry.register_file` and
+  :py:meth:`pw_cli.plugins.Registry.register_directory`.
 
 pw_cli.plugins module reference
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/pw_cli/py/pw_cli/branding.py b/pw_cli/py/pw_cli/branding.py
index 358e401..0d9dccd 100644
--- a/pw_cli/py/pw_cli/branding.py
+++ b/pw_cli/py/pw_cli/branding.py
@@ -42,7 +42,7 @@
     # Take the banner from the file PW_BRANDING_BANNER; or use the default.
     banner_filename = parsed_env.PW_BRANDING_BANNER
     _memoized_banner = (
-        Path(banner_filename).read_text()
+        Path(banner_filename).read_text(encoding='utf-8', errors='replace')
         if banner_filename
         else _PIGWEED_BANNER
     )
diff --git a/pw_console/py/pw_console/console_app.py b/pw_console/py/pw_console/console_app.py
index 127366f..d27ea73 100644
--- a/pw_console/py/pw_console/console_app.py
+++ b/pw_console/py/pw_console/console_app.py
@@ -1072,6 +1072,15 @@
         self.update_menu_items()
         self._update_help_window()
 
+    def all_log_stores(self) -> List[LogStore]:
+        log_stores: List[LogStore] = []
+        for pane in self.window_manager.active_panes():
+            if not isinstance(pane, LogPane):
+                continue
+            if pane.log_view.log_store not in log_stores:
+                log_stores.append(pane.log_view.log_store)
+        return log_stores
+
     def add_log_handler(
         self,
         window_title: str,
diff --git a/pw_console/py/pw_console/html/index.html b/pw_console/py/pw_console/html/index.html
index 5d7a21b..f50d0cc 100644
--- a/pw_console/py/pw_console/html/index.html
+++ b/pw_console/py/pw_console/html/index.html
@@ -18,20 +18,13 @@
 </head>
 
 <body>
-  <div class="table-container">
-    <div class="log-header">
-      <div class="log-entry">
-        <span class="timestamp">Time</span>
-        <span class="level">Level</span>
-        <span class="module">Module</span>
-        <span class="time">Timestamp</span>
-        <span class="keys">Keys</span>
-        <span class="msg">Message</span>
-      </div>
-    </div>
-    <div class="log-container"></div>
-  </div>
+  <main id="log-viewer-container"></main>
 
-  <script src="https://unpkg.com/virtualized-list@2.2.0/umd/virtualized-list.min.js"></script>
+  <!-- CDN pigweedjs -->
+  <script src="https://unpkg.com/pigweedjs/dist/logging.umd.js"></script>
+
+  <!-- Locally built pigweedjs -->
+  <!-- <script src="./index.umd.js"></script> -->
+  <!-- <script src="./logging.umd.js"></script> -->
   <script src="./main.js"></script>
 </body>
diff --git a/pw_console/py/pw_console/html/main.js b/pw_console/py/pw_console/html/main.js
index 88214f0..79e239e 100644
--- a/pw_console/py/pw_console/html/main.js
+++ b/pw_console/py/pw_console/html/main.js
@@ -12,9 +12,81 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
-var VirtualizedList = window.VirtualizedList.default;
-const rowHeight = 30;
+// eslint-disable-next-line no-undef
+const { createLogViewer, LogSource, LogEntry, Severity } = PigweedLogging;
 
+let currentTheme = {};
+let defaultLogStyleRule = 'color: #ffffff;';
+let columnStyleRules = {};
+let defaultColumnStyles = [];
+let logLevelStyles = {};
+
+const logLevelToString = {
+  10: 'DBG',
+  20: 'INF',
+  21: 'OUT',
+  30: 'WRN',
+  40: 'ERR',
+  50: 'CRT',
+  70: 'FTL',
+};
+
+const logLevelToSeverity = {
+  10: Severity.DEBUG,
+  20: Severity.INFO,
+  21: Severity.INFO,
+  30: Severity.WARNING,
+  40: Severity.ERROR,
+  50: Severity.CRITICAL,
+  70: Severity.CRITICAL,
+};
+
+let nonAdditionalDataFields = [
+  '_hosttime',
+  'levelname',
+  'levelno',
+  'args',
+  'fields',
+  'message',
+  'time',
+];
+let additionalHeaders = [];
+
+// LogSource subclass that consumes pw-console JSON log messages.
+class PwConsoleLogSource extends LogSource {
+  constructor() {
+    super();
+  }
+  append_log(data) {
+    var fields = [
+      { key: 'severity', value: logLevelToSeverity[data.levelno] },
+      { key: 'time', value: data.time },
+    ];
+    Object.keys(data.fields).forEach((columnName) => {
+      if (
+        nonAdditionalDataFields.indexOf(columnName) === -1 &&
+        additionalHeaders.indexOf(columnName) === -1
+      ) {
+        fields.push({ key: columnName, value: data.fields[columnName] });
+      }
+    });
+    fields.push({ key: 'message', value: data.message });
+    fields.push({ key: 'py_file', value: data.py_file });
+    fields.push({ key: 'py_logger', value: data.py_logger });
+    this.emitEvent('logEntry', {
+      severity: logLevelToSeverity[data.levelno],
+      timestamp: new Date(),
+      fields: fields,
+    });
+  }
+}
+
+// Setup the pigweedjs log-viewer
+const logSource = new PwConsoleLogSource();
+const containerEl = document.querySelector('#log-viewer-container');
+let unsubscribe = createLogViewer(logSource, containerEl);
+
+// Format a date in the standard pw_cli style YYYY-mm-dd HH:MM:SS
 function formatDate(dt) {
   function pad2(n) {
     return (n < 10 ? '0' : '') + n;
@@ -33,88 +105,13 @@
   );
 }
 
-let data = [];
-function clearLogs() {
-  data = [
-    {
-      message: 'Logs started',
-      levelno: 20,
-      time: formatDate(new Date()),
-      levelname: '\u001b[35m\u001b[1mINF\u001b[0m',
-      args: [],
-      fields: { module: '', file: '', timestamp: '', keys: '' },
-    },
-  ];
-}
-clearLogs();
-
-let nonAdditionalDataFields = [
-  '_hosttime',
-  'levelname',
-  'levelno',
-  'args',
-  'fields',
-  'message',
-  'time',
-];
-let additionalHeaders = [];
-function updateHeadersFromData(data) {
-  let dirty = false;
-  Object.keys(data).forEach((columnName) => {
-    if (
-      nonAdditionalDataFields.indexOf(columnName) === -1 &&
-      additionalHeaders.indexOf(columnName) === -1
-    ) {
-      additionalHeaders.push(columnName);
-      dirty = true;
-    }
-  });
-  Object.keys(data.fields || {}).forEach((columnName) => {
-    if (
-      nonAdditionalDataFields.indexOf(columnName) === -1 &&
-      additionalHeaders.indexOf(columnName) === -1
-    ) {
-      additionalHeaders.push(columnName);
-      dirty = true;
-    }
-  });
-
-  const headerDOM = document.querySelector('.log-header');
-  if (dirty) {
-    headerDOM.innerHTML = `
-      <span class="_hosttime">Time</span>
-      <span class="level">Level</span>
-      ${additionalHeaders
-        .map(
-          (key) => `
-        <span class="${key}">${key}</span>
-      `,
-        )
-        .join('\n')}
-      <span class="msg">Message</span>`;
-  }
-
-  // Also update column widths to match actual row.
-  const headerChildren = Array.from(headerDOM.children);
-
-  const firstRow = document.querySelector('.log-container .log-entry');
-  const firstRowChildren = Array.from(firstRow.children);
-  headerChildren.forEach((col, index) => {
-    if (firstRowChildren[index]) {
-      col.setAttribute(
-        'style',
-        `width:${firstRowChildren[index].getBoundingClientRect().width}`,
-      );
-      col.setAttribute('title', col.innerText);
-    }
-  });
-}
-
+// Return the value of the given URL hash (#) parameter, if present.
 function getUrlHashParameter(param) {
   var params = getUrlHashParameters();
   return params[param];
 }
 
+// Capture all # parameters from the current URL.
 function getUrlHashParameters() {
   var sPageURL = window.location.hash;
   if (sPageURL) sPageURL = sPageURL.split('#')[1];
@@ -126,25 +123,14 @@
   });
   return object;
 }
-let currentTheme = {};
-let defaultLogStyleRule = 'color: #ffffff;';
-let columnStyleRules = {};
-let defaultColumnStyles = [];
-let logLevelStyles = {};
-const logLevelToString = {
-  10: 'DBG',
-  20: 'INF',
-  21: 'OUT',
-  30: 'WRN',
-  40: 'ERR',
-  50: 'CRT',
-  70: 'FTL',
-};
 
+// Update web page CSS styles based on a pw-console color json log message.
 function setCurrentTheme(newTheme) {
   currentTheme = newTheme;
-  defaultLogStyleRule = parseStyle(newTheme.default);
-  document.querySelector('body').setAttribute('style', defaultLogStyleRule);
+  defaultLogStyleRule = parsePromptToolkitStyle(newTheme.default);
+  // Set body background color
+  // document.querySelector('body').setAttribute('style', defaultLogStyleRule);
+
   // Apply default font styles to columns
   let styles = [];
   Object.keys(newTheme).forEach((key) => {
@@ -152,15 +138,16 @@
       styles.push(newTheme[key]);
     }
     if (key.startsWith('log-level-')) {
-      logLevelStyles[parseInt(key.replace('log-level-', ''))] = parseStyle(
-        newTheme[key],
-      );
+      logLevelStyles[parseInt(key.replace('log-level-', ''))] =
+        parsePromptToolkitStyle(newTheme[key]);
     }
   });
   defaultColumnStyles = styles;
 }
 
-function parseStyle(rule) {
+// Convert prompt_toolkit color format strings to CSS.
+// 'bg:#BG-HEX #FG-HEX STYLE' where STYLE is either 'bold' or 'underline'
+function parsePromptToolkitStyle(rule) {
   const ruleList = rule.split(' ');
   let outputStyle = ruleList.map((fragment) => {
     if (fragment.startsWith('bg:')) {
@@ -176,6 +163,7 @@
   return outputStyle.join(';');
 }
 
+// Inject styled spans into the log message column values.
 function applyStyling(data, applyColors = false) {
   let colIndex = 0;
   Object.keys(data).forEach((key) => {
@@ -188,7 +176,7 @@
                 applyColors
                   ? defaultColumnStyles[colIndex % defaultColumnStyles.length]
                   : ''
-              };${parseStyle(columnStyleRules[key][token])};">
+              };${parsePromptToolkitStyle(columnStyleRules[key][token])};">
                 ${token}
             </span>`,
         );
@@ -198,7 +186,7 @@
     }
     if (applyColors) {
       data[key] = `<span
-      style="${parseStyle(
+      style="${parsePromptToolkitStyle(
         defaultColumnStyles[colIndex % defaultColumnStyles.length],
       )}">
         ${data[key]}
@@ -209,54 +197,11 @@
   return data;
 }
 
+// Connect to the pw-console websocket and start emitting logs.
 (function () {
   const container = document.querySelector('.log-container');
   const height = window.innerHeight - 50;
   let follow = true;
-  // Initialize our VirtualizedList
-  var virtualizedList = new VirtualizedList(container, {
-    height,
-    rowCount: data.length,
-    rowHeight: rowHeight,
-    estimatedRowHeight: rowHeight,
-    renderRow: (index) => {
-      const element = document.createElement('div');
-      element.classList.add('log-entry');
-      element.setAttribute('style', `height: ${rowHeight}px;`);
-      const logData = data[index];
-      element.innerHTML = `
-        <span class="time">${logData.time}</span>
-        <span class="level" style="${logLevelStyles[logData.levelno] || ''}">${
-          logLevelToString[logData.levelno]
-        }</span>
-        ${additionalHeaders
-          .map(
-            (key) => `
-          <span class="${key}">${
-            logData[key] || logData.fields[key] || ''
-          }</span>
-        `,
-          )
-          .join('\n')}
-        <span class="msg">${logData.message}</span>
-      `;
-      return element;
-    },
-    initialIndex: 0,
-    onScroll: (scrollTop, event) => {
-      const offset =
-        virtualizedList._sizeAndPositionManager.getUpdatedOffsetForIndex({
-          containerSize: height,
-          targetIndex: data.length - 1,
-        });
-
-      if (scrollTop < offset) {
-        follow = false;
-      } else {
-        follow = true;
-      }
-    },
-  });
 
   const port = getUrlHashParameter('ws');
   const hostname = location.hostname || '127.0.0.1';
@@ -271,19 +216,16 @@
     if (!dataObj) return;
 
     if (dataObj.__pw_console_colors) {
+      // If this is a color theme message, update themes.
       const colors = dataObj.__pw_console_colors;
       setCurrentTheme(colors.classes);
       if (colors.column_values) {
         columnStyleRules = { ...colors.column_values };
       }
     } else {
+      // Normal log message.
       const currentData = { ...dataObj, time: formatDate(new Date()) };
-      updateHeadersFromData(currentData);
-      data.push(applyStyling(currentData));
-      virtualizedList.setRowCount(data.length);
-      if (follow) {
-        virtualizedList.scrollToIndex(data.length - 1);
-      }
+      logSource.append_log(currentData);
     }
   };
 })();
diff --git a/pw_console/py/pw_console/html/style.css b/pw_console/py/pw_console/html/style.css
index 2c92a48..0cd5b6a 100644
--- a/pw_console/py/pw_console/html/style.css
+++ b/pw_console/py/pw_console/html/style.css
@@ -1,5 +1,5 @@
 /*
- * Copyright 2022 The Pigweed Authors
+ * Copyright 2023 The Pigweed Authors
  *
  * Licensed under the Apache License, Version 2.0 (the "License"); you may not
  * use this file except in compliance with the License. You may obtain a copy of
@@ -13,62 +13,205 @@
  * License for the specific language governing permissions and limitations under
  * the License.
  */
+
+ @import url('https://fonts.googleapis.com/css2?family=Google+Sans&family=Roboto+Mono:wght@400;500&family=Material+Symbols+Rounded:opsz,wght,FILL,GRAD@20..48,100..700,0..1,-50..200&display=block');
+
+:root {
+    background-color: var(--sys-log-viewer-color-bg);
+    color-scheme: light dark;
+    font-family: "Google Sans", Arial, sans-serif;
+    font-synthesis: none;
+    font-weight: 400;
+    line-height: 1.5;
+    text-rendering: optimizeLegibility;
+    -webkit-font-smoothing: antialiased;
+    -moz-osx-font-smoothing: grayscale;
+    -webkit-text-size-adjust: 100%;
+}
+
+
+:root {
+    /* Material component properties */
+    --md-icon-font: 'Material Symbols Rounded';
+    --md-icon-size: 1.25rem;
+    --md-filled-button-label-text-type: "Google Sans", Arial, sans-serif;
+    --md-outlined-button-label-text-type: "Google Sans", Arial, sans-serif;
+    --md-icon-button-unselected-icon-color: var(--md-sys-color-on-surface-variant);
+    --md-icon-button-unselected-hover-icon-color: var(--md-sys-color-on-primary-container);
+
+    /* Log View */
+    --sys-log-viewer-view-outline-width: 1px;
+    --sys-log-viewer-view-corner-radius: 0.5rem;
+}
+
+* {
+    box-sizing: border-box;
+}
+
+button {
+    font-family: "Google Sans";
+}
+
+main {
+    height: 100vh;
+    padding: 2rem;
+}
+
+a {
+    color: var(--md-sys-color-primary);
+    font-weight: 500;
+    text-decoration: inherit;
+}
+
+a:hover {
+    color: var(--md-sys-color-secondary);
+}
+
 body {
-  background-color: rgb(46, 46, 46);
-  color: #ffffff;
-  overflow: hidden;
-  margin: 0;
+    display: grid;
+    place-content: start;
+    margin: 0;
 }
 
-.table-container {
-  display: table;
-  width: 100%;
-  border-spacing: 30px 0px;
+@media (prefers-color-scheme: dark) {
+    :root {
+        --md-sys-color-primary: #A8C7FA;
+        --md-sys-color-primary-60: #4C8DF6;
+        --md-sys-color-primary-container: #0842A0;
+        --md-sys-color-on-primary: #062E6F;
+        --md-sys-color-on-primary-container: #D3E3FD;
+        --md-sys-color-inverse-primary: #0B57D0;
+        --md-sys-color-secondary: #7FCFFF;
+        --md-sys-color-secondary-container: #004A77;
+        --md-sys-color-on-secondary: #003355;
+        --md-sys-color-on-secondary-container: #C2E7FF;
+        --md-sys-color-tertiary: #6DD58C;
+        --md-sys-color-tertiary-container: #0F5223;
+        --md-sys-color-on-tertiary: #0A3818;
+        --md-sys-color-on-tertiary-container: #C4EED0;
+        --md-sys-color-surface: #131314;
+        --md-sys-color-surface-dim: #131314;
+        --md-sys-color-surface-bright: #37393B;
+        --md-sys-color-surface-container-lowest: #0E0E0E;
+        --md-sys-color-surface-container-low: #1B1B1B;
+        --md-sys-color-surface-container: #1E1F20;
+        --md-sys-color-surface-container-high: #282A2C;
+        --md-sys-color-surface-container-highest: #333537;
+        --md-sys-color-on-surface: #E3E3E3;
+        --md-sys-color-on-surface-variant: #C4C7C5;
+        --md-sys-color-inverse-surface: #E3E3E3;
+        --md-sys-color-inverse-on-surface: #303030;
+        --md-sys-color-outline: #8E918F;
+        --md-sys-color-outline-variant: #444746;
+        --md-sys-color-shadow: #000000;
+        --md-sys-color-scrim: #000000;
+
+        --md-sys-inverse-surface-rgb: 230, 225, 229;
+
+        /* Log Viewer */
+        --sys-log-viewer-color-bg: var(--md-sys-color-surface);
+
+        /* Log View */
+        --sys-log-viewer-color-view-outline:  var(--md-sys-color-outline-variant);
+
+        /* Log View Controls */
+        --sys-log-viewer-color-controls-bg: var(--md-sys-color-surface-container-high);
+        --sys-log-viewer-color-controls-text: var(--md-sys-color-on-surface-variant);
+        --sys-log-viewer-color-controls-input-outline: transparent;
+        --sys-log-viewer-color-controls-input-bg: var(--md-sys-color-surface);
+        --sys-log-viewer-color-controls-button-enabled: var(--md-sys-color-primary-container);
+
+        /* Log List */
+        --sys-log-viewer-color-table-header-bg: var(--md-sys-color-surface-container);
+        --sys-log-viewer-color-table-header-text: var(--md-sys-color-on-surface);
+        --sys-log-viewer-color-table-bg: var(--md-sys-color-surface-container-lowest);
+        --sys-log-viewer-color-table-text: var(--md-sys-color-on-surface);
+        --sys-log-viewer-color-table-cell-outline: var(--md-sys-color-outline-variant);
+        --sys-log-viewer-color-overflow-indicator: var(--md-sys-color-surface-container-lowest);
+        --sys-log-viewer-color-table-mark: var(--md-sys-color-primary-container);
+        --sys-log-viewer-color-table-mark-text: var(--md-sys-color-on-primary-container);
+        --sys-log-viewer-color-table-mark-outline: var(--md-sys-color-outline-variant);
+
+        /* Severity */
+        --sys-log-viewer-color-error-bright: #E46962;
+        --sys-log-viewer-color-surface-error: #601410;
+        --sys-log-viewer-color-on-surface-error: #F9DEDC;
+        --sys-log-viewer-color-orange-bright: #EE9836;
+        --sys-log-viewer-color-surface-yellow: #402D00;
+        --sys-log-viewer-color-on-surface-yellow: #FFDFA0;
+        --sys-log-viewer-color-debug: var(--md-sys-color-primary-60);
+    }
 }
 
-.log-header {
-  font-size: 18px;
-  font-family: monospace;
-}
+@media (prefers-color-scheme: light) {
+    :root {
+        --md-sys-color-primary: #0B57D0;
+        --md-sys-color-primary-70: #7CACF8;
+        --md-sys-color-primary-90: #D3E3FD;
+        --md-sys-color-primary-95: #ECF3FE;
+        --md-sys-color-primary-99: #FAFBFF;
+        --md-sys-color-primary-container: #D3E3FD;
+        --md-sys-color-on-primary: #FFFFFF;
+        --md-sys-color-on-primary-container: #041E49;
+        --md-sys-color-inverse-primary: #A8C7FA;
+        --md-sys-color-secondary: #00639B;
+        --md-sys-color-secondary-container: #C2E7FF;
+        --md-sys-color-on-secondary: #FFFFFF;
+        --md-sys-color-on-secondary-container: #001D35;
+        --md-sys-color-tertiary: #146C2E;
+        --md-sys-color-tertiary-container: #C4EED0;
+        --md-sys-color-on-tertiary: #FFFFFF;
+        --md-sys-color-on-tertiary-container: #072711;
+        --md-sys-color-surface: #FFFFFF;
+        --md-sys-color-surface-dim: #D3DBE5;
+        --md-sys-color-surface-bright: #FFFFFF;
+        --md-sys-color-surface-container-lowest: #FFFFFF;
+        --md-sys-color-surface-container-low: #F8FAFD;
+        --md-sys-color-surface-container: #F0F4F9;
+        --md-sys-color-surface-container-high: #E9EEF6;
+        --md-sys-color-surface-container-highest: #DDE3EA;
+        --md-sys-color-on-surface: #1F1F1F;
+        --md-sys-color-on-surface-variant: #444746;
+        --md-sys-color-inverse-surface: #303030;
+        --md-sys-color-inverse-on-surface: #F2F2F2;
+        --md-sys-color-outline: #747775;
+        --md-sys-color-outline-variant: #C4C7C5;
+        --md-sys-color-shadow: #000000;
+        --md-sys-color-scrim: #000000;
 
-.log-container {
-  width: 100%;
-  height: calc(100vh - 50px);
-  overflow-y: auto;
-  border-top: 1px solid #DDD;
-  font-size: 18px;
-  font-family: monospace;
-}
+        --md-sys-inverse-surface-rgb: 49, 48, 51;
 
-.log-header {
-  width: 100%;
-  font-weight: bold;
-  display: table-row;
-}
+        /* Log Viewer */
+        --sys-log-viewer-color-bg: var(--md-sys-color-surface);
 
-.log-container .row>span {
-  display: table-cell;
-  padding: 20px 18px;
+        /* Log View */
+        --sys-log-viewer-color-view-outline:  var(--md-sys-color-outline);
 
-}
+        /* Log View Controls */
+        --sys-log-viewer-color-controls-bg: var(--md-sys-color-primary-90);
+        --sys-log-viewer-color-controls-text: var(--md-sys-color-on-primary-container);
+        --sys-log-viewer-color-controls-input-outline: transparent;
+        --sys-log-viewer-color-controls-input-bg: var(--md-sys-color-surface-container-lowest);
+        --sys-log-viewer-color-controls-button-enabled: var(--md-sys-color-primary-70);
 
-.log-header>span {
-  text-transform: capitalize;
-  overflow: hidden;
-    display: inline-block;
-    margin-left: 30px;
-}
+        /* Log List */
+        --sys-log-viewer-color-table-header-bg: var(--md-sys-color-primary-95);
+        --sys-log-viewer-color-table-header-text: var(--md-sys-color-on-surface);
+        --sys-log-viewer-color-table-bg: var(--md-sys-color-surface-container-lowest);
+        --sys-log-viewer-color-table-text: var(--md-sys-color-on-surface);
+        --sys-log-viewer-color-table-cell-outline: var(--md-sys-color-outline-variant);
+        --sys-log-viewer-color-overflow-indicator: var(--md-sys-color-surface-container);
+        --sys-log-viewer-color-table-mark: var(--md-sys-color-primary-container);
+        --sys-log-viewer-color-table-mark-text: var(--md-sys-color-on-primary-container);
+        --sys-log-viewer-color-table-mark-outline: var(--md-sys-color-outline-variant);
 
-.log-entry {
-  display: table-row;
-}
-
-.log-entry>span {
-  display: table-cell;
-  overflow: hidden;
-  text-overflow: ellipsis;
-}
-
-.log-entry .msg {
-  flex: 1;
+        /* Severity */
+        --sys-log-viewer-color-error-bright: #DC362E;
+        --sys-log-viewer-color-surface-error: #FCEFEE;
+        --sys-log-viewer-color-on-surface-error: #8C1D18;
+        --sys-log-viewer-color-orange-bright: #F49F2A;
+        --sys-log-viewer-color-surface-yellow: #FEF9EB;
+        --sys-log-viewer-color-on-surface-yellow: #783616;
+        --sys-log-viewer-color-debug: var(--md-sys-color-primary);
+    }
 }
diff --git a/pw_console/py/pw_console/log_store.py b/pw_console/py/pw_console/log_store.py
index 8a8c4e8..588576a 100644
--- a/pw_console/py/pw_console/log_store.py
+++ b/pw_console/py/pw_console/log_store.py
@@ -147,6 +147,9 @@
         self.channel_formatted_prefix_widths = {}
         self.line_index = 0
 
+    def get_channel_names(self) -> List[str]:
+        return list(sorted(self.channel_counts.keys()))
+
     def get_channel_counts(self):
         """Return the seen channel log counts for this conatiner."""
         return ', '.join(
diff --git a/pw_console/py/pw_console/python_logging.py b/pw_console/py/pw_console/python_logging.py
index c9701bf..026ee84 100644
--- a/pw_console/py/pw_console/python_logging.py
+++ b/pw_console/py/pw_console/python_logging.py
@@ -105,6 +105,7 @@
         "parso.python.diff",
         "parso.cache",
         "pw_console.serial_debug_logger",
+        "websockets.server",
     ]
     for logger_name in hidden_host_loggers:
         logging.getLogger(logger_name).propagate = False
@@ -122,6 +123,15 @@
     log_dict["levelno"] = record.levelno
     log_dict["levelname"] = record.levelname
     log_dict["args"] = record.args
+    log_dict["time"] = str(record.created)
+    log_dict["time_string"] = datetime.fromtimestamp(record.created).isoformat(
+        timespec="seconds"
+    )
+
+    lineno = record.lineno
+    file_name = str(record.filename)
+    log_dict['py_file'] = f'{file_name}:{lineno}'
+    log_dict['py_logger'] = str(record.name)
 
     if hasattr(record, "extra_metadata_fields") and (
         record.extra_metadata_fields  # type: ignore
@@ -158,11 +168,15 @@
            "pw_system ",
            "System init"
          ],
+         "time": "1692302986.4729185",
+         "time_string": "2023-08-17T13:09:46",
          "fields": {
            "module": "pw_system",
            "file": "pw_system/init.cc",
            "timestamp": "0:00"
-         }
+         },
+         "py_file": "script.py:1234",
+         "py_logger": "root"
        }
 
     Example usage:
diff --git a/pw_console/py/pw_console/test_mode.py b/pw_console/py/pw_console/test_mode.py
index 815792c..3a2f518 100644
--- a/pw_console/py/pw_console/test_mode.py
+++ b/pw_console/py/pw_console/test_mode.py
@@ -54,7 +54,7 @@
         if search:
             keyboard_key = search.group(1)
 
-        fake_logs.append((line, {'keys': keyboard_key}))
+        fake_logs.append((line.lstrip(), {'keys': keyboard_key}))
     return fake_logs
 
 
@@ -64,15 +64,25 @@
     start_time = time.time()
     message_count = 0
 
+    log_methods = [
+        _FAKE_DEVICE_LOG.info,
+        _FAKE_DEVICE_LOG.debug,
+        _FAKE_DEVICE_LOG.warning,
+        _FAKE_DEVICE_LOG.error,
+        _FAKE_DEVICE_LOG.critical,
+    ]
+    log_method_rand_weights = [50, 20, 10, 10, 10]
+
     # Fake module column names.
     module_names = ['APP', 'RADIO', 'BAT', 'USB', 'CPU']
     while True:
-        if message_count > 32 or message_count < 2:
-            await asyncio.sleep(0.1)
+        if message_count > 32:
+            await asyncio.sleep(1)
         fake_log = random.choice(fake_log_messages)
+        log_func = random.choices(log_methods, weights=log_method_rand_weights)
 
         module_name = module_names[message_count % len(module_names)]
-        _FAKE_DEVICE_LOG.info(
+        log_func[0](
             fake_log[0],
             extra=dict(
                 extra_metadata_fields=dict(
@@ -84,3 +94,5 @@
             ),
         )
         message_count += 1
+        if message_count % 10 == 0:
+            _ROOT_LOG.info('Device message count: %d', message_count)
diff --git a/pw_containers/BUILD.bazel b/pw_containers/BUILD.bazel
index e9a7142..d956cf7 100644
--- a/pw_containers/BUILD.bazel
+++ b/pw_containers/BUILD.bazel
@@ -96,8 +96,9 @@
     ],
     includes = ["public"],
     deps = [
-        "//pw_assert:facade",
+        "//pw_assert",
         "//pw_polyfill",
+        "//pw_preprocessor",
     ],
 )
 
diff --git a/pw_containers/BUILD.gn b/pw_containers/BUILD.gn
index d3f0598..6fcc274 100644
--- a/pw_containers/BUILD.gn
+++ b/pw_containers/BUILD.gn
@@ -102,6 +102,7 @@
   public_deps = [
     dir_pw_assert,
     dir_pw_polyfill,
+    dir_pw_preprocessor,
   ]
   public = [ "public/pw_containers/vector.h" ]
 }
diff --git a/pw_containers/CMakeLists.txt b/pw_containers/CMakeLists.txt
index 18759cb..f0ef3a9 100644
--- a/pw_containers/CMakeLists.txt
+++ b/pw_containers/CMakeLists.txt
@@ -107,6 +107,7 @@
   PUBLIC_DEPS
     pw_assert
     pw_polyfill
+    pw_preprocessor
 )
 
 pw_add_library(pw_containers.wrapped_iterator INTERFACE
diff --git a/pw_containers/public/pw_containers/vector.h b/pw_containers/public/pw_containers/vector.h
index 1e3e0e7..ace0462 100644
--- a/pw_containers/public/pw_containers/vector.h
+++ b/pw_containers/public/pw_containers/vector.h
@@ -26,6 +26,7 @@
 
 #include "pw_assert/assert.h"
 #include "pw_polyfill/language_feature_macros.h"
+#include "pw_preprocessor/compiler.h"
 
 namespace pw {
 namespace vector_impl {
@@ -316,7 +317,7 @@
 
   // Returns the number of elements in the Vector. Uses size_t instead of
   // size_type for consistency with other containers.
-  size_t size() const noexcept { return size_; }
+  size_t size() const noexcept PW_NO_SANITIZE("memory") { return size_; }
 
   // Returns the maximum number of elements in this Vector.
   size_t max_size() const noexcept { return max_size_; }
diff --git a/pw_cpu_exception/BUILD.bazel b/pw_cpu_exception/BUILD.bazel
index 1a0b024..4af80c9 100644
--- a/pw_cpu_exception/BUILD.bazel
+++ b/pw_cpu_exception/BUILD.bazel
@@ -12,18 +12,166 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+load(
+    "//pw_build:pigweed.bzl",
+    "pw_cc_facade",
+    "pw_cc_library",
+)
+
 package(default_visibility = ["//visibility:public"])
 
 licenses(["notice"])
 
-filegroup(
-    name = "pw_cpu_exception",
-    srcs = [
-        "basic_handler.cc",
+# This module has three facades, each of whose backends are set with a
+# different constraint_setting.
+#
+# - entry: This is the library that handles early exception entry and prepares
+#   any CPU state that must be available to the exception handler via the
+#   pw_cpu_exception_State object. The backend for this facade is
+#   architecture-specific.
+constraint_setting(
+    name = "entry_constraint_setting",
+)
+
+# - handler: This facade is backed by an application-specific handler that
+#   determines what to do when an exception is encountered. This may be
+#   capturing a crash report before resetting the device, or in some cases
+#   handling the exception to allow execution to continue.
+constraint_setting(
+    name = "handler_constraint_setting",
+)
+
+# - support: This facade provides architecture-independent functions that may be
+#   helpful for dumping CPU state in various forms. This allows an application
+#   to create an application-specific handler that is portable across multiple
+#   architectures.
+constraint_setting(
+    name = "support_constraint_setting",
+)
+
+pw_cc_facade(
+    name = "entry_facade",
+    hdrs = [
         "public/pw_cpu_exception/entry.h",
-        "public/pw_cpu_exception/handler.h",
         "public/pw_cpu_exception/state.h",
-        "public/pw_cpu_exception/support.h",
-        "start_exception_handler.cc",
     ],
+    includes = ["public"],
+    deps = ["//pw_preprocessor"],
+)
+
+pw_cc_library(
+    name = "entry",
+    hdrs = [
+        "public/pw_cpu_exception/entry.h",
+        "public/pw_cpu_exception/state.h",
+    ],
+    includes = ["public"],
+    deps = [
+        ":entry_backend",
+        "//pw_preprocessor",
+    ],
+)
+
+pw_cc_facade(
+    name = "handler_facade",
+    hdrs = ["public/pw_cpu_exception/handler.h"],
+    includes = ["public"],
+    deps = [":entry"],
+)
+
+pw_cc_library(
+    name = "handler",
+    srcs = ["start_exception_handler.cc"],
+    hdrs = ["public/pw_cpu_exception/handler.h"],
+    includes = ["public"],
+    deps = [
+        ":handler_backend",
+        "//pw_preprocessor",
+    ],
+)
+
+pw_cc_facade(
+    name = "support_facade",
+    hdrs = ["public/pw_cpu_exception/support.h"],
+    includes = ["public"],
+    deps = [":entry"],
+)
+
+pw_cc_library(
+    name = "support",
+    hdrs = ["public/pw_cpu_exception/support.h"],
+    includes = ["public"],
+    deps = [
+        ":entry",
+        ":support_backend",
+    ],
+)
+
+constraint_value(
+    name = "basic_handler_backend",
+    constraint_setting = "//pw_cpu_exception:handler_constraint_setting",
+)
+
+pw_cc_library(
+    name = "basic_handler",
+    srcs = ["basic_handler.cc"],
+    deps = [
+        ":entry",
+        ":handler_facade",
+        "//pw_log",
+    ],
+)
+
+# Override-able flags for each facade backend.
+label_flag(
+    name = "entry_backend",
+    build_setting_default = ":entry_backend_multiplexer",
+)
+
+label_flag(
+    name = "entry_backend_impl",
+    build_setting_default = ":entry_backend_impl_multiplexer",
+)
+
+label_flag(
+    name = "handler_backend",
+    build_setting_default = ":handler_backend_multiplexer",
+)
+
+label_flag(
+    name = "support_backend",
+    build_setting_default = ":support_backend_multiplexer",
+)
+
+# Default facade backends.
+alias(
+    name = "entry_backend_multiplexer",
+    actual = select({
+        "//pw_cpu_exception_cortex_m:entry_backend": "@pigweed//pw_cpu_exception_cortex_m:cpu_exception",
+        "//conditions:default": "//pw_build:unspecified_backend",
+    }),
+)
+
+alias(
+    name = "entry_backend_impl_multiplexer",
+    actual = select({
+        "//pw_cpu_exception_cortex_m:entry_backend": "@pigweed//pw_cpu_exception_cortex_m:cpu_exception_impl",
+        "//conditions:default": "//pw_build:unspecified_backend",
+    }),
+)
+
+alias(
+    name = "handler_backend_multiplexer",
+    actual = select({
+        ":basic_handler_backend": ":basic_handler",
+        "//conditions:default": "//pw_build:unspecified_backend",
+    }),
+)
+
+alias(
+    name = "support_backend_multiplexer",
+    actual = select({
+        "//pw_cpu_exception_cortex_m:support_backend": "@pigweed//pw_cpu_exception_cortex_m:support",
+        "//conditions:default": "//pw_build:unspecified_backend",
+    }),
 )
diff --git a/pw_cpu_exception/docs.rst b/pw_cpu_exception/docs.rst
index 0cd2140..225382a 100644
--- a/pw_cpu_exception/docs.rst
+++ b/pw_cpu_exception/docs.rst
@@ -13,11 +13,11 @@
 -----
 Setup
 -----
-This module has three facades, each of whose backends are set with a
-different GN variable.
+This module has three facades, each of whose backends must be provided by the
+target or application.
 
-``pw_cpu_exception_ENTRY_BACKEND``
-==================================
+Entry facade
+============
 This is the library that handles early exception entry and prepares any CPU
 state that must be available to the exception handler via the
 pw_cpu_exception_State object. The backend for this facade is
@@ -28,8 +28,11 @@
 called immediately upon a CPU exception. For specifics on how this may be done,
 see the backend documentation for your architecture.
 
-``pw_cpu_exception_HANDLER_BACKEND``
-====================================
+The GN variable to set the backend for this facade is
+``pw_cpu_exception_ENTRY_BACKEND``.
+
+Handler facade
+==============
 This facade is backed by an application-specific handler that determines what to
 do when an exception is encountered. This may be capturing a crash report before
 resetting the device, or in some cases handling the exception to allow execution
@@ -39,21 +42,27 @@
 ``pw_cpu_exception_DefaultHandler()``. The behavior of this functions is entirely
 up to the application/project, but some examples are provided below:
 
-  * Enter an infinite loop so the device can be debugged by JTAG.
-  * Reset the device.
-  * Attempt to handle the exception so execution can continue.
-  * Capture and record additional device state and save to flash for a crash
-    report.
-  * A combination of the above, using logic that fits the needs of your project.
+* Enter an infinite loop so the device can be debugged by JTAG.
+* Reset the device.
+* Attempt to handle the exception so execution can continue.
+* Capture and record additional device state and save to flash for a crash
+  report.
+* A combination of the above, using logic that fits the needs of your project.
 
-``pw_cpu_exception_SUPPORT_BACKEND``
-====================================
+The GN variable to set the backend for this facade is
+``pw_cpu_exception_HANDLER_BACKEND``.
+
+Support facade
+==============
 This facade provides architecture-independent functions that may be helpful for
 dumping CPU state in various forms. This allows an application to create an
 application-specific handler that is portable across multiple architectures.
 
-Avoiding circular dependencies with ``pw_cpu_exception_ENTRY_BACKEND``
-======================================================================
+The GN variable to set the backend for this facade is
+``pw_cpu_exception_SUPPORT_BACKEND``.
+
+Avoiding circular dependencies with the entry facade
+====================================================
 The entry facade is hard tied to the definition of the
 ``pw_cpu_exception_State``, so spliting them into separate facades would require
 extra configurations along with extra compatibility checks to ensure they are
@@ -68,6 +77,12 @@
 Entry backends must provide their own ``*.impl`` target that collects their
 entry implementation.
 
+In Bazel, this circular dependency is avoided by putting the backend's full
+implementation including the entry method into a separate override-able
+``entry_backend_impl`` library. When the entry facade is being used, the
+application should add a dependency on the
+``//pw_cpu_exception:entry_backend_impl`` label_flag.
+
 ------------
 Module Usage
 ------------
@@ -102,22 +117,20 @@
 mechanisms to capture CPU state for use by an application's exception handler,
 and allow recovery from CPU exceptions when possible.
 
-  * The entry backend should provide a definition for the
-    ``pw_cpu_exception_State`` object through
-    ``pw_cpu_exception_backend/state.h``.
-  * In GN, the entry backend should also provide a ``.impl`` suffixed form of
-    the entry backend target which collects the actual entry implementation to
-    avoid circular dependencies due to the state definition in the entry backend
-    target.
-  * The entry backend should implement the ``pw_cpu_exception_Entry()`` function
-    that will call ``pw_cpu_exception_HandleException()`` after performing any
-    necessary actions prior to handing control to the application's exception
-    handler (e.g. capturing necessary CPU state).
-  * If an application's exception handler backend modifies the captured CPU
-    state, the state should be treated as though it were the original state of
-    the CPU when the exception occurred. The backend may need to manually
-    restore some of the modified state to ensure this on exception handler
-    return.
+* The entry backend should provide a definition for the
+  ``pw_cpu_exception_State`` object through
+  ``pw_cpu_exception_backend/state.h``.
+* In GN, the entry backend should also provide a ``.impl`` suffixed form of the
+  entry backend target which collects the actual entry implementation to avoid
+  circular dependencies due to the state definition in the entry backend target.
+* The entry backend should implement the ``pw_cpu_exception_Entry()`` function
+  that will call ``pw_cpu_exception_HandleException()`` after performing any
+  necessary actions prior to handing control to the application's exception
+  handler (e.g. capturing necessary CPU state).
+* If an application's exception handler backend modifies the captured CPU state,
+  the state should be treated as though it were the original state of the CPU
+  when the exception occurred. The backend may need to manually restore some of
+  the modified state to ensure this on exception handler return.
 
 -------------
 Compatibility
@@ -128,5 +141,5 @@
 ------------
 Dependencies
 ------------
-  * ``pw_span``
-  * ``pw_preprocessor``
+- :ref:`module-pw_span`
+- :ref:`module-pw_preprocessor`
diff --git a/pw_cpu_exception_cortex_m/BUILD.bazel b/pw_cpu_exception_cortex_m/BUILD.bazel
index 968bc22..3880193 100644
--- a/pw_cpu_exception_cortex_m/BUILD.bazel
+++ b/pw_cpu_exception_cortex_m/BUILD.bazel
@@ -12,18 +12,28 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+load("@com_google_protobuf//:protobuf.bzl", "py_proto_library")
 load(
     "//pw_build:pigweed.bzl",
     "pw_cc_library",
     "pw_cc_test",
 )
-load("@com_google_protobuf//:protobuf.bzl", "py_proto_library")
 load("//pw_protobuf_compiler:pw_proto_library.bzl", "pw_proto_library")
 
 package(default_visibility = ["//visibility:public"])
 
 licenses(["notice"])
 
+constraint_value(
+    name = "entry_backend",
+    constraint_setting = "//pw_cpu_exception:entry_constraint_setting",
+)
+
+constraint_value(
+    name = "support_backend",
+    constraint_setting = "//pw_cpu_exception:support_constraint_setting",
+)
+
 pw_cc_library(
     name = "config",
     hdrs = ["pw_cpu_exception_cortex_m_private/config.h"],
@@ -68,6 +78,7 @@
         ":cortex_m_constants",
         ":cpu_state",
         ":util",
+        "//pw_cpu_exception:support_facade",
         "//pw_log",
         "//pw_preprocessor",
         "//pw_preprocessor:cortex_m",
@@ -110,27 +121,35 @@
 
 pw_cc_library(
     name = "cpu_exception",
-    srcs = ["entry.cc"],
     hdrs = [
-        "public/pw_cpu_exception_cortex_m/cpu_state.h",
         "public_overrides/pw_cpu_exception_backend/state.h",
     ],
-    includes = ["public"],
+    includes = [
+        "public_overrides",
+    ],
     deps = [
-        ":config",
         ":cpu_state",
-        ":cortex_m_constants",
-        ":proto_dump",
-        ":support",
-        ":util",
-        # TODO(b/242183021): Need to add support for facades/backends to Bazel.
-        # "//pw_cpu_exception",
+        "//pw_cpu_exception:entry_facade",
         "//pw_preprocessor",
         "//pw_preprocessor:cortex_m",
     ],
 )
 
 pw_cc_library(
+    name = "cpu_exception_impl",
+    srcs = ["entry.cc"],
+    deps = [
+        ":config",
+        ":cortex_m_constants",
+        ":cpu_state",
+        ":util",
+        # Depend on the full backends in the impl target.
+        "//pw_cpu_exception:entry",
+        "//pw_cpu_exception:handler",
+    ],
+)
+
+pw_cc_library(
     name = "snapshot",
     srcs = ["snapshot.cc"],
     hdrs = ["public/pw_cpu_exception_cortex_m/snapshot.h"],
@@ -165,6 +184,7 @@
         ":config",
         ":cpu_exception",
         ":cpu_state",
+        "//pw_cpu_exception:handler",
     ],
 )
 
diff --git a/pw_cpu_exception_cortex_m/docs.rst b/pw_cpu_exception_cortex_m/docs.rst
index b8726b2..b52e17e 100644
--- a/pw_cpu_exception_cortex_m/docs.rst
+++ b/pw_cpu_exception_cortex_m/docs.rst
@@ -15,60 +15,63 @@
 There are a few ways to set up the Cortex M exception handler so the
 application's exception handler is properly called during an exception.
 
-**1. Use existing CMSIS functions**
-  Inside of CMSIS fault handler functions, branch to ``pw_cpu_exception_Entry``.
+1. Use existing CMSIS functions
+-------------------------------
+Inside of CMSIS fault handler functions, branch to ``pw_cpu_exception_Entry``.
 
-  .. code-block:: cpp
+.. code-block:: cpp
 
-    __attribute__((naked)) void HardFault_Handler(void) {
-    asm volatile(
-        " ldr r0, =pw_cpu_exception_Entry  \n"
-        " bx r0                            \n");
-    }
+   __attribute__((naked)) void HardFault_Handler(void) {
+   asm volatile(
+       " ldr r0, =pw_cpu_exception_Entry  \n"
+       " bx r0                            \n");
+   }
 
-**2. Modify a startup file**
-  Assembly startup files for some microcontrollers initialize the interrupt
-  vector table. The functions to call for fault handlers can be changed here.
-  For ARMv7-M and ARMv8-M, the fault handlers are indexes 3 to 6 of the
-  interrupt vector table. It's also may be helpful to redirect the NMI handler
-  to the entry function (if it's otherwise unused in your project).
+2. Modify a startup file
+------------------------
+Assembly startup files for some microcontrollers initialize the interrupt
+vector table. The functions to call for fault handlers can be changed here.
+For ARMv7-M and ARMv8-M, the fault handlers are indexes 3 to 6 of the
+interrupt vector table. It may also be helpful to redirect the NMI handler
+to the entry function (if it's otherwise unused in your project).
 
-  Default:
+Default:
 
-  .. code-block:: cpp
+.. code-block:: cpp
 
-    __isr_vector_table:
-      .word  __stack_start
-      .word  Reset_Handler
-      .word  NMI_Handler
-      .word  HardFault_Handler
-      .word  MemManage_Handler
-      .word  BusFault_Handler
-      .word  UsageFault_Handler
+   __isr_vector_table:
+     .word  __stack_start
+     .word  Reset_Handler
+     .word  NMI_Handler
+     .word  HardFault_Handler
+     .word  MemManage_Handler
+     .word  BusFault_Handler
+     .word  UsageFault_Handler
 
-  Using CPU exception module:
+Using CPU exception module:
 
-  .. code-block:: cpp
+.. code-block:: cpp
 
-    __isr_vector_table:
-      .word  __stack_start
-      .word  Reset_Handler
-      .word  pw_cpu_exception_Entry
-      .word  pw_cpu_exception_Entry
-      .word  pw_cpu_exception_Entry
-      .word  pw_cpu_exception_Entry
-      .word  pw_cpu_exception_Entry
+   __isr_vector_table:
+     .word  __stack_start
+     .word  Reset_Handler
+     .word  pw_cpu_exception_Entry
+     .word  pw_cpu_exception_Entry
+     .word  pw_cpu_exception_Entry
+     .word  pw_cpu_exception_Entry
+     .word  pw_cpu_exception_Entry
 
-  Note: ``__isr_vector_table`` and ``__stack_start`` are example names, and may
-  vary by platform. See your platform's assembly startup script.
+Note: ``__isr_vector_table`` and ``__stack_start`` are example names, and may
+vary by platform. See your platform's assembly startup script.
 
-**3. Modify interrupt vector table at runtime**
-  Some applications may choose to modify their interrupt vector tables at
-  runtime. The exception handler works with this use case (see the
-  exception_entry_test integration test), but keep in mind that your
-  application's exception handler will not be entered if an exception occurs
-  before the vector table entries are updated to point to
-  ``pw_cpu_exception_Entry``.
+3. Modify interrupt vector table at runtime
+-------------------------------------------
+Some applications may choose to modify their interrupt vector tables at
+runtime. The exception handler works with this use case (see the
+exception_entry_test integration test), but keep in mind that your
+application's exception handler will not be entered if an exception occurs
+before the vector table entries are updated to point to
+``pw_cpu_exception_Entry``.
 
 Module Usage
 ============
@@ -115,7 +118,6 @@
 
 Configuration Options
 =====================
-
 - ``PW_CPU_EXCEPTION_CORTEX_M_EXTENDED_CFSR_DUMP``: Enable extended logging in
   ``pw::cpu_exception::LogCpuState()`` that dumps the active CFSR fields with
   help strings. This is disabled by default since it increases the binary size
@@ -139,30 +141,30 @@
 
 For example:
 
-  .. code-block::
+.. code-block::
 
-    $ python -m pw_cpu_exception_cortex_m.cfsr_decoder 0x00010100
-    20210412 15:11:14 INF Exception caused by a usage fault, bus fault.
+   $ python -m pw_cpu_exception_cortex_m.cfsr_decoder 0x00010100
+   20210412 15:11:14 INF Exception caused by a usage fault, bus fault.
 
-    Active Crash Fault Status Register (CFSR) fields:
-    IBUSERR     Instruction bus error.
-        The processor attempted to issue an invalid instruction. It
-        detects the instruction bus error on prefecting, but this
-        flag is only set to 1 if it attempts to issue the faulting
-        instruction. When this bit is set, the processor has not
-        written a fault address to the BFAR.
-    UNDEFINSTR  Encountered invalid instruction.
-        The processor has attempted to execute an undefined
-        instruction. When this bit is set to 1, the PC value stacked
-        for the exception return points to the undefined instruction.
-        An undefined instruction is an instruction that the processor
-        cannot decode.
+   Active Crash Fault Status Register (CFSR) fields:
+   IBUSERR     Instruction bus error.
+       The processor attempted to issue an invalid instruction. It
+       detects the instruction bus error on prefetching, but this
+       flag is only set to 1 if it attempts to issue the faulting
+       instruction. When this bit is set, the processor has not
+       written a fault address to the BFAR.
+   UNDEFINSTR  Encountered invalid instruction.
+       The processor has attempted to execute an undefined
+       instruction. When this bit is set to 1, the PC value stacked
+       for the exception return points to the undefined instruction.
+       An undefined instruction is an instruction that the processor
+       cannot decode.
 
-    All registers:
-    cfsr       0x00010100
+   All registers:
+   cfsr       0x00010100
 
 .. note::
-  The CFSR is not supported on ARMv6-M CPUs (Cortex M0, M0+, M1).
+   The CFSR is not supported on ARMv6-M CPUs (Cortex M0, M0+, M1).
 
 --------------------
 Snapshot integration
@@ -186,10 +188,10 @@
 information is captured by a ``pw::thread::Thread`` protobuf encoder.
 
 .. note::
-  We recommend providing the ``pw_cpu_exception_State``, for example through
-  ``pw_cpu_exception_DefaultHandler()`` instead of using the current running
-  context to capture the main stack to minimize how much of the snapshot
-  handling is captured in the stack.
+   We recommend providing the ``pw_cpu_exception_State``, for example through
+   ``pw_cpu_exception_DefaultHandler()`` instead of using the current running
+   context to capture the main stack to minimize how much of the snapshot
+   handling is captured in the stack.
 
 Python processor
 ================
@@ -199,43 +201,43 @@
 
 .. code-block::
 
-  Exception caused by a usage fault.
+   Exception caused by a usage fault.
 
-  Active Crash Fault Status Register (CFSR) fields:
-  UNDEFINSTR  Undefined Instruction UsageFault.
-      The processor has attempted to execute an undefined
-      instruction. When this bit is set to 1, the PC value stacked
-      for the exception return points to the undefined instruction.
-      An undefined instruction is an instruction that the processor
-      cannot decode.
+   Active Crash Fault Status Register (CFSR) fields:
+   UNDEFINSTR  Undefined Instruction UsageFault.
+       The processor has attempted to execute an undefined
+       instruction. When this bit is set to 1, the PC value stacked
+       for the exception return points to the undefined instruction.
+       An undefined instruction is an instruction that the processor
+       cannot decode.
 
-  All registers:
-  pc         0x0800e1c4 example::Service::Crash(_example_service_CrashRequest const&, _pw_protobuf_Empty&) (src/example_service/service.cc:131)
-  lr         0x0800e141 example::Service::Crash(_example_service_CrashRequest const&, _pw_protobuf_Empty&) (src/example_service/service.cc:128)
-  psr        0x81000000
-  msp        0x20040fd8
-  psp        0x20001488
-  exc_return 0xffffffed
-  cfsr       0x00010000
-  mmfar      0xe000ed34
-  bfar       0xe000ed38
-  icsr       0x00000803
-  hfsr       0x40000000
-  shcsr      0x00000000
-  control    0x00000000
-  r0         0xe03f7847
-  r1         0x714083dc
-  r2         0x0b36dc49
-  r3         0x7fbfbe1a
-  r4         0xc36e8efb
-  r5         0x69a14b13
-  r6         0x0ec35eaa
-  r7         0xa5df5543
-  r8         0xc892b931
-  r9         0xa2372c94
-  r10        0xbd15c968
-  r11        0x759b95ab
-  r12        0x00000000
+   All registers:
+   pc         0x0800e1c4 example::Service::Crash(_example_service_CrashRequest const&, _pw_protobuf_Empty&) (src/example_service/service.cc:131)
+   lr         0x0800e141 example::Service::Crash(_example_service_CrashRequest const&, _pw_protobuf_Empty&) (src/example_service/service.cc:128)
+   psr        0x81000000
+   msp        0x20040fd8
+   psp        0x20001488
+   exc_return 0xffffffed
+   cfsr       0x00010000
+   mmfar      0xe000ed34
+   bfar       0xe000ed38
+   icsr       0x00000803
+   hfsr       0x40000000
+   shcsr      0x00000000
+   control    0x00000000
+   r0         0xe03f7847
+   r1         0x714083dc
+   r2         0x0b36dc49
+   r3         0x7fbfbe1a
+   r4         0xc36e8efb
+   r5         0x69a14b13
+   r6         0x0ec35eaa
+   r7         0xa5df5543
+   r8         0xc892b931
+   r9         0xa2372c94
+   r10        0xbd15c968
+   r11        0x759b95ab
+   r12        0x00000000
 
 Module Configuration Options
 ============================
@@ -246,17 +248,17 @@
 
 .. c:macro:: PW_CPU_EXCEPTION_CORTEX_M_LOG_LEVEL
 
-  The log level to use for this module. Logs below this level are omitted.
+   The log level to use for this module. Logs below this level are omitted.
 
-  This defaults to ``PW_LOG_LEVEL_DEBUG``.
+   This defaults to ``PW_LOG_LEVEL_DEBUG``.
 
 .. c:macro:: PW_CPU_EXCEPTION_CORTEX_M_EXTENDED_CFSR_DUMP
 
-  Enables extended logging in pw::cpu_exception::LogCpuState() and
-  pw::cpu_exception::cortex_m::LogExceptionAnalysis() that dumps the active
-  CFSR fields with help strings. This is disabled by default since it
-  increases the binary size by >1.5KB when using plain-text logs, or ~460
-  Bytes when using tokenized logging. It's useful to enable this for device
-  bringup until your application has an end-to-end crash reporting solution.
+   Enables extended logging in pw::cpu_exception::LogCpuState() and
+   pw::cpu_exception::cortex_m::LogExceptionAnalysis() that dumps the active
+   CFSR fields with help strings. This is disabled by default since it
+   increases the binary size by >1.5KB when using plain-text logs, or ~460
+   Bytes when using tokenized logging. It's useful to enable this for device
+   bringup until your application has an end-to-end crash reporting solution.
 
-  This is disabled by default.
+   This is disabled by default.
diff --git a/pw_crypto/docs.rst b/pw_crypto/docs.rst
index abe5381..db3b0c2 100644
--- a/pw_crypto/docs.rst
+++ b/pw_crypto/docs.rst
@@ -1,5 +1,6 @@
 .. _module-pw_crypto:
 
+=========
 pw_crypto
 =========
 A set of safe (read: easy to use, hard to misuse) crypto APIs.
@@ -10,6 +11,7 @@
 2. Verifying a digital signature signed with `ECDSA`_ over the NIST P256 curve.
 3. Many more to come ...
 
+------
 SHA256
 ------
 
@@ -43,6 +45,7 @@
     // Handle errors.
   }
 
+-----
 ECDSA
 -----
 
@@ -82,6 +85,7 @@
       // Handle errors.
   }
 
+-------------
 Configuration
 -------------
 
@@ -89,7 +93,7 @@
 crypto libraries.
 
 Mbed TLS
-^^^^^^^^
+========
 
 The `Mbed TLS project <https://www.trustedfirmware.org/projects/mbed-tls/>`_
 is a mature and full-featured crypto library that implements cryptographic
@@ -159,7 +163,7 @@
    #define MBEDTLS_ECP_DP_SECP256R1_ENABLED
 
 Micro ECC
-^^^^^^^^^
+=========
 
 To select Micro ECC, the library needs to be installed and configured.
 
@@ -180,6 +184,7 @@
 
 Note Micro-ECC does not implement any hashing functions, so you will need to use other backends for SHA256 functionality if needed.
 
+------------
 Size Reports
 ------------
 
@@ -187,3 +192,14 @@
 configurations.
 
 .. include:: size_report
+
+-------------
+API reference
+-------------
+.. doxygenfunction:: pw::crypto::ecdsa::VerifyP256Signature(ConstByteSpan public_key, ConstByteSpan digest, ConstByteSpan signature)
+.. doxygenfunction:: pw::crypto::sha256::Hash(ConstByteSpan message, ByteSpan out_digest)
+.. doxygenfunction:: pw::crypto::sha256::Hash(stream::Reader& reader, ByteSpan out_digest)
+.. doxygenvariable:: pw::crypto::sha256::kDigestSizeBytes
+.. doxygenfunction:: pw::crypto::sha256::Sha256::Final(ByteSpan out_digest)
+.. doxygenfunction:: pw::crypto::sha256::Sha256::Update(ConstByteSpan data)
+.. doxygenenum::     pw::crypto::sha256::Sha256State
diff --git a/pw_crypto/public/pw_crypto/ecdsa.h b/pw_crypto/public/pw_crypto/ecdsa.h
index 3aa3766..38326db 100644
--- a/pw_crypto/public/pw_crypto/ecdsa.h
+++ b/pw_crypto/public/pw_crypto/ecdsa.h
@@ -19,18 +19,35 @@
 
 namespace pw::crypto::ecdsa {
 
-// VerifyP256Signature verifies the `signature` of `digest` using `public_key`.
-//
-// `public_key` is a byte string in SEC 1 uncompressed form (0x04||X||Y), which
-// is exactly 65 bytes. Compressed forms (02/03||X) *may* not be supported
-// by some backends, e.g. Mbed TLS.
-//
-// `digest` is a raw byte string, truncated to 32 bytes.
-//
-// `signature` is a raw byte string (r||s) of exactly 64 bytes.
-//
-// Returns Status::OkStatus() for a successful verification, or an error Status
-// otherwise.
+/// Verifies the `signature` of `digest` using `public_key`.
+///
+/// Example:
+///
+/// @code{.cpp}
+/// #include "pw_crypto/sha256.h"
+///
+/// // Verify a digital signature signed with ECDSA over the NIST P256 curve.
+/// std::byte digest[32];
+/// if (!pw::crypto::sha256::Hash(message, digest).ok()) {
+///     // handle errors.
+/// }
+///
+/// if (!pw::crypto::ecdsa::VerifyP256Signature(public_key, digest,
+///                                             signature).ok()) {
+///     // handle errors.
+/// }
+/// @endcode
+///
+/// @param[in] public_key A byte string in SEC 1 uncompressed form
+/// ``(0x04||X||Y)``, which is exactly 65 bytes. Compressed forms
+/// ``(02/03||X)`` *may* not be supported by some backends, e.g. Mbed TLS.
+///
+/// @param[in] digest A raw byte string, truncated to 32 bytes.
+///
+/// @param[in] signature A raw byte string ``(r||s)`` of exactly 64 bytes.
+///
+/// @returns @pw_status{OK} for a successful verification, or an error
+/// ``Status`` otherwise.
 Status VerifyP256Signature(ConstByteSpan public_key,
                            ConstByteSpan digest,
                            ConstByteSpan signature);
diff --git a/pw_crypto/public/pw_crypto/sha256.h b/pw_crypto/public/pw_crypto/sha256.h
index 1389e28..d2e7d97 100644
--- a/pw_crypto/public/pw_crypto/sha256.h
+++ b/pw_crypto/public/pw_crypto/sha256.h
@@ -25,19 +25,19 @@
 
 namespace pw::crypto::sha256 {
 
-// Size in bytes of a SHA256 digest.
+/// The size of a SHA256 digest in bytes.
 constexpr uint32_t kDigestSizeBytes = 32;
 
-// State machine of a hashing session.
+/// A state machine of a hashing session.
 enum class Sha256State {
-  // Initialized and accepting input (via Update()).
+  /// Initialized and accepting input (via `Update()`).
   kReady = 1,
 
-  // Finalized by Final(). Any additional requests, Update() or Final(), will
-  // trigger a transition to kError.
+  /// Finalized by `Final()`. Any additional requests to `Update()` or `Final()`
+  /// will trigger a transition to `kError`.
   kFinalized = 2,
 
-  // In an unrecoverable error state.
+  /// In an unrecoverable error state.
   kError = 3,
 };
 
@@ -50,14 +50,16 @@
 
 }  // namespace backend
 
-// Sha256 computes the SHA256 digest of potentially long, non-contiguous input
-// messages.
-//
-// Usage:
-//
-// if (!Sha256().Update(message).Update(more_message).Final(out_digest).ok()) {
-//   // Error handling.
-// }
+/// Computes the SHA256 digest of potentially long, non-contiguous input
+/// messages.
+///
+/// Usage:
+///
+/// @code{.cpp}
+/// if (!Sha256().Update(message).Update(more_message).Final(out_digest).ok()) {
+///     // Error handling.
+/// }
+/// @endcode
 class Sha256 {
  public:
   Sha256() {
@@ -70,8 +72,8 @@
     state_ = Sha256State::kReady;
   }
 
-  // Update feeds `data` to the running hasher. The feeding can involve zero
-  // or more `Update()` calls and the order matters.
+  /// Feeds `data` to the running hasher. The feeding can involve zero
+  /// or more `Update()` calls and the order matters.
   Sha256& Update(ConstByteSpan data) {
     if (state_ != Sha256State::kReady) {
       PW_LOG_DEBUG("The backend is not ready/initialized");
@@ -87,14 +89,14 @@
     return *this;
   }
 
-  // Final wraps up the hashing session and outputs the final digest in the
-  // first `kDigestSizeBytes` of `out_digest`. `out_digest` must be at least
-  // `kDigestSizeBytes` long.
-  //
-  // Final locks down the Sha256 instance from any additional use.
-  //
-  // Any error, including those occurr inside `Init()` or `Update()` will be
-  // reflected in the return value of Final();
+  /// Finishes the hashing session and outputs the final digest in the
+  /// first `kDigestSizeBytes` of `out_digest`. `out_digest` must be at least
+  /// `kDigestSizeBytes` long.
+  ///
+  /// `Final()` locks down the `Sha256` instance from any additional use.
+  ///
+  /// Any error, including those occurring inside the constructor or `Update()`
+  /// will be reflected in the return value of `Final()`.
   Status Final(ByteSpan out_digest) {
     if (out_digest.size() < kDigestSizeBytes) {
       PW_LOG_DEBUG("Digest output buffer is too small");
@@ -125,8 +127,38 @@
   backend::NativeSha256Context native_ctx_;
 };
 
-// Hash calculates the SHA256 digest of `message` and stores the result
-// in `out_digest`. `out_digest` must be at least `kDigestSizeBytes` long.
+/// Calculates the SHA256 digest of `message` and stores the result
+/// in `out_digest`. `out_digest` must be at least `kDigestSizeBytes` long.
+///
+/// One-shot digest example:
+///
+/// @code{.cpp}
+/// #include "pw_crypto/sha256.h"
+///
+/// std::byte digest[32];
+/// if (!pw::crypto::sha256::Hash(message, digest).ok()) {
+///     // Handle errors.
+/// }
+///
+/// // The content can also come from a pw::stream::Reader.
+/// if (!pw::crypto::sha256::Hash(reader, digest).ok()) {
+///     // Handle errors.
+/// }
+/// @endcode
+///
+/// Long, potentially non-contiguous message example:
+///
+/// @code{.cpp}
+/// #include "pw_crypto/sha256.h"
+///
+/// std::byte digest[32];
+///
+/// if (!pw::crypto::sha256::Sha256()
+///     .Update(chunk1).Update(chunk2).Update(chunk...)
+///     .Final(digest).ok()) {
+///     // Handle errors.
+/// }
+/// @endcode
 inline Status Hash(ConstByteSpan message, ByteSpan out_digest) {
   return Sha256().Update(message).Final(out_digest);
 }
diff --git a/pw_docgen/docs.gni b/pw_docgen/docs.gni
index 7f3b3fc..ed157f9 100644
--- a/pw_docgen/docs.gni
+++ b/pw_docgen/docs.gni
@@ -23,6 +23,9 @@
 
   # Set to enable Google Analytics tracking of generated docs.
   pw_docgen_GOOGLE_ANALYTICS_ID = ""
+
+  # Set to define the number of parallel threads to use during the Sphinx build.
+  pw_docgen_THREADS = ""
 }
 
 # Defines a group of documentation files and assets.
@@ -127,6 +130,14 @@
         ]
       }
 
+      # Override the default number of threads for the Sphinx build.
+      if (pw_docgen_THREADS != "") {
+        args += [
+          "-j",
+          pw_docgen_THREADS,
+        ]
+      }
+
       # Metadata JSON file path.
       args += [ "--metadata" ] +
               rebase_path(get_target_outputs(":$_metadata_file_target"),
diff --git a/pw_docgen/docs.rst b/pw_docgen/docs.rst
index 80b996a..1361503 100644
--- a/pw_docgen/docs.rst
+++ b/pw_docgen/docs.rst
@@ -77,14 +77,14 @@
 
 **Example**
 
-.. code::
+.. code-block::
 
-  pw_doc_group("my_doc_group") {
-    sources = [ "docs.rst" ]
-    inputs = [ "face-with-tears-of-joy-emoji.svg" ]
-    group_deps = [ ":sub_doc_group" ]
-    report_deps = [ ":my_size_report" ]
-  }
+   pw_doc_group("my_doc_group") {
+     sources = [ "docs.rst" ]
+     inputs = [ "face-with-tears-of-joy-emoji.svg" ]
+     group_deps = [ ":sub_doc_group" ]
+     report_deps = [ ":my_size_report" ]
+   }
 
 pw_doc_gen
 __________
@@ -109,16 +109,16 @@
 
 **Example**
 
-.. code::
+.. code-block::
 
-  pw_doc_gen("my_docs") {
-    conf = "//my_docs/conf.py"
-    index = "//my_docs/index.rst"
-    output_directory = target_gen_dir
-    deps = [
-      "//my_module:my_doc_group",
-    ]
-  }
+   pw_doc_gen("my_docs") {
+     conf = "//my_docs/conf.py"
+     index = "//my_docs/index.rst"
+     output_directory = target_gen_dir
+     deps = [
+       "//my_module:my_doc_group",
+     ]
+   }
 
 Generating Documentation
 ------------------------
@@ -129,18 +129,18 @@
 
 Consider the following target in ``$dir_pigweed/docs/BUILD.gn``:
 
-.. code::
+.. code-block::
 
-  pw_doc_gen("docs") {
-    conf = "conf.py"
-    index = "index.rst"
-    output_directory = target_gen_dir
-    deps = [
-      "$dir_pw_bloat:docs",
-      "$dir_pw_docgen:docs",
-      "$dir_pw_preprocessor:docs",
-    ]
-  }
+   pw_doc_gen("docs") {
+     conf = "conf.py"
+     index = "index.rst"
+     output_directory = target_gen_dir
+     deps = [
+       "$dir_pw_bloat:docs",
+       "$dir_pw_docgen:docs",
+       "$dir_pw_preprocessor:docs",
+     ]
+   }
 
 A documentation tree is created under the output directory. Each of the sources
 and inputs in the target's dependency graph is copied under this tree in the
@@ -148,19 +148,19 @@
 (``$dir_pigweed`` in this case). The ``conf.py`` and ``index.rst`` provided
 directly to the ``pw_doc_gen`` template are copied in at the root of the tree.
 
-.. code::
+.. code-block::
 
-  out/gen/docs/pw_docgen_tree/
-  ├── conf.py
-  ├── index.rst
-  ├── pw_bloat
-  │   ├── bloat.rst
-  │   └── examples
-  │       └── simple_bloat.rst
-  ├── pw_docgen
-  │   └── docgen.rst
-  └── pw_preprocessor
-      └── docs.rst
+   out/gen/docs/pw_docgen_tree/
+   ├── conf.py
+   ├── index.rst
+   ├── pw_bloat
+   │   ├── bloat.rst
+   │   └── examples
+   │       └── simple_bloat.rst
+   ├── pw_docgen
+   │   └── docgen.rst
+   └── pw_preprocessor
+       └── docs.rst
 
 This is the documentation tree which gets passed to Sphinx to build HTML output.
 Imports within documentation files must be relative to this structure. In
@@ -230,20 +230,20 @@
 
 #. Set a breakpoint in your extension code:
 
-   .. code::
+   .. code-block::
 
       breakpoint()
 
 #. Build ``python.install`` to install the code change into the bootstrap venv
    (``environment/pigweed-venv/lib/python3.8/site-packages/pw_docgen``):
 
-   .. code::
+   .. code-block::
 
       ninja -C out python.install
 
 #. Manually invoke Sphinx to build the docs and trigger your breakpoint:
 
-   .. code::
+   .. code-block::
 
       cd out
       sphinx-build -W -b html -d docs/gen/docs/help docs/gen/docs/pw_docgen_tree docs/gen/docs/html -v -v -v
diff --git a/pw_docgen/py/pw_docgen/docgen.py b/pw_docgen/py/pw_docgen/docgen.py
index 764717d..331fac5 100644
--- a/pw_docgen/py/pw_docgen/docgen.py
+++ b/pw_docgen/py/pw_docgen/docgen.py
@@ -50,6 +50,13 @@
         '--conf', required=True, help='Path to conf.py file for Sphinx'
     )
     parser.add_argument(
+        '-j',
+        '--parallel',
+        type=int,
+        default=os.cpu_count(),
+        help='Number of parallel processes to run',
+    )
+    parser.add_argument(
         '--gn-root', required=True, help='Root of the GN build tree'
     )
     parser.add_argument(
@@ -78,13 +85,25 @@
 
 
 def build_docs(
-    src_dir: str, dst_dir: str, google_analytics_id: Optional[str] = None
+    src_dir: str,
+    dst_dir: str,
+    parallel: int,
+    google_analytics_id: Optional[str] = None,
 ) -> int:
     """Runs Sphinx to render HTML documentation from a doc tree."""
 
     # TODO(frolv): Specify the Sphinx script from a prebuilts path instead of
     # requiring it in the tree.
-    command = ['sphinx-build', '-W', '-b', 'html', '-d', f'{dst_dir}/help']
+    command = [
+        'sphinx-build',
+        '-W',
+        '-j',
+        str(parallel),
+        '-b',
+        'html',
+        '-d',
+        f'{dst_dir}/help',
+    ]
 
     if google_analytics_id is not None:
         command.append(f'-Dgoogle_analytics_id={google_analytics_id}')
@@ -147,7 +166,10 @@
     print('-' * 80, flush=True)
 
     return build_docs(
-        args.sphinx_build_dir, args.out_dir, args.google_analytics_id
+        args.sphinx_build_dir,
+        args.out_dir,
+        args.parallel,
+        args.google_analytics_id,
     )
 
 
diff --git a/pw_docgen/py/pw_docgen/sphinx/module_metadata.py b/pw_docgen/py/pw_docgen/sphinx/module_metadata.py
index 1cb1b9b..fad1736 100644
--- a/pw_docgen/py/pw_docgen/sphinx/module_metadata.py
+++ b/pw_docgen/py/pw_docgen/sphinx/module_metadata.py
@@ -114,7 +114,6 @@
 
 
 def create_topnav(
-    title: str,
     subtitle: str,
     extra_classes: Optional[List[str]] = None,
 ) -> nodes.Node:
@@ -125,15 +124,6 @@
     )
 
     topnav_container = nodes.container(classes=topnav_classes)
-    topnav_inline_container = nodes.container(classes=['pw-topnav-inline'])
-    topnav_container += topnav_inline_container
-
-    title_node = nodes.paragraph(
-        classes=['pw-topnav-title'],
-        text=title,
-    )
-
-    topnav_inline_container += title_node
 
     subtitle_node = nodes.paragraph(
         classes=['pw-topnav-subtitle'],
@@ -225,7 +215,6 @@
         )
 
         topbar = create_topnav(
-            module_name,
             tagline,
             ['pw-module-index'],
         )
@@ -246,11 +235,9 @@
     }
 
     def run(self) -> List[nodes.Node]:
-        module_name = self._try_get_option('name')
         tagline = self._try_get_option('tagline')
 
         topbar = create_topnav(
-            module_name,
             tagline,
             ['pw-module-subpage'],
         )
@@ -361,7 +348,6 @@
 def setup(app: SphinxApplication):
     app.add_directive('pigweed-module', PigweedModuleDirective)
     app.add_directive('pigweed-module-subpage', PigweedModuleSubpageDirective)
-    app.connect('html-page-context', setup_parse_body)
 
     return {
         'parallel_read_safe': True,
diff --git a/pw_docgen/py/setup.cfg b/pw_docgen/py/setup.cfg
index 41f7505..c139c96 100644
--- a/pw_docgen/py/setup.cfg
+++ b/pw_docgen/py/setup.cfg
@@ -24,7 +24,6 @@
 install_requires =
     sphinx>=5.3.0
     sphinx-argparse
-    sphinx-rtd-theme
     sphinxcontrib-mermaid>=0.7.1
     sphinx-design>=0.3.0
 
diff --git a/pw_doctor/docs.rst b/pw_doctor/docs.rst
index 1179984..bc0ad58 100644
--- a/pw_doctor/docs.rst
+++ b/pw_doctor/docs.rst
@@ -49,3 +49,4 @@
        }
      }
    }
+
diff --git a/pw_env_setup/BUILD.gn b/pw_env_setup/BUILD.gn
index e806416..583a539 100644
--- a/pw_env_setup/BUILD.gn
+++ b/pw_env_setup/BUILD.gn
@@ -43,6 +43,7 @@
     "$dir_pw_doctor/py",
     "$dir_pw_env_setup/py",
     "$dir_pw_hdlc/py",
+    "$dir_pw_i2c:protos.python",
     "$dir_pw_ide/py",
     "$dir_pw_log:protos.python",
     "$dir_pw_log/py",
diff --git a/pw_env_setup/docs.rst b/pw_env_setup/docs.rst
index e8b75c2..1cb4fff 100644
--- a/pw_env_setup/docs.rst
+++ b/pw_env_setup/docs.rst
@@ -102,7 +102,7 @@
 It is possible to pull in a CIPD dependency into Bazel using WORKSPACE rules
 rather than using `bootstrap.sh`. e.g.
 
-.. code:: python
+.. code-block:: python
 
    # WORKSPACE
 
@@ -122,7 +122,7 @@
 project. Though these repositories will only be donwloaded if you use them. To
 get a full list of the remote repositories that this configures, run:
 
-.. code:: sh
+.. code-block:: sh
 
    bazel query //external:all | grep cipd_
 
@@ -133,7 +133,7 @@
 From here it is possible to get access to the Bloaty binaries using the
 following command. For example;
 
-.. code:: sh
+.. code-block:: sh
 
    bazel run @cipd_pigweed_third_party_bloaty_embedded_linux_amd64//:bloaty \
     -- --help
diff --git a/pw_env_setup/py/pw_env_setup/cipd_setup/.cipd_version b/pw_env_setup/py/pw_env_setup/cipd_setup/.cipd_version
index fadfd87..f85993d 100644
--- a/pw_env_setup/py/pw_env_setup/cipd_setup/.cipd_version
+++ b/pw_env_setup/py/pw_env_setup/cipd_setup/.cipd_version
@@ -1 +1 @@
-git_revision:66a1c73609268be7b0cbfc0054856abd97f1eeef
+git_revision:0f08b927516afe56fec88b3472088deddc5b6a89
diff --git a/pw_env_setup/py/pw_env_setup/cipd_setup/.cipd_version.digests b/pw_env_setup/py/pw_env_setup/cipd_setup/.cipd_version.digests
index aa7dc56..658aaf7 100644
--- a/pw_env_setup/py/pw_env_setup/cipd_setup/.cipd_version.digests
+++ b/pw_env_setup/py/pw_env_setup/cipd_setup/.cipd_version.digests
@@ -1,25 +1,34 @@
 # This file was generated by
 #
 #  cipd selfupdate-roll -version-file .cipd_version \
-#      -version git_revision:66a1c73609268be7b0cbfc0054856abd97f1eeef
+#      -version git_revision:0f08b927516afe56fec88b3472088deddc5b6a89
 #
 # Do not modify manually. All changes will be overwritten.
 # Use 'cipd selfupdate-roll ...' to modify.
 
-aix-ppc64       sha256  9e19cf9e02d58023b084f1dd9a78568e7a0ea6daf960f98b38e0be0427e7169b
-linux-386       sha256  7999d06dd646098518421fc6ec789216a2f3702b445c24f07b82ade0c87bc16e
-linux-amd64     sha256  cb1055e41a0bcdd40e8efadf2b1476e4787ddfd293ae4bd1685f4fe7c5a36b4b
-linux-arm64     sha256  a944396cb469cd012d84633d0be3d2e2584de9a7cd164284687603e51b7e3f88
-linux-armv6l    sha256  3752c42be0db8f8838c87ba563af42488bc505c74fc3a11e3e1f1891750ad0d2
-linux-mips64    sha256  75a87994df462800a114ce78c17cef6656710ac5718717917be2294e9983c1f6
-linux-mips64le  sha256  5b9f4dc35a9059dc72c6f05781f3d6c2ec8346c141dfa1eef21e5fff564c2eee
-linux-mipsle    sha256  5ea52c648def87ad4b7c096b9e3f52dd2640b7b0dc3a0967aace09f054bd2f62
-linux-ppc64     sha256  3b50a9ddce62e3a8552f7d064984a159b91c4c79f35310334c44e0e5821c101c
-linux-ppc64le   sha256  9a3fb1b8108dc172e5f7dee3cd8b7ea7a86402e4e3c6129ee4e763efc5166529
-linux-riscv64   sha256  c5f14e494bb0f76b97ee5e2bd8a40a617b2c300dcdb80359d91a7c81e67deaa7
-linux-s390x     sha256  6b4e29beb2d7fa5379b3469e28eece7a5f2e8c76e743a5aaa006dc9395427743
-mac-amd64       sha256  42fbb1659fed5183d77b83a72070bb8a31838d5499128ef8b6d52758a7e00bc3
-mac-arm64       sha256  eb7953aa5aee1035c349c18011095d7a76b96f224777d3dc3d830b04568362e7
-windows-386     sha256  3f046002827e787acb45204efb89e4f3b2a71557fa90ec982b189826acb35f9f
-windows-amd64   sha256  7a911522b7eba230c8468defe3981568bb62e4c2028015ba7905e842a980279c
-windows-arm64   sha256  07003942b142281d7cbf3a0f7c0431d4f234ea2ee9879abbd9d6ee37e88c6a7e
+aix-ppc64       sha256  8ffe6d0beffcc6889e84e11ca047d5b140421a764ca7d3fdfb66b1fb0862fc05
+freebsd-amd64   sha256  5e678bbde4da25328d7160131b7abdcee33d8b38cd50bcb38a5db68ce8ec8f54
+freebsd-arm64   sha256  7d872c2ef4f345304392cc986f741df4a35acc4f273f329fc5554c31d3b3711e
+illumos-amd64   sha256  3160a4d72603d8e7bf053c4def90e8fcf5d3ecce3bfaedaf8f9d42b66fa1cc85
+linux-386       sha256  35efb66f86e42fcbaf6963b56cd0acbd930f8ac1b4b328b6a40abe16e962df04
+linux-amd64     sha256  5df8080af532ca5b446e5b49999cf15e57b701b944995be5913c088af795a51b
+linux-arm64     sha256  0b722b3cc478ccd471a173d620cdf7f5d7c03504f2fbe8fa2649e7d02d610e8d
+linux-armv6l    sha256  fdb7ebebb78feca8b96b254bbcea75129e5c69037c07f85e240c306a6aafe126
+linux-mips      sha256  8b691d8de7449662ab7f7834eac4e78dfd6283c75df65f4762da1f8a6b58b69f
+linux-mips64    sha256  7a42fc00100fdf0f72fda53e4dfba904baf2e8f7232660efa49c027d03bfcfac
+linux-mips64le  sha256  015fb514ea326a306955ce561061f2f82b31c337454b68255efb58f3f3428293
+linux-mipsle    sha256  95181fed5eac512afcd5ef617911b27a0e9b0fc3ac3af7f65cdc4b6086ea719c
+linux-ppc64     sha256  1b88edfe135bce16dd22c4b609a00f1491b23d6b91da4c7b9053572577ce5a15
+linux-ppc64le   sha256  7e45ee956ac243b79acaa63fe9a85bcd23831e68432b53c9599ca2a5f6ec93ec
+linux-riscv64   sha256  5409d012d5c184878d4aef27cd293e36c4651ef249992f79233357117aa73f18
+linux-s390x     sha256  862eb6e6e4bf1e6d4aa5447420c8a0126eddf31593705fb37a800bf130a9c0a4
+mac-amd64       sha256  fa76505e7017b04ee87b98d52e0714429502145a9f9b9be4c9ee963057380d22
+mac-arm64       sha256  fcfca8d51b62f82bb6a1d51dffc6be9ec8d16ed568c206d149256191190b5b2a
+netbsd-amd64    sha256  8cf814eed27c7bab079b8ee5a94d4d2cc39588d1706756095e89aa1866d191fb
+netbsd-arm64    sha256  2ac6d65599e2a09a65ebf00282babffc4ec3a9d87a77318860754c21e0298c52
+openbsd-amd64   sha256  81c38db1ef03ba8f607926e275971cec4278aed497ee52efae126e9d50ed5694
+openbsd-arm64   sha256  57af9a8c4628478f4b6d1dc497420a000b04f8836d3929ae50d3ff2fa93a03ec
+solaris-amd64   sha256  458d45236f5a671f2425636cbd98391063d21e82ef80ec3c4101ad6b6c583ce3
+windows-386     sha256  aef697c4e94e01593e9b6bd4a25b6ebfa148a733eac50a90d21313c012d7a1b8
+windows-amd64   sha256  0a241893653b1afd203b35c23a417ec01a4168837f3ab952ea417e5351593f99
+windows-arm64   sha256  9574fd143b602a647aa8ebf2db138d7f69fc3aa4a6eedea7f68309659e963693
diff --git a/pw_env_setup/py/pw_env_setup/cipd_setup/buildifier.json b/pw_env_setup/py/pw_env_setup/cipd_setup/buildifier.json
index dcd09de..cfe2766 100644
--- a/pw_env_setup/py/pw_env_setup/cipd_setup/buildifier.json
+++ b/pw_env_setup/py/pw_env_setup/cipd_setup/buildifier.json
@@ -10,7 +10,7 @@
         "windows-amd64"
       ],
       "tags": [
-        "version:2@v6.1.2"
+        "version:2@v6.3.3"
       ]
     }
   ]
diff --git a/pw_env_setup/py/pw_env_setup/cipd_setup/clang_next.json b/pw_env_setup/py/pw_env_setup/cipd_setup/clang_next.json
index f948b59..9b04af9 100644
--- a/pw_env_setup/py/pw_env_setup/cipd_setup/clang_next.json
+++ b/pw_env_setup/py/pw_env_setup/cipd_setup/clang_next.json
@@ -10,7 +10,7 @@
         "windows-amd64"
       ],
       "tags": [
-        "git_revision:020d2fb7711d70e296f19d83565f8d93d2cfda71"
+        "git_revision:576b184d6e3b633f51b908b61ebd281d2ecbf66f"
       ],
       "version_file": ".versions/clang.cipd_version"
     }
diff --git a/pw_env_setup/py/pw_env_setup/cipd_setup/cmake.json b/pw_env_setup/py/pw_env_setup/cipd_setup/cmake.json
index 96367e8..3ae2501 100644
--- a/pw_env_setup/py/pw_env_setup/cipd_setup/cmake.json
+++ b/pw_env_setup/py/pw_env_setup/cipd_setup/cmake.json
@@ -13,7 +13,7 @@
         "windows-amd64"
       ],
       "tags": [
-        "version:2@3.27.2.chromium.8"
+        "version:2@3.27.4.chromium.8"
       ]
     }
   ]
diff --git a/pw_env_setup/py/pw_env_setup/cipd_setup/coverage.json b/pw_env_setup/py/pw_env_setup/cipd_setup/coverage.json
index f7d773a..337b136 100644
--- a/pw_env_setup/py/pw_env_setup/cipd_setup/coverage.json
+++ b/pw_env_setup/py/pw_env_setup/cipd_setup/coverage.json
@@ -6,7 +6,7 @@
         "linux-amd64"
       ],
       "tags": [
-        "g3-revision:fuchsia.infra.coverage.upload_clients_20230811_RC00"
+        "g3-revision:fuchsia.infra.coverage.upload_clients_20230908_RC00"
       ]
     },
     {
@@ -15,7 +15,7 @@
         "linux-amd64"
       ],
       "tags": [
-        "g3-revision:fuchsia.infra.coverage.upload_clients_20230811_RC00"
+        "g3-revision:fuchsia.infra.coverage.upload_clients_20230908_RC00"
       ]
     }
   ]
diff --git a/pw_env_setup/py/pw_env_setup/cipd_setup/go.json b/pw_env_setup/py/pw_env_setup/cipd_setup/go.json
index b9a775a..7891a06 100644
--- a/pw_env_setup/py/pw_env_setup/cipd_setup/go.json
+++ b/pw_env_setup/py/pw_env_setup/cipd_setup/go.json
@@ -10,7 +10,7 @@
         "windows-amd64"
       ],
       "tags": [
-        "version:2@1.21.0.chromium1"
+        "version:2@1.21.1.chromium1"
       ]
     },
     {
diff --git a/pw_env_setup/py/pw_env_setup/cipd_setup/host_tools.json b/pw_env_setup/py/pw_env_setup/cipd_setup/host_tools.json
index 077ab12..aff7380 100644
--- a/pw_env_setup/py/pw_env_setup/cipd_setup/host_tools.json
+++ b/pw_env_setup/py/pw_env_setup/cipd_setup/host_tools.json
@@ -8,7 +8,7 @@
         "windows-amd64"
       ],
       "tags": [
-        "git_revision:171a9a7e624b0a48e061c76d4629df2cafedeecc"
+        "git_revision:2f4f1ea0dc56d36f36687a1faa433b17774be647"
       ],
       "version_file": ".versions/host_tools.cipd_version"
     }
diff --git a/pw_env_setup/py/pw_env_setup/cipd_setup/pigweed.json b/pw_env_setup/py/pw_env_setup/cipd_setup/pigweed.json
index 1e75248..ec9536a 100644
--- a/pw_env_setup/py/pw_env_setup/cipd_setup/pigweed.json
+++ b/pw_env_setup/py/pw_env_setup/cipd_setup/pigweed.json
@@ -70,7 +70,7 @@
         "windows-amd64"
       ],
       "tags": [
-        "git_revision:ebd0b8a0472b865b7eb6e1a32af97ae31d829033"
+        "git_revision:8475d0a2b853f6184948b428ec679edf84ed2688"
       ],
       "version_file": ".versions/clang.cipd_version"
     },
diff --git a/pw_env_setup/py/pw_env_setup/cipd_setup/python310.json b/pw_env_setup/py/pw_env_setup/cipd_setup/python310.json
index e46eb39..cb9b353 100644
--- a/pw_env_setup/py/pw_env_setup/cipd_setup/python310.json
+++ b/pw_env_setup/py/pw_env_setup/cipd_setup/python310.json
@@ -20,10 +20,10 @@
         "mac-amd64",
         "windows-amd64"
       ],
+      "subdir": "pip_cache",
       "tags": [
-        "git_revision:797baaed66ab642510ca07e9478754a11000a5f5"
-      ],
-      "subdir": "pip_cache"
+        "git_revision:b7a3673c074997f0011732393d10868213effa48"
+      ]
     }
   ]
 }
diff --git a/pw_env_setup/py/pw_env_setup/cipd_setup/python311.json b/pw_env_setup/py/pw_env_setup/cipd_setup/python311.json
index 51ae612..ea76636 100644
--- a/pw_env_setup/py/pw_env_setup/cipd_setup/python311.json
+++ b/pw_env_setup/py/pw_env_setup/cipd_setup/python311.json
@@ -21,10 +21,10 @@
         "mac-amd64",
         "windows-amd64"
       ],
+      "subdir": "pip_cache",
       "tags": [
-        "git_revision:797baaed66ab642510ca07e9478754a11000a5f5"
-      ],
-      "subdir": "pip_cache"
+        "git_revision:b7a3673c074997f0011732393d10868213effa48"
+      ]
     }
   ]
 }
diff --git a/pw_env_setup/py/pw_env_setup/cipd_setup/python38.json b/pw_env_setup/py/pw_env_setup/cipd_setup/python38.json
index 7b31185..0d0f34d 100644
--- a/pw_env_setup/py/pw_env_setup/cipd_setup/python38.json
+++ b/pw_env_setup/py/pw_env_setup/cipd_setup/python38.json
@@ -20,10 +20,10 @@
         "mac-amd64",
         "windows-amd64"
       ],
+      "subdir": "pip_cache",
       "tags": [
-        "git_revision:797baaed66ab642510ca07e9478754a11000a5f5"
-      ],
-      "subdir": "pip_cache"
+        "git_revision:b7a3673c074997f0011732393d10868213effa48"
+      ]
     }
   ]
 }
diff --git a/pw_env_setup/py/pw_env_setup/cipd_setup/python39.json b/pw_env_setup/py/pw_env_setup/cipd_setup/python39.json
index 2f78feb..a116989 100644
--- a/pw_env_setup/py/pw_env_setup/cipd_setup/python39.json
+++ b/pw_env_setup/py/pw_env_setup/cipd_setup/python39.json
@@ -20,10 +20,10 @@
         "mac-amd64",
         "windows-amd64"
       ],
+      "subdir": "pip_cache",
       "tags": [
-        "git_revision:797baaed66ab642510ca07e9478754a11000a5f5"
-      ],
-      "subdir": "pip_cache"
+        "git_revision:b7a3673c074997f0011732393d10868213effa48"
+      ]
     }
   ]
 }
diff --git a/pw_env_setup/py/pw_env_setup/entry_points/arm_gdb.py b/pw_env_setup/py/pw_env_setup/entry_points/arm_gdb.py
index cae9582..7bcf076 100644
--- a/pw_env_setup/py/pw_env_setup/entry_points/arm_gdb.py
+++ b/pw_env_setup/py/pw_env_setup/entry_points/arm_gdb.py
@@ -21,16 +21,23 @@
 import subprocess
 
 
-def main() -> None:
+def main() -> int:
+    """arm-gdb wrapper that sets up the Python environment for gdb"""
+
     # Find 'arm-none-eabi-gdb' as long as it isn't in the current Python
     # virtualenv entry point. In other words: not this script.
+    exclude_paths = sys.path
+    venv = os.environ.get('VIRTUAL_ENV')
+    if venv:
+        venv_path = Path(venv).resolve()
+        exclude_paths.append(os.path.join(venv_path, 'Scripts'))
     arm_gdb_binary = shutil.which(
         'arm-none-eabi-gdb',
         path=os.pathsep.join(
             [
                 path_entry
                 for path_entry in os.environ.get('PATH', '').split(os.pathsep)
-                if path_entry not in sys.path
+                if str(Path(path_entry).resolve()) not in exclude_paths
             ]
         ),
     )
@@ -49,9 +56,11 @@
         env['PYTHONPATH'] = str(python_path)
 
     # Ignore Ctrl-C to allow gdb to handle normally
-    signal.signal(signal.SIGINT, lambda sig, frame: None)
-    subprocess.run([str(arm_gdb_path)] + sys.argv[1:], env=env, check=False)
+    signal.signal(signal.SIGINT, signal.SIG_IGN)
+    return subprocess.run(
+        [str(arm_gdb_path)] + sys.argv[1:], env=env, check=False
+    ).returncode
 
 
 if __name__ == '__main__':
-    main()
+    sys.exit(main())
diff --git a/pw_env_setup/py/pw_env_setup/env_setup.py b/pw_env_setup/py/pw_env_setup/env_setup.py
index 3351341..bbdace5 100755
--- a/pw_env_setup/py/pw_env_setup/env_setup.py
+++ b/pw_env_setup/py/pw_env_setup/env_setup.py
@@ -473,6 +473,7 @@
 
         with open(gni_file, 'w') as outs:
             self._env.gni(outs, self._project_root)
+        shutil.copy(gni_file, os.path.join(self._install_dir, 'logs'))
 
     def _log(self, *args, **kwargs):
         # Not using logging module because it's awkward to flush a log handler.
diff --git a/pw_env_setup/py/pw_env_setup/virtualenv_setup/constraint.list b/pw_env_setup/py/pw_env_setup/virtualenv_setup/constraint.list
index 6590908..8ee0141 100644
--- a/pw_env_setup/py/pw_env_setup/virtualenv_setup/constraint.list
+++ b/pw_env_setup/py/pw_env_setup/virtualenv_setup/constraint.list
@@ -1,22 +1,21 @@
 -c ./python_base_requirements.txt
 
-alabaster==0.7.12
+alabaster==0.7.13
 appdirs==1.4.4
-astroid==2.14.2
-Babel==2.9.1
+astroid==2.15.6
+Babel==2.12.1
 backcall==0.2.0
 black==23.1.0
-breathe==4.34.0
 cachetools==5.0.0
 certifi==2021.10.8
 cffi==1.15.1
-charset-normalizer==2.0.10
+charset-normalizer==3.2.0
 coloredlogs==15.0.1
 coverage==7.2.7
 cryptography==41.0.2
 decorator==5.1.1
 dill==0.3.6
-docutils==0.18
+docutils==0.20.1
 google-api-core==2.7.1
 googleapis-common-protos==1.56.2
 google-auth==2.6.3
@@ -26,20 +25,20 @@
 google-resumable-media==2.3.2
 graphlib-backport==1.0.3; python_version < "3.9"
 humanfriendly==10.0
-idna==3.3
-imagesize==1.3.0
+idna==3.4
+imagesize==1.4.1
 importlib-metadata==6.8.0
 ipython==8.12.2
 isort==5.10.1
 jedi==0.18.1
-Jinja2==3.0.3
+Jinja2==3.1.2
 json5==0.9.11
 kconfiglib==14.1.0
 lazy-object-proxy==1.9.0
 MarkupSafe==2.1.3
 matplotlib-inline==0.1.3
 mccabe==0.6.1
-mypy==1.0.1
+mypy==1.5.0
 mypy-extensions==1.0.0
 mypy-protobuf==3.3.0
 parameterized==0.8.1
@@ -57,49 +56,47 @@
 pyasn1-modules==0.2.8
 pycparser==2.21
 pyelftools==0.27
-Pygments==2.14.0
-pylint==2.16.2
+Pygments==2.16.1
+pylint==2.17.5
 pyperclip==1.8.2
 pyserial==3.5
-pytz==2021.3
+pytz==2023.3
 pyusb==1.2.1
 PyYAML==6.0.1
-requests==2.27.1
+requests==2.31.0
 rsa==4.8
 setuptools==68.0.0
 six==1.16.0
 snowballstemmer==2.2.0
-sphinx==5.3.0
+sphinx==7.1.2
 sphinx-argparse==0.4.0
-sphinxcontrib-applehelp==1.0.2
+sphinxcontrib-applehelp==1.0.4
 sphinxcontrib-devhelp==1.0.2
-sphinxcontrib-htmlhelp==2.0.0
+sphinxcontrib-htmlhelp==2.0.1
 sphinxcontrib-jsmath==1.0.1
 sphinxcontrib-mermaid==0.9.2
 sphinxcontrib-qthelp==1.0.3
 sphinxcontrib-serializinghtml==1.1.5
 sphinx-copybutton==0.5.1
-sphinx-design==0.3.0
-sphinx-rtd-theme==1.2.0
-sphinx-tabs==3.4.1
+sphinx-design==0.5.0
 toml==0.10.2
 tomlkit==0.11.6
 traitlets==5.1.1
-types-docutils==0.18.0
+types-docutils==0.20.0.3
 types-futures==3.3.2
 types-protobuf==3.20.4.6
-types-Pygments==2.9.13
+types-Pygments==2.16.0.0
 types-pyserial==3.5.0.7
 types-PyYAML==6.0.12.11
-types-requests==2.28.11.15
+types-requests==2.31.0.2
 types-setuptools==67.8.0.0
 types-six==1.16.9
 types-urllib3==1.26.25.14
 typing-extensions==4.4.0
-urllib3==1.26.14
+urllib3==2.0.4
 watchdog==2.3.1
 wcwidth==0.2.6
 websockets==10.4
 wrapt==1.15.0
 yapf==0.31.0
-zipp==3.15.0
+zipp==3.16.2
diff --git a/pw_env_setup/py/pw_env_setup/virtualenv_setup/constraint_hashes_darwin.list b/pw_env_setup/py/pw_env_setup/virtualenv_setup/constraint_hashes_darwin.list
index 7eb27ea..bdf8e07 100644
--- a/pw_env_setup/py/pw_env_setup/virtualenv_setup/constraint_hashes_darwin.list
+++ b/pw_env_setup/py/pw_env_setup/virtualenv_setup/constraint_hashes_darwin.list
@@ -4,9 +4,9 @@
 #
 #    pip-compile --allow-unsafe --generate-hashes --output-file=/constraint_hashes_darwin.list --strip-extras /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
 #
-alabaster==0.7.12 \
-    --hash=sha256:446438bdcca0e05bd45ea2de1668c1d9b032e1a9154c2c259092d77031ddd359 \
-    --hash=sha256:a661d72d58e6ea8a57f7a86e37d86716863ee5e92788398526d58b26a4e4dc02
+alabaster==0.7.13 \
+    --hash=sha256:1ee19aca801bbabb5ba3f5f258e4422dfa86f82f3e9cefb0859b283cdd7f62a3 \
+    --hash=sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   sphinx
@@ -22,9 +22,9 @@
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   ipython
-astroid==2.14.2 \
-    --hash=sha256:0e0e3709d64fbffd3037e4ff403580550f14471fd3eaae9fa11cc9a5c7901153 \
-    --hash=sha256:a3cf9f02c53dd259144a7e8f3ccd75d67c9a8c716ef183e0c1f291bc5d7bb3cf
+astroid==2.15.6 \
+    --hash=sha256:389656ca57b6108f939cf5d2f9a2a825a3be50ba9d589670f393236e0a03b91c \
+    --hash=sha256:903f024859b7c7687d7a7f3a3f73b17301f8e42dfd9cc9df9d4418172d3e2dbd
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   pylint
@@ -34,9 +34,9 @@
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   stack-data
-babel==2.9.1 \
-    --hash=sha256:ab49e12b91d937cd11f0b67cb259a57ab4ad2b59ac7a3b41d6c06c0ac5b0def9 \
-    --hash=sha256:bc0c176f9f6a994582230df350aa6e05ba2ebe4b3ac317eab29d9be5d2768da0
+babel==2.12.1 \
+    --hash=sha256:b4246fb7677d3b98f501a39d43396d3cafdc8eadb045f4a31be01863f655c610 \
+    --hash=sha256:cc2d99999cd01d44420ae725a21c9e3711b3aadc7976d6147f622d8581963455
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   sphinx
@@ -159,9 +159,82 @@
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   cryptography
-charset-normalizer==2.0.10 \
-    --hash=sha256:876d180e9d7432c5d1dfd4c5d26b72f099d503e8fcc0feb7532c9289be60fcbd \
-    --hash=sha256:cb957888737fc0bbcd78e3df769addb41fd1ff8cf950dc9e7ad7793f1bf44455
+charset-normalizer==3.2.0 \
+    --hash=sha256:04e57ab9fbf9607b77f7d057974694b4f6b142da9ed4a199859d9d4d5c63fe96 \
+    --hash=sha256:09393e1b2a9461950b1c9a45d5fd251dc7c6f228acab64da1c9c0165d9c7765c \
+    --hash=sha256:0b87549028f680ca955556e3bd57013ab47474c3124dc069faa0b6545b6c9710 \
+    --hash=sha256:1000fba1057b92a65daec275aec30586c3de2401ccdcd41f8a5c1e2c87078706 \
+    --hash=sha256:1249cbbf3d3b04902ff081ffbb33ce3377fa6e4c7356f759f3cd076cc138d020 \
+    --hash=sha256:1920d4ff15ce893210c1f0c0e9d19bfbecb7983c76b33f046c13a8ffbd570252 \
+    --hash=sha256:193cbc708ea3aca45e7221ae58f0fd63f933753a9bfb498a3b474878f12caaad \
+    --hash=sha256:1a100c6d595a7f316f1b6f01d20815d916e75ff98c27a01ae817439ea7726329 \
+    --hash=sha256:1f30b48dd7fa1474554b0b0f3fdfdd4c13b5c737a3c6284d3cdc424ec0ffff3a \
+    --hash=sha256:203f0c8871d5a7987be20c72442488a0b8cfd0f43b7973771640fc593f56321f \
+    --hash=sha256:246de67b99b6851627d945db38147d1b209a899311b1305dd84916f2b88526c6 \
+    --hash=sha256:2dee8e57f052ef5353cf608e0b4c871aee320dd1b87d351c28764fc0ca55f9f4 \
+    --hash=sha256:2efb1bd13885392adfda4614c33d3b68dee4921fd0ac1d3988f8cbb7d589e72a \
+    --hash=sha256:2f4ac36d8e2b4cc1aa71df3dd84ff8efbe3bfb97ac41242fbcfc053c67434f46 \
+    --hash=sha256:3170c9399da12c9dc66366e9d14da8bf7147e1e9d9ea566067bbce7bb74bd9c2 \
+    --hash=sha256:3b1613dd5aee995ec6d4c69f00378bbd07614702a315a2cf6c1d21461fe17c23 \
+    --hash=sha256:3bb3d25a8e6c0aedd251753a79ae98a093c7e7b471faa3aa9a93a81431987ace \
+    --hash=sha256:3bb7fda7260735efe66d5107fb7e6af6a7c04c7fce9b2514e04b7a74b06bf5dd \
+    --hash=sha256:41b25eaa7d15909cf3ac4c96088c1f266a9a93ec44f87f1d13d4a0e86c81b982 \
+    --hash=sha256:45de3f87179c1823e6d9e32156fb14c1927fcc9aba21433f088fdfb555b77c10 \
+    --hash=sha256:46fb8c61d794b78ec7134a715a3e564aafc8f6b5e338417cb19fe9f57a5a9bf2 \
+    --hash=sha256:48021783bdf96e3d6de03a6e39a1171ed5bd7e8bb93fc84cc649d11490f87cea \
+    --hash=sha256:4957669ef390f0e6719db3613ab3a7631e68424604a7b448f079bee145da6e09 \
+    --hash=sha256:5e86d77b090dbddbe78867a0275cb4df08ea195e660f1f7f13435a4649e954e5 \
+    --hash=sha256:6339d047dab2780cc6220f46306628e04d9750f02f983ddb37439ca47ced7149 \
+    --hash=sha256:681eb3d7e02e3c3655d1b16059fbfb605ac464c834a0c629048a30fad2b27489 \
+    --hash=sha256:6c409c0deba34f147f77efaa67b8e4bb83d2f11c8806405f76397ae5b8c0d1c9 \
+    --hash=sha256:7095f6fbfaa55defb6b733cfeb14efaae7a29f0b59d8cf213be4e7ca0b857b80 \
+    --hash=sha256:70c610f6cbe4b9fce272c407dd9d07e33e6bf7b4aa1b7ffb6f6ded8e634e3592 \
+    --hash=sha256:72814c01533f51d68702802d74f77ea026b5ec52793c791e2da806a3844a46c3 \
+    --hash=sha256:7a4826ad2bd6b07ca615c74ab91f32f6c96d08f6fcc3902ceeedaec8cdc3bcd6 \
+    --hash=sha256:7c70087bfee18a42b4040bb9ec1ca15a08242cf5867c58726530bdf3945672ed \
+    --hash=sha256:855eafa5d5a2034b4621c74925d89c5efef61418570e5ef9b37717d9c796419c \
+    --hash=sha256:8700f06d0ce6f128de3ccdbc1acaea1ee264d2caa9ca05daaf492fde7c2a7200 \
+    --hash=sha256:89f1b185a01fe560bc8ae5f619e924407efca2191b56ce749ec84982fc59a32a \
+    --hash=sha256:8b2c760cfc7042b27ebdb4a43a4453bd829a5742503599144d54a032c5dc7e9e \
+    --hash=sha256:8c2f5e83493748286002f9369f3e6607c565a6a90425a3a1fef5ae32a36d749d \
+    --hash=sha256:8e098148dd37b4ce3baca71fb394c81dc5d9c7728c95df695d2dca218edf40e6 \
+    --hash=sha256:94aea8eff76ee6d1cdacb07dd2123a68283cb5569e0250feab1240058f53b623 \
+    --hash=sha256:95eb302ff792e12aba9a8b8f8474ab229a83c103d74a750ec0bd1c1eea32e669 \
+    --hash=sha256:9bd9b3b31adcb054116447ea22caa61a285d92e94d710aa5ec97992ff5eb7cf3 \
+    --hash=sha256:9e608aafdb55eb9f255034709e20d5a83b6d60c054df0802fa9c9883d0a937aa \
+    --hash=sha256:a103b3a7069b62f5d4890ae1b8f0597618f628b286b03d4bc9195230b154bfa9 \
+    --hash=sha256:a386ebe437176aab38c041de1260cd3ea459c6ce5263594399880bbc398225b2 \
+    --hash=sha256:a38856a971c602f98472050165cea2cdc97709240373041b69030be15047691f \
+    --hash=sha256:a401b4598e5d3f4a9a811f3daf42ee2291790c7f9d74b18d75d6e21dda98a1a1 \
+    --hash=sha256:a7647ebdfb9682b7bb97e2a5e7cb6ae735b1c25008a70b906aecca294ee96cf4 \
+    --hash=sha256:aaf63899c94de41fe3cf934601b0f7ccb6b428c6e4eeb80da72c58eab077b19a \
+    --hash=sha256:b0dac0ff919ba34d4df1b6131f59ce95b08b9065233446be7e459f95554c0dc8 \
+    --hash=sha256:baacc6aee0b2ef6f3d308e197b5d7a81c0e70b06beae1f1fcacffdbd124fe0e3 \
+    --hash=sha256:bf420121d4c8dce6b889f0e8e4ec0ca34b7f40186203f06a946fa0276ba54029 \
+    --hash=sha256:c04a46716adde8d927adb9457bbe39cf473e1e2c2f5d0a16ceb837e5d841ad4f \
+    --hash=sha256:c0b21078a4b56965e2b12f247467b234734491897e99c1d51cee628da9786959 \
+    --hash=sha256:c1c76a1743432b4b60ab3358c937a3fe1341c828ae6194108a94c69028247f22 \
+    --hash=sha256:c4983bf937209c57240cff65906b18bb35e64ae872da6a0db937d7b4af845dd7 \
+    --hash=sha256:c4fb39a81950ec280984b3a44f5bd12819953dc5fa3a7e6fa7a80db5ee853952 \
+    --hash=sha256:c57921cda3a80d0f2b8aec7e25c8aa14479ea92b5b51b6876d975d925a2ea346 \
+    --hash=sha256:c8063cf17b19661471ecbdb3df1c84f24ad2e389e326ccaf89e3fb2484d8dd7e \
+    --hash=sha256:ccd16eb18a849fd8dcb23e23380e2f0a354e8daa0c984b8a732d9cfaba3a776d \
+    --hash=sha256:cd6dbe0238f7743d0efe563ab46294f54f9bc8f4b9bcf57c3c666cc5bc9d1299 \
+    --hash=sha256:d62e51710986674142526ab9f78663ca2b0726066ae26b78b22e0f5e571238dd \
+    --hash=sha256:db901e2ac34c931d73054d9797383d0f8009991e723dab15109740a63e7f902a \
+    --hash=sha256:e03b8895a6990c9ab2cdcd0f2fe44088ca1c65ae592b8f795c3294af00a461c3 \
+    --hash=sha256:e1c8a2f4c69e08e89632defbfabec2feb8a8d99edc9f89ce33c4b9e36ab63037 \
+    --hash=sha256:e4b749b9cc6ee664a3300bb3a273c1ca8068c46be705b6c31cf5d276f8628a94 \
+    --hash=sha256:e6a5bf2cba5ae1bb80b154ed68a3cfa2fa00fde979a7f50d6598d3e17d9ac20c \
+    --hash=sha256:e857a2232ba53ae940d3456f7533ce6ca98b81917d47adc3c7fd55dad8fab858 \
+    --hash=sha256:ee4006268ed33370957f55bf2e6f4d263eaf4dc3cfc473d1d90baff6ed36ce4a \
+    --hash=sha256:eef9df1eefada2c09a5e7a40991b9fc6ac6ef20b1372abd48d2794a316dc0449 \
+    --hash=sha256:f058f6963fd82eb143c692cecdc89e075fa0828db2e5b291070485390b2f1c9c \
+    --hash=sha256:f25c229a6ba38a35ae6e25ca1264621cc25d4d38dca2942a7fce0b67a4efe918 \
+    --hash=sha256:f2a1d0fd4242bd8643ce6f98927cf9c04540af6efa92323e9d3124f57727bfc1 \
+    --hash=sha256:f7560358a6811e52e9c4d142d497f1a6e10103d3a6881f18d04dbce3729c0e2c \
+    --hash=sha256:f779d3ad205f108d14e99bb3859aa7dd8e9c68874617c72354d7ecaec2a054ac \
+    --hash=sha256:f87f746ee241d30d6ed93969de31e5ffd09a2961a051e60ae6bddde9ec3583aa
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   requests
@@ -275,13 +348,12 @@
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   pylint
-docutils==0.18 \
-    --hash=sha256:a31688b2ea858517fa54293e5d5df06fbb875fb1f7e4c64529271b77781ca8fc \
-    --hash=sha256:c1d5dab2b11d16397406a282e53953fe495a46d69ae329f55aa98a5c4e3c5fbb
+docutils==0.20.1 \
+    --hash=sha256:96f387a2c5562db4476f09f13bbab2192e764cac08ebbf3a34a95d9b1e4a59d6 \
+    --hash=sha256:f08a4e276c3a1583a86dce3e34aba3fe04d02bba2dd51ed16106244e8a923e3b
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   sphinx
-    #   sphinx-rtd-theme
 executing==1.2.0 \
     --hash=sha256:0314a69e37426e3608aada02473b4161d4caf5a4b244d1d0c48072b8fee7bacc \
     --hash=sha256:19da64c18d2d851112f09c287f8d3dbbdf725ab0e569077efb6cdcbd3497c107
@@ -407,15 +479,15 @@
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   coloredlogs
-idna==3.3 \
-    --hash=sha256:84d9dd047ffa80596e0f246e2eab0b391788b0503584e8945f2368256d2735ff \
-    --hash=sha256:9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d
+idna==3.4 \
+    --hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \
+    --hash=sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   requests
-imagesize==1.3.0 \
-    --hash=sha256:1db2f82529e53c3e929e8926a1fa9235aa82d0bd0c580359c67ec31b2fddaa8c \
-    --hash=sha256:cd1750d452385ca327479d45b64d9c7729ecf0b3969a58148298c77092261f9d
+imagesize==1.4.1 \
+    --hash=sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b \
+    --hash=sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   sphinx
@@ -442,9 +514,9 @@
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   ipython
     #   ptpython
-jinja2==3.0.3 \
-    --hash=sha256:077ce6014f7b40d03b47d1f1ca4b0fc8328a692bd284016f806ed0eaca390ad8 \
-    --hash=sha256:611bb273cd68f3b993fabdc4064fc858c5b47a973cb5aa7999ec1ba405c87cd7
+jinja2==3.1.2 \
+    --hash=sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852 \
+    --hash=sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   sphinx
@@ -558,33 +630,29 @@
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   pylint
-mypy==1.0.1 \
-    --hash=sha256:0af4f0e20706aadf4e6f8f8dc5ab739089146b83fd53cb4a7e0e850ef3de0bb6 \
-    --hash=sha256:15b5a824b58c7c822c51bc66308e759243c32631896743f030daf449fe3677f3 \
-    --hash=sha256:17455cda53eeee0a4adb6371a21dd3dbf465897de82843751cf822605d152c8c \
-    --hash=sha256:2013226d17f20468f34feddd6aae4635a55f79626549099354ce641bc7d40262 \
-    --hash=sha256:24189f23dc66f83b839bd1cce2dfc356020dfc9a8bae03978477b15be61b062e \
-    --hash=sha256:27a0f74a298769d9fdc8498fcb4f2beb86f0564bcdb1a37b58cbbe78e55cf8c0 \
-    --hash=sha256:28cea5a6392bb43d266782983b5a4216c25544cd7d80be681a155ddcdafd152d \
-    --hash=sha256:448de661536d270ce04f2d7dddaa49b2fdba6e3bd8a83212164d4174ff43aa65 \
-    --hash=sha256:48525aec92b47baed9b3380371ab8ab6e63a5aab317347dfe9e55e02aaad22e8 \
-    --hash=sha256:5bc8d6bd3b274dd3846597855d96d38d947aedba18776aa998a8d46fabdaed76 \
-    --hash=sha256:5deb252fd42a77add936b463033a59b8e48eb2eaec2976d76b6878d031933fe4 \
-    --hash=sha256:5f546ac34093c6ce33f6278f7c88f0f147a4849386d3bf3ae193702f4fe31407 \
-    --hash=sha256:5fdd63e4f50e3538617887e9aee91855368d9fc1dea30da743837b0df7373bc4 \
-    --hash=sha256:65b122a993d9c81ea0bfde7689b3365318a88bde952e4dfa1b3a8b4ac05d168b \
-    --hash=sha256:71a808334d3f41ef011faa5a5cd8153606df5fc0b56de5b2e89566c8093a0c9a \
-    --hash=sha256:920169f0184215eef19294fa86ea49ffd4635dedfdea2b57e45cb4ee85d5ccaf \
-    --hash=sha256:93a85495fb13dc484251b4c1fd7a5ac370cd0d812bbfc3b39c1bafefe95275d5 \
-    --hash=sha256:a2948c40a7dd46c1c33765718936669dc1f628f134013b02ff5ac6c7ef6942bf \
-    --hash=sha256:c6c2ccb7af7154673c591189c3687b013122c5a891bb5651eca3db8e6c6c55bd \
-    --hash=sha256:c96b8a0c019fe29040d520d9257d8c8f122a7343a8307bf8d6d4a43f5c5bfcc8 \
-    --hash=sha256:d42a98e76070a365a1d1c220fcac8aa4ada12ae0db679cb4d910fabefc88b994 \
-    --hash=sha256:dbeb24514c4acbc78d205f85dd0e800f34062efcc1f4a4857c57e4b4b8712bff \
-    --hash=sha256:e60d0b09f62ae97a94605c3f73fd952395286cf3e3b9e7b97f60b01ddfbbda88 \
-    --hash=sha256:e64f48c6176e243ad015e995de05af7f22bbe370dbb5b32bd6988438ec873919 \
-    --hash=sha256:e831662208055b006eef68392a768ff83596035ffd6d846786578ba1714ba8f6 \
-    --hash=sha256:eda5c8b9949ed411ff752b9a01adda31afe7eae1e53e946dbdf9db23865e66c4
+mypy==1.5.0 \
+    --hash=sha256:1fe816e26e676c1311b9e04fd576543b873576d39439f7c24c8e5c7728391ecf \
+    --hash=sha256:2c9d570f53908cbea326ad8f96028a673b814d9dca7515bf71d95fa662c3eb6f \
+    --hash=sha256:35b13335c6c46a386577a51f3d38b2b5d14aa619e9633bb756bd77205e4bd09f \
+    --hash=sha256:372fd97293ed0076d52695849f59acbbb8461c4ab447858cdaeaf734a396d823 \
+    --hash=sha256:42170e68adb1603ccdc55a30068f72bcfcde2ce650188e4c1b2a93018b826735 \
+    --hash=sha256:69b32d0dedd211b80f1b7435644e1ef83033a2af2ac65adcdc87c38db68a86be \
+    --hash=sha256:725b57a19b7408ef66a0fd9db59b5d3e528922250fb56e50bded27fea9ff28f0 \
+    --hash=sha256:769ddb6bfe55c2bd9c7d6d7020885a5ea14289619db7ee650e06b1ef0852c6f4 \
+    --hash=sha256:79c520aa24f21852206b5ff2cf746dc13020113aa73fa55af504635a96e62718 \
+    --hash=sha256:84cf9f7d8a8a22bb6a36444480f4cbf089c917a4179fbf7eea003ea931944a7f \
+    --hash=sha256:9166186c498170e1ff478a7f540846b2169243feb95bc228d39a67a1a450cdc6 \
+    --hash=sha256:a2500ad063413bc873ae102cf655bf49889e0763b260a3a7cf544a0cbbf7e70a \
+    --hash=sha256:a551ed0fc02455fe2c1fb0145160df8336b90ab80224739627b15ebe2b45e9dc \
+    --hash=sha256:ad3109bec37cc33654de8db30fe8ff3a1bb57ea65144167d68185e6dced9868d \
+    --hash=sha256:b4ea3a0241cb005b0ccdbd318fb99619b21ae51bcf1660b95fc22e0e7d3ba4a1 \
+    --hash=sha256:c36011320e452eb30bec38b9fd3ba20569dc9545d7d4540d967f3ea1fab9c374 \
+    --hash=sha256:c8a7444d6fcac7e2585b10abb91ad900a576da7af8f5cffffbff6065d9115813 \
+    --hash=sha256:cbf18f8db7e5f060d61c91e334d3b96d6bb624ddc9ee8a1cde407b737acbca2c \
+    --hash=sha256:d145b81a8214687cfc1f85c03663a5bbe736777410e5580e54d526e7e904f564 \
+    --hash=sha256:eec5c927aa4b3e8b4781840f1550079969926d0a22ce38075f6cfcf4b13e3eb4 \
+    --hash=sha256:f3460f34b3839b9bc84ee3ed65076eb827cd99ed13ed08d723f9083cada4a212 \
+    --hash=sha256:f3940cf5845b2512b3ab95463198b0cdf87975dfd17fdcc6ce9709a9abe09e69
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
 mypy-extensions==1.0.0 \
     --hash=sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d \
@@ -735,17 +803,17 @@
     --hash=sha256:5609aa6da1123fccfae2e8431a67b4146aa7fad5b3889f808df12b110f230937 \
     --hash=sha256:cde854e662774c5457d688ca41615f6594187ba7067af101232df889a6b7a66b
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-pygments==2.14.0 \
-    --hash=sha256:b3ed06a9e8ac9a9aae5a6f5dbe78a8a58655d17b43b93c078f094ddc476ae297 \
-    --hash=sha256:fa7bd7bd2771287c0de303af8bfdfc731f51bd2c6a47ab69d117138893b82717
+pygments==2.16.1 \
+    --hash=sha256:13fc09fa63bc8d8671a6d247e1eb303c4b343eaee81d861f3404db2935653692 \
+    --hash=sha256:1daff0494820c69bc8941e407aa20f577374ee88364ee10a98fdbe0aece96e29
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   ipython
     #   ptpython
     #   sphinx
-pylint==2.16.2 \
-    --hash=sha256:13b2c805a404a9bf57d002cd5f054ca4d40b0b87542bdaba5e05321ae8262c84 \
-    --hash=sha256:ff22dde9c2128cd257c145cfd51adeff0be7df4d80d669055f24a962b351bbe4
+pylint==2.17.5 \
+    --hash=sha256:73995fb8216d3bed149c8d51bba25b2c52a8251a2c8ac846ec668ce38fab5413 \
+    --hash=sha256:f7b601cbc06fef7e62a754e2b41294c2aa31f1cb659624b9a85bcba29eaf8252
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
 pyperclip==1.8.2 \
     --hash=sha256:105254a8b04934f0bc84e9c24eb360a591aaf6535c9def5f29d92af107a9bf57
@@ -760,9 +828,9 @@
     --hash=sha256:3c77e014170dfffbd816e6ffc205e9842efb10be9f58ec16d3e8675b4925cddb \
     --hash=sha256:c4451db6ba391ca6ca299fb3ec7bae67a5c55dde170964c7a14ceefec02f2cf0
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-pytz==2021.3 \
-    --hash=sha256:3672058bc3453457b622aab7a1c3bfd5ab0bdae451512f6cf25f64ed37f5b87c \
-    --hash=sha256:acad2d8b20a1af07d4e4c9d2e9285c5ed9104354062f275f3fcd88dcef4f1326
+pytz==2023.3 \
+    --hash=sha256:1d8ce29db189191fb55338ee6d0387d82ab59f3d00eac103412d64e0ebd0c588 \
+    --hash=sha256:a151b3abb88eda1d4e34a9814df37de2a80e301e68ba0fd856fb9b46bfbbbffb
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   babel
@@ -808,9 +876,9 @@
     --hash=sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d \
     --hash=sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-requests==2.27.1 \
-    --hash=sha256:68d7c56fd5a8999887728ef304a6d12edc7be74f1cfa47714fc8b414525c9a61 \
-    --hash=sha256:f22fa1e554c9ddfd16e6e41ac79759e17be9e492b3587efa038054674760e72d
+requests==2.31.0 \
+    --hash=sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f \
+    --hash=sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   google-api-core
@@ -835,30 +903,24 @@
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   sphinx
-sphinx==5.3.0 \
-    --hash=sha256:060ca5c9f7ba57a08a1219e547b269fadf125ae25b06b9fa7f66768efb652d6d \
-    --hash=sha256:51026de0a9ff9fc13c05d74913ad66047e104f56a129ff73e174eb5c3ee794b5
+sphinx==7.1.2 \
+    --hash=sha256:780f4d32f1d7d1126576e0e5ecc19dc32ab76cd24e950228dcf7b1f6d3d9e22f \
+    --hash=sha256:d170a81825b2fcacb6dfd5a0d7f578a053e45d3f2b153fecc948c37344eb4cbe
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   sphinx-argparse
     #   sphinx-design
-    #   sphinx-rtd-theme
-    #   sphinxcontrib-jquery
 sphinx-argparse==0.4.0 \
     --hash=sha256:73bee01f7276fae2bf621ccfe4d167af7306e7288e3482005405d9f826f9b037 \
     --hash=sha256:e0f34184eb56f12face774fbc87b880abdb9017a0998d1ec559b267e9697e449
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-sphinx-design==0.3.0 \
-    --hash=sha256:7183fa1fae55b37ef01bda5125a21ee841f5bbcbf59a35382be598180c4cefba \
-    --hash=sha256:823c1dd74f31efb3285ec2f1254caefed29d762a40cd676f58413a1e4ed5cc96
+sphinx-design==0.5.0 \
+    --hash=sha256:1af1267b4cea2eedd6724614f19dcc88fe2e15aff65d06b2f6252cee9c4f4c1e \
+    --hash=sha256:e8e513acea6f92d15c6de3b34e954458f245b8e761b45b63950f65373352ab00
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-sphinx-rtd-theme==1.2.0 \
-    --hash=sha256:a0d8bd1a2ed52e0b338cbe19c4b2eef3c5e7a048769753dac6a9f059c7b641b8 \
-    --hash=sha256:f823f7e71890abe0ac6aaa6013361ea2696fc8d3e1fa798f463e82bdb77eeff2
-    # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-sphinxcontrib-applehelp==1.0.2 \
-    --hash=sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a \
-    --hash=sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58
+sphinxcontrib-applehelp==1.0.4 \
+    --hash=sha256:29d341f67fb0f6f586b23ad80e072c8e6ad0b48417db2bde114a4c9746feb228 \
+    --hash=sha256:828f867945bbe39817c210a1abfd1bc4895c8b73fcaade56d45357a348a07d7e
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   sphinx
@@ -868,18 +930,12 @@
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   sphinx
-sphinxcontrib-htmlhelp==2.0.0 \
-    --hash=sha256:d412243dfb797ae3ec2b59eca0e52dac12e75a241bf0e4eb861e450d06c6ed07 \
-    --hash=sha256:f5f8bb2d0d629f398bf47d0d69c07bc13b65f75a81ad9e2f71a63d4b7a2f6db2
+sphinxcontrib-htmlhelp==2.0.1 \
+    --hash=sha256:0cbdd302815330058422b98a113195c9249825d681e18f11e8b1f78a2f11efff \
+    --hash=sha256:c38cb46dccf316c79de6e5515e1770414b797162b23cd3d06e67020e1d2a6903
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   sphinx
-sphinxcontrib-jquery==4.1 \
-    --hash=sha256:1620739f04e36a2c779f1a131a2dfd49b2fd07351bf1968ced074365933abc7a \
-    --hash=sha256:f936030d7d0147dd026a4f2b5a57343d233f1fc7b363f68b3d4f1cb0993878ae
-    # via
-    #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-    #   sphinx-rtd-theme
 sphinxcontrib-jsmath==1.0.1 \
     --hash=sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178 \
     --hash=sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8
@@ -936,9 +992,9 @@
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   ipython
     #   matplotlib-inline
-types-docutils==0.18.0 \
-    --hash=sha256:14f781eb28d89a1cd61f1c41bd0776ad3bb4e2333d317c37d2c67f2eaf5891fe \
-    --hash=sha256:f0305109169326edffd98c128f542f773be06a1502a17d96359c53e31082db19
+types-docutils==0.20.0.3 \
+    --hash=sha256:4928e790f42b99d5833990f99c8dd9fa9f16825f6ed30380ca981846d36870cd \
+    --hash=sha256:a930150d8e01a9170f9bca489f46808ddebccdd8bc1e47c07968a77e49fb9321
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   types-pygments
@@ -948,9 +1004,9 @@
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   mypy-protobuf
-types-pygments==2.9.13 \
-    --hash=sha256:589b87a6295c2b8b40566e46ff5ab9b5097735b051158f36648f64acc2e98dbe \
-    --hash=sha256:8b4ff9be1208b1dce639e406fc4d4e6a4682b6b253a021f2a7ffff9dbce16944
+types-pygments==2.16.0.0 \
+    --hash=sha256:4624a547d5ba73c971fac5d6fd327141e85e65f6123448bee76f0c8557652a71 \
+    --hash=sha256:aa93e4664e2d6cfea7570cde156e3966bf939f9c7d736cd179c4c8e94f7600b2
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
 types-pyserial==3.5.0.7 \
     --hash=sha256:22b665a336539b85108e2f5d61e6cde1b59818eae78324c17bfe64edc1a2bd66 \
@@ -960,9 +1016,9 @@
     --hash=sha256:7d340b19ca28cddfdba438ee638cd4084bde213e501a3978738543e27094775b \
     --hash=sha256:a461508f3096d1d5810ec5ab95d7eeecb651f3a15b71959999988942063bf01d
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-types-requests==2.28.11.15 \
-    --hash=sha256:a05e4c7bc967518fba5789c341ea8b0c942776ee474c7873129a61161978e586 \
-    --hash=sha256:fc8eaa09cc014699c6b63c60c2e3add0c8b09a410c818b5ac6e65f92a26dde09
+types-requests==2.31.0.2 \
+    --hash=sha256:56d181c85b5925cbc59f4489a57e72a8b2166f18273fd8ba7b6fe0c0b986f12a \
+    --hash=sha256:6aa3f7faf0ea52d728bb18c0a0d1522d9bfd8c72d26ff6f61bfc3d06a411cf40
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
 types-setuptools==67.8.0.0 \
     --hash=sha256:6df73340d96b238a4188b7b7668814b37e8018168aef1eef94a3b1872e3f60ff \
@@ -990,9 +1046,9 @@
     #   ipython
     #   mypy
     #   pylint
-urllib3==1.26.14 \
-    --hash=sha256:076907bf8fd355cde77728471316625a4d2f7e713c125f51953bb5b3eecf4f72 \
-    --hash=sha256:75edcdc2f7d85b137124a6c3c9fc3933cdeaa12ecb9a6a959f22797a0feca7e1
+urllib3==2.0.4 \
+    --hash=sha256:8d22f86aae8ef5e410d4f539fde9ce6b2113a001bb4d189e0aed70642d602b11 \
+    --hash=sha256:de7df1803967d2c2a98e4b11bb7d6bd9210474c46e8a0401514e3a42a75ebde4
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   requests
@@ -1192,13 +1248,12 @@
     --hash=sha256:408fb9a2b254c302f49db83c59f9aa0b4b0fd0ec25be3a5c51181327922ff63d \
     --hash=sha256:e3a234ba8455fe201eaa649cdac872d590089a18b661e39bbac7020978dd9c2e
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-zipp==3.15.0 \
-    --hash=sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b \
-    --hash=sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556
+zipp==3.16.2 \
+    --hash=sha256:679e51dd4403591b2d6838a48de3d283f3d188412a9782faadf845f298736ba0 \
+    --hash=sha256:ebc15946aa78bd63458992fc81ec3b6f7b1e92d51c35e6de1c3804e73b799147
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   importlib-metadata
-
 # The following packages are considered to be unsafe in a requirements file:
 pip==23.2 \
     --hash=sha256:78e5353a9dda374b462f2054f83a7b63f3f065c98236a68361845c1b0ee7e35f \
diff --git a/pw_env_setup/py/pw_env_setup/virtualenv_setup/constraint_hashes_linux.list b/pw_env_setup/py/pw_env_setup/virtualenv_setup/constraint_hashes_linux.list
index b7b3898..07b61b9 100644
--- a/pw_env_setup/py/pw_env_setup/virtualenv_setup/constraint_hashes_linux.list
+++ b/pw_env_setup/py/pw_env_setup/virtualenv_setup/constraint_hashes_linux.list
@@ -4,9 +4,9 @@
 #
 #    pip-compile --allow-unsafe --generate-hashes --output-file=/constraint_hashes_linux.list --strip-extras /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
 #
-alabaster==0.7.12 \
-    --hash=sha256:446438bdcca0e05bd45ea2de1668c1d9b032e1a9154c2c259092d77031ddd359 \
-    --hash=sha256:a661d72d58e6ea8a57f7a86e37d86716863ee5e92788398526d58b26a4e4dc02
+alabaster==0.7.13 \
+    --hash=sha256:1ee19aca801bbabb5ba3f5f258e4422dfa86f82f3e9cefb0859b283cdd7f62a3 \
+    --hash=sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   sphinx
@@ -16,9 +16,9 @@
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   ptpython
-astroid==2.14.2 \
-    --hash=sha256:0e0e3709d64fbffd3037e4ff403580550f14471fd3eaae9fa11cc9a5c7901153 \
-    --hash=sha256:a3cf9f02c53dd259144a7e8f3ccd75d67c9a8c716ef183e0c1f291bc5d7bb3cf
+astroid==2.15.6 \
+    --hash=sha256:389656ca57b6108f939cf5d2f9a2a825a3be50ba9d589670f393236e0a03b91c \
+    --hash=sha256:903f024859b7c7687d7a7f3a3f73b17301f8e42dfd9cc9df9d4418172d3e2dbd
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   pylint
@@ -28,9 +28,9 @@
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   stack-data
-babel==2.9.1 \
-    --hash=sha256:ab49e12b91d937cd11f0b67cb259a57ab4ad2b59ac7a3b41d6c06c0ac5b0def9 \
-    --hash=sha256:bc0c176f9f6a994582230df350aa6e05ba2ebe4b3ac317eab29d9be5d2768da0
+babel==2.12.1 \
+    --hash=sha256:b4246fb7677d3b98f501a39d43396d3cafdc8eadb045f4a31be01863f655c610 \
+    --hash=sha256:cc2d99999cd01d44420ae725a21c9e3711b3aadc7976d6147f622d8581963455
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   sphinx
@@ -153,9 +153,82 @@
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   cryptography
-charset-normalizer==2.0.10 \
-    --hash=sha256:876d180e9d7432c5d1dfd4c5d26b72f099d503e8fcc0feb7532c9289be60fcbd \
-    --hash=sha256:cb957888737fc0bbcd78e3df769addb41fd1ff8cf950dc9e7ad7793f1bf44455
+charset-normalizer==3.2.0 \
+    --hash=sha256:04e57ab9fbf9607b77f7d057974694b4f6b142da9ed4a199859d9d4d5c63fe96 \
+    --hash=sha256:09393e1b2a9461950b1c9a45d5fd251dc7c6f228acab64da1c9c0165d9c7765c \
+    --hash=sha256:0b87549028f680ca955556e3bd57013ab47474c3124dc069faa0b6545b6c9710 \
+    --hash=sha256:1000fba1057b92a65daec275aec30586c3de2401ccdcd41f8a5c1e2c87078706 \
+    --hash=sha256:1249cbbf3d3b04902ff081ffbb33ce3377fa6e4c7356f759f3cd076cc138d020 \
+    --hash=sha256:1920d4ff15ce893210c1f0c0e9d19bfbecb7983c76b33f046c13a8ffbd570252 \
+    --hash=sha256:193cbc708ea3aca45e7221ae58f0fd63f933753a9bfb498a3b474878f12caaad \
+    --hash=sha256:1a100c6d595a7f316f1b6f01d20815d916e75ff98c27a01ae817439ea7726329 \
+    --hash=sha256:1f30b48dd7fa1474554b0b0f3fdfdd4c13b5c737a3c6284d3cdc424ec0ffff3a \
+    --hash=sha256:203f0c8871d5a7987be20c72442488a0b8cfd0f43b7973771640fc593f56321f \
+    --hash=sha256:246de67b99b6851627d945db38147d1b209a899311b1305dd84916f2b88526c6 \
+    --hash=sha256:2dee8e57f052ef5353cf608e0b4c871aee320dd1b87d351c28764fc0ca55f9f4 \
+    --hash=sha256:2efb1bd13885392adfda4614c33d3b68dee4921fd0ac1d3988f8cbb7d589e72a \
+    --hash=sha256:2f4ac36d8e2b4cc1aa71df3dd84ff8efbe3bfb97ac41242fbcfc053c67434f46 \
+    --hash=sha256:3170c9399da12c9dc66366e9d14da8bf7147e1e9d9ea566067bbce7bb74bd9c2 \
+    --hash=sha256:3b1613dd5aee995ec6d4c69f00378bbd07614702a315a2cf6c1d21461fe17c23 \
+    --hash=sha256:3bb3d25a8e6c0aedd251753a79ae98a093c7e7b471faa3aa9a93a81431987ace \
+    --hash=sha256:3bb7fda7260735efe66d5107fb7e6af6a7c04c7fce9b2514e04b7a74b06bf5dd \
+    --hash=sha256:41b25eaa7d15909cf3ac4c96088c1f266a9a93ec44f87f1d13d4a0e86c81b982 \
+    --hash=sha256:45de3f87179c1823e6d9e32156fb14c1927fcc9aba21433f088fdfb555b77c10 \
+    --hash=sha256:46fb8c61d794b78ec7134a715a3e564aafc8f6b5e338417cb19fe9f57a5a9bf2 \
+    --hash=sha256:48021783bdf96e3d6de03a6e39a1171ed5bd7e8bb93fc84cc649d11490f87cea \
+    --hash=sha256:4957669ef390f0e6719db3613ab3a7631e68424604a7b448f079bee145da6e09 \
+    --hash=sha256:5e86d77b090dbddbe78867a0275cb4df08ea195e660f1f7f13435a4649e954e5 \
+    --hash=sha256:6339d047dab2780cc6220f46306628e04d9750f02f983ddb37439ca47ced7149 \
+    --hash=sha256:681eb3d7e02e3c3655d1b16059fbfb605ac464c834a0c629048a30fad2b27489 \
+    --hash=sha256:6c409c0deba34f147f77efaa67b8e4bb83d2f11c8806405f76397ae5b8c0d1c9 \
+    --hash=sha256:7095f6fbfaa55defb6b733cfeb14efaae7a29f0b59d8cf213be4e7ca0b857b80 \
+    --hash=sha256:70c610f6cbe4b9fce272c407dd9d07e33e6bf7b4aa1b7ffb6f6ded8e634e3592 \
+    --hash=sha256:72814c01533f51d68702802d74f77ea026b5ec52793c791e2da806a3844a46c3 \
+    --hash=sha256:7a4826ad2bd6b07ca615c74ab91f32f6c96d08f6fcc3902ceeedaec8cdc3bcd6 \
+    --hash=sha256:7c70087bfee18a42b4040bb9ec1ca15a08242cf5867c58726530bdf3945672ed \
+    --hash=sha256:855eafa5d5a2034b4621c74925d89c5efef61418570e5ef9b37717d9c796419c \
+    --hash=sha256:8700f06d0ce6f128de3ccdbc1acaea1ee264d2caa9ca05daaf492fde7c2a7200 \
+    --hash=sha256:89f1b185a01fe560bc8ae5f619e924407efca2191b56ce749ec84982fc59a32a \
+    --hash=sha256:8b2c760cfc7042b27ebdb4a43a4453bd829a5742503599144d54a032c5dc7e9e \
+    --hash=sha256:8c2f5e83493748286002f9369f3e6607c565a6a90425a3a1fef5ae32a36d749d \
+    --hash=sha256:8e098148dd37b4ce3baca71fb394c81dc5d9c7728c95df695d2dca218edf40e6 \
+    --hash=sha256:94aea8eff76ee6d1cdacb07dd2123a68283cb5569e0250feab1240058f53b623 \
+    --hash=sha256:95eb302ff792e12aba9a8b8f8474ab229a83c103d74a750ec0bd1c1eea32e669 \
+    --hash=sha256:9bd9b3b31adcb054116447ea22caa61a285d92e94d710aa5ec97992ff5eb7cf3 \
+    --hash=sha256:9e608aafdb55eb9f255034709e20d5a83b6d60c054df0802fa9c9883d0a937aa \
+    --hash=sha256:a103b3a7069b62f5d4890ae1b8f0597618f628b286b03d4bc9195230b154bfa9 \
+    --hash=sha256:a386ebe437176aab38c041de1260cd3ea459c6ce5263594399880bbc398225b2 \
+    --hash=sha256:a38856a971c602f98472050165cea2cdc97709240373041b69030be15047691f \
+    --hash=sha256:a401b4598e5d3f4a9a811f3daf42ee2291790c7f9d74b18d75d6e21dda98a1a1 \
+    --hash=sha256:a7647ebdfb9682b7bb97e2a5e7cb6ae735b1c25008a70b906aecca294ee96cf4 \
+    --hash=sha256:aaf63899c94de41fe3cf934601b0f7ccb6b428c6e4eeb80da72c58eab077b19a \
+    --hash=sha256:b0dac0ff919ba34d4df1b6131f59ce95b08b9065233446be7e459f95554c0dc8 \
+    --hash=sha256:baacc6aee0b2ef6f3d308e197b5d7a81c0e70b06beae1f1fcacffdbd124fe0e3 \
+    --hash=sha256:bf420121d4c8dce6b889f0e8e4ec0ca34b7f40186203f06a946fa0276ba54029 \
+    --hash=sha256:c04a46716adde8d927adb9457bbe39cf473e1e2c2f5d0a16ceb837e5d841ad4f \
+    --hash=sha256:c0b21078a4b56965e2b12f247467b234734491897e99c1d51cee628da9786959 \
+    --hash=sha256:c1c76a1743432b4b60ab3358c937a3fe1341c828ae6194108a94c69028247f22 \
+    --hash=sha256:c4983bf937209c57240cff65906b18bb35e64ae872da6a0db937d7b4af845dd7 \
+    --hash=sha256:c4fb39a81950ec280984b3a44f5bd12819953dc5fa3a7e6fa7a80db5ee853952 \
+    --hash=sha256:c57921cda3a80d0f2b8aec7e25c8aa14479ea92b5b51b6876d975d925a2ea346 \
+    --hash=sha256:c8063cf17b19661471ecbdb3df1c84f24ad2e389e326ccaf89e3fb2484d8dd7e \
+    --hash=sha256:ccd16eb18a849fd8dcb23e23380e2f0a354e8daa0c984b8a732d9cfaba3a776d \
+    --hash=sha256:cd6dbe0238f7743d0efe563ab46294f54f9bc8f4b9bcf57c3c666cc5bc9d1299 \
+    --hash=sha256:d62e51710986674142526ab9f78663ca2b0726066ae26b78b22e0f5e571238dd \
+    --hash=sha256:db901e2ac34c931d73054d9797383d0f8009991e723dab15109740a63e7f902a \
+    --hash=sha256:e03b8895a6990c9ab2cdcd0f2fe44088ca1c65ae592b8f795c3294af00a461c3 \
+    --hash=sha256:e1c8a2f4c69e08e89632defbfabec2feb8a8d99edc9f89ce33c4b9e36ab63037 \
+    --hash=sha256:e4b749b9cc6ee664a3300bb3a273c1ca8068c46be705b6c31cf5d276f8628a94 \
+    --hash=sha256:e6a5bf2cba5ae1bb80b154ed68a3cfa2fa00fde979a7f50d6598d3e17d9ac20c \
+    --hash=sha256:e857a2232ba53ae940d3456f7533ce6ca98b81917d47adc3c7fd55dad8fab858 \
+    --hash=sha256:ee4006268ed33370957f55bf2e6f4d263eaf4dc3cfc473d1d90baff6ed36ce4a \
+    --hash=sha256:eef9df1eefada2c09a5e7a40991b9fc6ac6ef20b1372abd48d2794a316dc0449 \
+    --hash=sha256:f058f6963fd82eb143c692cecdc89e075fa0828db2e5b291070485390b2f1c9c \
+    --hash=sha256:f25c229a6ba38a35ae6e25ca1264621cc25d4d38dca2942a7fce0b67a4efe918 \
+    --hash=sha256:f2a1d0fd4242bd8643ce6f98927cf9c04540af6efa92323e9d3124f57727bfc1 \
+    --hash=sha256:f7560358a6811e52e9c4d142d497f1a6e10103d3a6881f18d04dbce3729c0e2c \
+    --hash=sha256:f779d3ad205f108d14e99bb3859aa7dd8e9c68874617c72354d7ecaec2a054ac \
+    --hash=sha256:f87f746ee241d30d6ed93969de31e5ffd09a2961a051e60ae6bddde9ec3583aa
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   requests
@@ -269,13 +342,12 @@
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   pylint
-docutils==0.18 \
-    --hash=sha256:a31688b2ea858517fa54293e5d5df06fbb875fb1f7e4c64529271b77781ca8fc \
-    --hash=sha256:c1d5dab2b11d16397406a282e53953fe495a46d69ae329f55aa98a5c4e3c5fbb
+docutils==0.20.1 \
+    --hash=sha256:96f387a2c5562db4476f09f13bbab2192e764cac08ebbf3a34a95d9b1e4a59d6 \
+    --hash=sha256:f08a4e276c3a1583a86dce3e34aba3fe04d02bba2dd51ed16106244e8a923e3b
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   sphinx
-    #   sphinx-rtd-theme
 executing==1.2.0 \
     --hash=sha256:0314a69e37426e3608aada02473b4161d4caf5a4b244d1d0c48072b8fee7bacc \
     --hash=sha256:19da64c18d2d851112f09c287f8d3dbbdf725ab0e569077efb6cdcbd3497c107
@@ -401,15 +473,15 @@
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   coloredlogs
-idna==3.3 \
-    --hash=sha256:84d9dd047ffa80596e0f246e2eab0b391788b0503584e8945f2368256d2735ff \
-    --hash=sha256:9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d
+idna==3.4 \
+    --hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \
+    --hash=sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   requests
-imagesize==1.3.0 \
-    --hash=sha256:1db2f82529e53c3e929e8926a1fa9235aa82d0bd0c580359c67ec31b2fddaa8c \
-    --hash=sha256:cd1750d452385ca327479d45b64d9c7729ecf0b3969a58148298c77092261f9d
+imagesize==1.4.1 \
+    --hash=sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b \
+    --hash=sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   sphinx
@@ -436,9 +508,9 @@
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   ipython
     #   ptpython
-jinja2==3.0.3 \
-    --hash=sha256:077ce6014f7b40d03b47d1f1ca4b0fc8328a692bd284016f806ed0eaca390ad8 \
-    --hash=sha256:611bb273cd68f3b993fabdc4064fc858c5b47a973cb5aa7999ec1ba405c87cd7
+jinja2==3.1.2 \
+    --hash=sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852 \
+    --hash=sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   sphinx
@@ -552,33 +624,29 @@
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   pylint
-mypy==1.0.1 \
-    --hash=sha256:0af4f0e20706aadf4e6f8f8dc5ab739089146b83fd53cb4a7e0e850ef3de0bb6 \
-    --hash=sha256:15b5a824b58c7c822c51bc66308e759243c32631896743f030daf449fe3677f3 \
-    --hash=sha256:17455cda53eeee0a4adb6371a21dd3dbf465897de82843751cf822605d152c8c \
-    --hash=sha256:2013226d17f20468f34feddd6aae4635a55f79626549099354ce641bc7d40262 \
-    --hash=sha256:24189f23dc66f83b839bd1cce2dfc356020dfc9a8bae03978477b15be61b062e \
-    --hash=sha256:27a0f74a298769d9fdc8498fcb4f2beb86f0564bcdb1a37b58cbbe78e55cf8c0 \
-    --hash=sha256:28cea5a6392bb43d266782983b5a4216c25544cd7d80be681a155ddcdafd152d \
-    --hash=sha256:448de661536d270ce04f2d7dddaa49b2fdba6e3bd8a83212164d4174ff43aa65 \
-    --hash=sha256:48525aec92b47baed9b3380371ab8ab6e63a5aab317347dfe9e55e02aaad22e8 \
-    --hash=sha256:5bc8d6bd3b274dd3846597855d96d38d947aedba18776aa998a8d46fabdaed76 \
-    --hash=sha256:5deb252fd42a77add936b463033a59b8e48eb2eaec2976d76b6878d031933fe4 \
-    --hash=sha256:5f546ac34093c6ce33f6278f7c88f0f147a4849386d3bf3ae193702f4fe31407 \
-    --hash=sha256:5fdd63e4f50e3538617887e9aee91855368d9fc1dea30da743837b0df7373bc4 \
-    --hash=sha256:65b122a993d9c81ea0bfde7689b3365318a88bde952e4dfa1b3a8b4ac05d168b \
-    --hash=sha256:71a808334d3f41ef011faa5a5cd8153606df5fc0b56de5b2e89566c8093a0c9a \
-    --hash=sha256:920169f0184215eef19294fa86ea49ffd4635dedfdea2b57e45cb4ee85d5ccaf \
-    --hash=sha256:93a85495fb13dc484251b4c1fd7a5ac370cd0d812bbfc3b39c1bafefe95275d5 \
-    --hash=sha256:a2948c40a7dd46c1c33765718936669dc1f628f134013b02ff5ac6c7ef6942bf \
-    --hash=sha256:c6c2ccb7af7154673c591189c3687b013122c5a891bb5651eca3db8e6c6c55bd \
-    --hash=sha256:c96b8a0c019fe29040d520d9257d8c8f122a7343a8307bf8d6d4a43f5c5bfcc8 \
-    --hash=sha256:d42a98e76070a365a1d1c220fcac8aa4ada12ae0db679cb4d910fabefc88b994 \
-    --hash=sha256:dbeb24514c4acbc78d205f85dd0e800f34062efcc1f4a4857c57e4b4b8712bff \
-    --hash=sha256:e60d0b09f62ae97a94605c3f73fd952395286cf3e3b9e7b97f60b01ddfbbda88 \
-    --hash=sha256:e64f48c6176e243ad015e995de05af7f22bbe370dbb5b32bd6988438ec873919 \
-    --hash=sha256:e831662208055b006eef68392a768ff83596035ffd6d846786578ba1714ba8f6 \
-    --hash=sha256:eda5c8b9949ed411ff752b9a01adda31afe7eae1e53e946dbdf9db23865e66c4
+mypy==1.5.0 \
+    --hash=sha256:1fe816e26e676c1311b9e04fd576543b873576d39439f7c24c8e5c7728391ecf \
+    --hash=sha256:2c9d570f53908cbea326ad8f96028a673b814d9dca7515bf71d95fa662c3eb6f \
+    --hash=sha256:35b13335c6c46a386577a51f3d38b2b5d14aa619e9633bb756bd77205e4bd09f \
+    --hash=sha256:372fd97293ed0076d52695849f59acbbb8461c4ab447858cdaeaf734a396d823 \
+    --hash=sha256:42170e68adb1603ccdc55a30068f72bcfcde2ce650188e4c1b2a93018b826735 \
+    --hash=sha256:69b32d0dedd211b80f1b7435644e1ef83033a2af2ac65adcdc87c38db68a86be \
+    --hash=sha256:725b57a19b7408ef66a0fd9db59b5d3e528922250fb56e50bded27fea9ff28f0 \
+    --hash=sha256:769ddb6bfe55c2bd9c7d6d7020885a5ea14289619db7ee650e06b1ef0852c6f4 \
+    --hash=sha256:79c520aa24f21852206b5ff2cf746dc13020113aa73fa55af504635a96e62718 \
+    --hash=sha256:84cf9f7d8a8a22bb6a36444480f4cbf089c917a4179fbf7eea003ea931944a7f \
+    --hash=sha256:9166186c498170e1ff478a7f540846b2169243feb95bc228d39a67a1a450cdc6 \
+    --hash=sha256:a2500ad063413bc873ae102cf655bf49889e0763b260a3a7cf544a0cbbf7e70a \
+    --hash=sha256:a551ed0fc02455fe2c1fb0145160df8336b90ab80224739627b15ebe2b45e9dc \
+    --hash=sha256:ad3109bec37cc33654de8db30fe8ff3a1bb57ea65144167d68185e6dced9868d \
+    --hash=sha256:b4ea3a0241cb005b0ccdbd318fb99619b21ae51bcf1660b95fc22e0e7d3ba4a1 \
+    --hash=sha256:c36011320e452eb30bec38b9fd3ba20569dc9545d7d4540d967f3ea1fab9c374 \
+    --hash=sha256:c8a7444d6fcac7e2585b10abb91ad900a576da7af8f5cffffbff6065d9115813 \
+    --hash=sha256:cbf18f8db7e5f060d61c91e334d3b96d6bb624ddc9ee8a1cde407b737acbca2c \
+    --hash=sha256:d145b81a8214687cfc1f85c03663a5bbe736777410e5580e54d526e7e904f564 \
+    --hash=sha256:eec5c927aa4b3e8b4781840f1550079969926d0a22ce38075f6cfcf4b13e3eb4 \
+    --hash=sha256:f3460f34b3839b9bc84ee3ed65076eb827cd99ed13ed08d723f9083cada4a212 \
+    --hash=sha256:f3940cf5845b2512b3ab95463198b0cdf87975dfd17fdcc6ce9709a9abe09e69
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
 mypy-extensions==1.0.0 \
     --hash=sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d \
@@ -729,17 +797,17 @@
     --hash=sha256:5609aa6da1123fccfae2e8431a67b4146aa7fad5b3889f808df12b110f230937 \
     --hash=sha256:cde854e662774c5457d688ca41615f6594187ba7067af101232df889a6b7a66b
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-pygments==2.14.0 \
-    --hash=sha256:b3ed06a9e8ac9a9aae5a6f5dbe78a8a58655d17b43b93c078f094ddc476ae297 \
-    --hash=sha256:fa7bd7bd2771287c0de303af8bfdfc731f51bd2c6a47ab69d117138893b82717
+pygments==2.16.1 \
+    --hash=sha256:13fc09fa63bc8d8671a6d247e1eb303c4b343eaee81d861f3404db2935653692 \
+    --hash=sha256:1daff0494820c69bc8941e407aa20f577374ee88364ee10a98fdbe0aece96e29
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   ipython
     #   ptpython
     #   sphinx
-pylint==2.16.2 \
-    --hash=sha256:13b2c805a404a9bf57d002cd5f054ca4d40b0b87542bdaba5e05321ae8262c84 \
-    --hash=sha256:ff22dde9c2128cd257c145cfd51adeff0be7df4d80d669055f24a962b351bbe4
+pylint==2.17.5 \
+    --hash=sha256:73995fb8216d3bed149c8d51bba25b2c52a8251a2c8ac846ec668ce38fab5413 \
+    --hash=sha256:f7b601cbc06fef7e62a754e2b41294c2aa31f1cb659624b9a85bcba29eaf8252
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
 pyperclip==1.8.2 \
     --hash=sha256:105254a8b04934f0bc84e9c24eb360a591aaf6535c9def5f29d92af107a9bf57
@@ -754,9 +822,9 @@
     --hash=sha256:3c77e014170dfffbd816e6ffc205e9842efb10be9f58ec16d3e8675b4925cddb \
     --hash=sha256:c4451db6ba391ca6ca299fb3ec7bae67a5c55dde170964c7a14ceefec02f2cf0
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-pytz==2021.3 \
-    --hash=sha256:3672058bc3453457b622aab7a1c3bfd5ab0bdae451512f6cf25f64ed37f5b87c \
-    --hash=sha256:acad2d8b20a1af07d4e4c9d2e9285c5ed9104354062f275f3fcd88dcef4f1326
+pytz==2023.3 \
+    --hash=sha256:1d8ce29db189191fb55338ee6d0387d82ab59f3d00eac103412d64e0ebd0c588 \
+    --hash=sha256:a151b3abb88eda1d4e34a9814df37de2a80e301e68ba0fd856fb9b46bfbbbffb
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   babel
@@ -802,9 +870,9 @@
     --hash=sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d \
     --hash=sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-requests==2.27.1 \
-    --hash=sha256:68d7c56fd5a8999887728ef304a6d12edc7be74f1cfa47714fc8b414525c9a61 \
-    --hash=sha256:f22fa1e554c9ddfd16e6e41ac79759e17be9e492b3587efa038054674760e72d
+requests==2.31.0 \
+    --hash=sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f \
+    --hash=sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   google-api-core
@@ -829,30 +897,24 @@
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   sphinx
-sphinx==5.3.0 \
-    --hash=sha256:060ca5c9f7ba57a08a1219e547b269fadf125ae25b06b9fa7f66768efb652d6d \
-    --hash=sha256:51026de0a9ff9fc13c05d74913ad66047e104f56a129ff73e174eb5c3ee794b5
+sphinx==7.1.2 \
+    --hash=sha256:780f4d32f1d7d1126576e0e5ecc19dc32ab76cd24e950228dcf7b1f6d3d9e22f \
+    --hash=sha256:d170a81825b2fcacb6dfd5a0d7f578a053e45d3f2b153fecc948c37344eb4cbe
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   sphinx-argparse
     #   sphinx-design
-    #   sphinx-rtd-theme
-    #   sphinxcontrib-jquery
 sphinx-argparse==0.4.0 \
     --hash=sha256:73bee01f7276fae2bf621ccfe4d167af7306e7288e3482005405d9f826f9b037 \
     --hash=sha256:e0f34184eb56f12face774fbc87b880abdb9017a0998d1ec559b267e9697e449
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-sphinx-design==0.3.0 \
-    --hash=sha256:7183fa1fae55b37ef01bda5125a21ee841f5bbcbf59a35382be598180c4cefba \
-    --hash=sha256:823c1dd74f31efb3285ec2f1254caefed29d762a40cd676f58413a1e4ed5cc96
+sphinx-design==0.5.0 \
+    --hash=sha256:1af1267b4cea2eedd6724614f19dcc88fe2e15aff65d06b2f6252cee9c4f4c1e \
+    --hash=sha256:e8e513acea6f92d15c6de3b34e954458f245b8e761b45b63950f65373352ab00
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-sphinx-rtd-theme==1.2.0 \
-    --hash=sha256:a0d8bd1a2ed52e0b338cbe19c4b2eef3c5e7a048769753dac6a9f059c7b641b8 \
-    --hash=sha256:f823f7e71890abe0ac6aaa6013361ea2696fc8d3e1fa798f463e82bdb77eeff2
-    # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-sphinxcontrib-applehelp==1.0.2 \
-    --hash=sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a \
-    --hash=sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58
+sphinxcontrib-applehelp==1.0.4 \
+    --hash=sha256:29d341f67fb0f6f586b23ad80e072c8e6ad0b48417db2bde114a4c9746feb228 \
+    --hash=sha256:828f867945bbe39817c210a1abfd1bc4895c8b73fcaade56d45357a348a07d7e
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   sphinx
@@ -862,18 +924,12 @@
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   sphinx
-sphinxcontrib-htmlhelp==2.0.0 \
-    --hash=sha256:d412243dfb797ae3ec2b59eca0e52dac12e75a241bf0e4eb861e450d06c6ed07 \
-    --hash=sha256:f5f8bb2d0d629f398bf47d0d69c07bc13b65f75a81ad9e2f71a63d4b7a2f6db2
+sphinxcontrib-htmlhelp==2.0.1 \
+    --hash=sha256:0cbdd302815330058422b98a113195c9249825d681e18f11e8b1f78a2f11efff \
+    --hash=sha256:c38cb46dccf316c79de6e5515e1770414b797162b23cd3d06e67020e1d2a6903
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   sphinx
-sphinxcontrib-jquery==4.1 \
-    --hash=sha256:1620739f04e36a2c779f1a131a2dfd49b2fd07351bf1968ced074365933abc7a \
-    --hash=sha256:f936030d7d0147dd026a4f2b5a57343d233f1fc7b363f68b3d4f1cb0993878ae
-    # via
-    #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-    #   sphinx-rtd-theme
 sphinxcontrib-jsmath==1.0.1 \
     --hash=sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178 \
     --hash=sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8
@@ -930,9 +986,9 @@
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   ipython
     #   matplotlib-inline
-types-docutils==0.18.0 \
-    --hash=sha256:14f781eb28d89a1cd61f1c41bd0776ad3bb4e2333d317c37d2c67f2eaf5891fe \
-    --hash=sha256:f0305109169326edffd98c128f542f773be06a1502a17d96359c53e31082db19
+types-docutils==0.20.0.3 \
+    --hash=sha256:4928e790f42b99d5833990f99c8dd9fa9f16825f6ed30380ca981846d36870cd \
+    --hash=sha256:a930150d8e01a9170f9bca489f46808ddebccdd8bc1e47c07968a77e49fb9321
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   types-pygments
@@ -942,9 +998,9 @@
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   mypy-protobuf
-types-pygments==2.9.13 \
-    --hash=sha256:589b87a6295c2b8b40566e46ff5ab9b5097735b051158f36648f64acc2e98dbe \
-    --hash=sha256:8b4ff9be1208b1dce639e406fc4d4e6a4682b6b253a021f2a7ffff9dbce16944
+types-pygments==2.16.0.0 \
+    --hash=sha256:4624a547d5ba73c971fac5d6fd327141e85e65f6123448bee76f0c8557652a71 \
+    --hash=sha256:aa93e4664e2d6cfea7570cde156e3966bf939f9c7d736cd179c4c8e94f7600b2
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
 types-pyserial==3.5.0.7 \
     --hash=sha256:22b665a336539b85108e2f5d61e6cde1b59818eae78324c17bfe64edc1a2bd66 \
@@ -954,9 +1010,9 @@
     --hash=sha256:7d340b19ca28cddfdba438ee638cd4084bde213e501a3978738543e27094775b \
     --hash=sha256:a461508f3096d1d5810ec5ab95d7eeecb651f3a15b71959999988942063bf01d
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-types-requests==2.28.11.15 \
-    --hash=sha256:a05e4c7bc967518fba5789c341ea8b0c942776ee474c7873129a61161978e586 \
-    --hash=sha256:fc8eaa09cc014699c6b63c60c2e3add0c8b09a410c818b5ac6e65f92a26dde09
+types-requests==2.31.0.2 \
+    --hash=sha256:56d181c85b5925cbc59f4489a57e72a8b2166f18273fd8ba7b6fe0c0b986f12a \
+    --hash=sha256:6aa3f7faf0ea52d728bb18c0a0d1522d9bfd8c72d26ff6f61bfc3d06a411cf40
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
 types-setuptools==67.8.0.0 \
     --hash=sha256:6df73340d96b238a4188b7b7668814b37e8018168aef1eef94a3b1872e3f60ff \
@@ -984,9 +1040,9 @@
     #   ipython
     #   mypy
     #   pylint
-urllib3==1.26.14 \
-    --hash=sha256:076907bf8fd355cde77728471316625a4d2f7e713c125f51953bb5b3eecf4f72 \
-    --hash=sha256:75edcdc2f7d85b137124a6c3c9fc3933cdeaa12ecb9a6a959f22797a0feca7e1
+urllib3==2.0.4 \
+    --hash=sha256:8d22f86aae8ef5e410d4f539fde9ce6b2113a001bb4d189e0aed70642d602b11 \
+    --hash=sha256:de7df1803967d2c2a98e4b11bb7d6bd9210474c46e8a0401514e3a42a75ebde4
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   requests
@@ -1186,13 +1242,12 @@
     --hash=sha256:408fb9a2b254c302f49db83c59f9aa0b4b0fd0ec25be3a5c51181327922ff63d \
     --hash=sha256:e3a234ba8455fe201eaa649cdac872d590089a18b661e39bbac7020978dd9c2e
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-zipp==3.15.0 \
-    --hash=sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b \
-    --hash=sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556
+zipp==3.16.2 \
+    --hash=sha256:679e51dd4403591b2d6838a48de3d283f3d188412a9782faadf845f298736ba0 \
+    --hash=sha256:ebc15946aa78bd63458992fc81ec3b6f7b1e92d51c35e6de1c3804e73b799147
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   importlib-metadata
-
 # The following packages are considered to be unsafe in a requirements file:
 pip==23.2 \
     --hash=sha256:78e5353a9dda374b462f2054f83a7b63f3f065c98236a68361845c1b0ee7e35f \
diff --git a/pw_env_setup/py/pw_env_setup/virtualenv_setup/constraint_hashes_windows.list b/pw_env_setup/py/pw_env_setup/virtualenv_setup/constraint_hashes_windows.list
index 23152c5..bab9221 100644
--- a/pw_env_setup/py/pw_env_setup/virtualenv_setup/constraint_hashes_windows.list
+++ b/pw_env_setup/py/pw_env_setup/virtualenv_setup/constraint_hashes_windows.list
@@ -4,9 +4,9 @@
 #
 #    pip-compile --allow-unsafe --generate-hashes --output-file='\constraint_hashes_windows.list' --strip-extras '\python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt'
 #
-alabaster==0.7.12 \
-    --hash=sha256:446438bdcca0e05bd45ea2de1668c1d9b032e1a9154c2c259092d77031ddd359 \
-    --hash=sha256:a661d72d58e6ea8a57f7a86e37d86716863ee5e92788398526d58b26a4e4dc02
+alabaster==0.7.13 \
+    --hash=sha256:1ee19aca801bbabb5ba3f5f258e4422dfa86f82f3e9cefb0859b283cdd7f62a3 \
+    --hash=sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   sphinx
@@ -16,9 +16,9 @@
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   ptpython
-astroid==2.14.2 \
-    --hash=sha256:0e0e3709d64fbffd3037e4ff403580550f14471fd3eaae9fa11cc9a5c7901153 \
-    --hash=sha256:a3cf9f02c53dd259144a7e8f3ccd75d67c9a8c716ef183e0c1f291bc5d7bb3cf
+astroid==2.15.6 \
+    --hash=sha256:389656ca57b6108f939cf5d2f9a2a825a3be50ba9d589670f393236e0a03b91c \
+    --hash=sha256:903f024859b7c7687d7a7f3a3f73b17301f8e42dfd9cc9df9d4418172d3e2dbd
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   pylint
@@ -28,9 +28,9 @@
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   stack-data
-babel==2.9.1 \
-    --hash=sha256:ab49e12b91d937cd11f0b67cb259a57ab4ad2b59ac7a3b41d6c06c0ac5b0def9 \
-    --hash=sha256:bc0c176f9f6a994582230df350aa6e05ba2ebe4b3ac317eab29d9be5d2768da0
+babel==2.12.1 \
+    --hash=sha256:b4246fb7677d3b98f501a39d43396d3cafdc8eadb045f4a31be01863f655c610 \
+    --hash=sha256:cc2d99999cd01d44420ae725a21c9e3711b3aadc7976d6147f622d8581963455
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   sphinx
@@ -153,9 +153,82 @@
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   cryptography
-charset-normalizer==2.0.10 \
-    --hash=sha256:876d180e9d7432c5d1dfd4c5d26b72f099d503e8fcc0feb7532c9289be60fcbd \
-    --hash=sha256:cb957888737fc0bbcd78e3df769addb41fd1ff8cf950dc9e7ad7793f1bf44455
+charset-normalizer==3.2.0 \
+    --hash=sha256:04e57ab9fbf9607b77f7d057974694b4f6b142da9ed4a199859d9d4d5c63fe96 \
+    --hash=sha256:09393e1b2a9461950b1c9a45d5fd251dc7c6f228acab64da1c9c0165d9c7765c \
+    --hash=sha256:0b87549028f680ca955556e3bd57013ab47474c3124dc069faa0b6545b6c9710 \
+    --hash=sha256:1000fba1057b92a65daec275aec30586c3de2401ccdcd41f8a5c1e2c87078706 \
+    --hash=sha256:1249cbbf3d3b04902ff081ffbb33ce3377fa6e4c7356f759f3cd076cc138d020 \
+    --hash=sha256:1920d4ff15ce893210c1f0c0e9d19bfbecb7983c76b33f046c13a8ffbd570252 \
+    --hash=sha256:193cbc708ea3aca45e7221ae58f0fd63f933753a9bfb498a3b474878f12caaad \
+    --hash=sha256:1a100c6d595a7f316f1b6f01d20815d916e75ff98c27a01ae817439ea7726329 \
+    --hash=sha256:1f30b48dd7fa1474554b0b0f3fdfdd4c13b5c737a3c6284d3cdc424ec0ffff3a \
+    --hash=sha256:203f0c8871d5a7987be20c72442488a0b8cfd0f43b7973771640fc593f56321f \
+    --hash=sha256:246de67b99b6851627d945db38147d1b209a899311b1305dd84916f2b88526c6 \
+    --hash=sha256:2dee8e57f052ef5353cf608e0b4c871aee320dd1b87d351c28764fc0ca55f9f4 \
+    --hash=sha256:2efb1bd13885392adfda4614c33d3b68dee4921fd0ac1d3988f8cbb7d589e72a \
+    --hash=sha256:2f4ac36d8e2b4cc1aa71df3dd84ff8efbe3bfb97ac41242fbcfc053c67434f46 \
+    --hash=sha256:3170c9399da12c9dc66366e9d14da8bf7147e1e9d9ea566067bbce7bb74bd9c2 \
+    --hash=sha256:3b1613dd5aee995ec6d4c69f00378bbd07614702a315a2cf6c1d21461fe17c23 \
+    --hash=sha256:3bb3d25a8e6c0aedd251753a79ae98a093c7e7b471faa3aa9a93a81431987ace \
+    --hash=sha256:3bb7fda7260735efe66d5107fb7e6af6a7c04c7fce9b2514e04b7a74b06bf5dd \
+    --hash=sha256:41b25eaa7d15909cf3ac4c96088c1f266a9a93ec44f87f1d13d4a0e86c81b982 \
+    --hash=sha256:45de3f87179c1823e6d9e32156fb14c1927fcc9aba21433f088fdfb555b77c10 \
+    --hash=sha256:46fb8c61d794b78ec7134a715a3e564aafc8f6b5e338417cb19fe9f57a5a9bf2 \
+    --hash=sha256:48021783bdf96e3d6de03a6e39a1171ed5bd7e8bb93fc84cc649d11490f87cea \
+    --hash=sha256:4957669ef390f0e6719db3613ab3a7631e68424604a7b448f079bee145da6e09 \
+    --hash=sha256:5e86d77b090dbddbe78867a0275cb4df08ea195e660f1f7f13435a4649e954e5 \
+    --hash=sha256:6339d047dab2780cc6220f46306628e04d9750f02f983ddb37439ca47ced7149 \
+    --hash=sha256:681eb3d7e02e3c3655d1b16059fbfb605ac464c834a0c629048a30fad2b27489 \
+    --hash=sha256:6c409c0deba34f147f77efaa67b8e4bb83d2f11c8806405f76397ae5b8c0d1c9 \
+    --hash=sha256:7095f6fbfaa55defb6b733cfeb14efaae7a29f0b59d8cf213be4e7ca0b857b80 \
+    --hash=sha256:70c610f6cbe4b9fce272c407dd9d07e33e6bf7b4aa1b7ffb6f6ded8e634e3592 \
+    --hash=sha256:72814c01533f51d68702802d74f77ea026b5ec52793c791e2da806a3844a46c3 \
+    --hash=sha256:7a4826ad2bd6b07ca615c74ab91f32f6c96d08f6fcc3902ceeedaec8cdc3bcd6 \
+    --hash=sha256:7c70087bfee18a42b4040bb9ec1ca15a08242cf5867c58726530bdf3945672ed \
+    --hash=sha256:855eafa5d5a2034b4621c74925d89c5efef61418570e5ef9b37717d9c796419c \
+    --hash=sha256:8700f06d0ce6f128de3ccdbc1acaea1ee264d2caa9ca05daaf492fde7c2a7200 \
+    --hash=sha256:89f1b185a01fe560bc8ae5f619e924407efca2191b56ce749ec84982fc59a32a \
+    --hash=sha256:8b2c760cfc7042b27ebdb4a43a4453bd829a5742503599144d54a032c5dc7e9e \
+    --hash=sha256:8c2f5e83493748286002f9369f3e6607c565a6a90425a3a1fef5ae32a36d749d \
+    --hash=sha256:8e098148dd37b4ce3baca71fb394c81dc5d9c7728c95df695d2dca218edf40e6 \
+    --hash=sha256:94aea8eff76ee6d1cdacb07dd2123a68283cb5569e0250feab1240058f53b623 \
+    --hash=sha256:95eb302ff792e12aba9a8b8f8474ab229a83c103d74a750ec0bd1c1eea32e669 \
+    --hash=sha256:9bd9b3b31adcb054116447ea22caa61a285d92e94d710aa5ec97992ff5eb7cf3 \
+    --hash=sha256:9e608aafdb55eb9f255034709e20d5a83b6d60c054df0802fa9c9883d0a937aa \
+    --hash=sha256:a103b3a7069b62f5d4890ae1b8f0597618f628b286b03d4bc9195230b154bfa9 \
+    --hash=sha256:a386ebe437176aab38c041de1260cd3ea459c6ce5263594399880bbc398225b2 \
+    --hash=sha256:a38856a971c602f98472050165cea2cdc97709240373041b69030be15047691f \
+    --hash=sha256:a401b4598e5d3f4a9a811f3daf42ee2291790c7f9d74b18d75d6e21dda98a1a1 \
+    --hash=sha256:a7647ebdfb9682b7bb97e2a5e7cb6ae735b1c25008a70b906aecca294ee96cf4 \
+    --hash=sha256:aaf63899c94de41fe3cf934601b0f7ccb6b428c6e4eeb80da72c58eab077b19a \
+    --hash=sha256:b0dac0ff919ba34d4df1b6131f59ce95b08b9065233446be7e459f95554c0dc8 \
+    --hash=sha256:baacc6aee0b2ef6f3d308e197b5d7a81c0e70b06beae1f1fcacffdbd124fe0e3 \
+    --hash=sha256:bf420121d4c8dce6b889f0e8e4ec0ca34b7f40186203f06a946fa0276ba54029 \
+    --hash=sha256:c04a46716adde8d927adb9457bbe39cf473e1e2c2f5d0a16ceb837e5d841ad4f \
+    --hash=sha256:c0b21078a4b56965e2b12f247467b234734491897e99c1d51cee628da9786959 \
+    --hash=sha256:c1c76a1743432b4b60ab3358c937a3fe1341c828ae6194108a94c69028247f22 \
+    --hash=sha256:c4983bf937209c57240cff65906b18bb35e64ae872da6a0db937d7b4af845dd7 \
+    --hash=sha256:c4fb39a81950ec280984b3a44f5bd12819953dc5fa3a7e6fa7a80db5ee853952 \
+    --hash=sha256:c57921cda3a80d0f2b8aec7e25c8aa14479ea92b5b51b6876d975d925a2ea346 \
+    --hash=sha256:c8063cf17b19661471ecbdb3df1c84f24ad2e389e326ccaf89e3fb2484d8dd7e \
+    --hash=sha256:ccd16eb18a849fd8dcb23e23380e2f0a354e8daa0c984b8a732d9cfaba3a776d \
+    --hash=sha256:cd6dbe0238f7743d0efe563ab46294f54f9bc8f4b9bcf57c3c666cc5bc9d1299 \
+    --hash=sha256:d62e51710986674142526ab9f78663ca2b0726066ae26b78b22e0f5e571238dd \
+    --hash=sha256:db901e2ac34c931d73054d9797383d0f8009991e723dab15109740a63e7f902a \
+    --hash=sha256:e03b8895a6990c9ab2cdcd0f2fe44088ca1c65ae592b8f795c3294af00a461c3 \
+    --hash=sha256:e1c8a2f4c69e08e89632defbfabec2feb8a8d99edc9f89ce33c4b9e36ab63037 \
+    --hash=sha256:e4b749b9cc6ee664a3300bb3a273c1ca8068c46be705b6c31cf5d276f8628a94 \
+    --hash=sha256:e6a5bf2cba5ae1bb80b154ed68a3cfa2fa00fde979a7f50d6598d3e17d9ac20c \
+    --hash=sha256:e857a2232ba53ae940d3456f7533ce6ca98b81917d47adc3c7fd55dad8fab858 \
+    --hash=sha256:ee4006268ed33370957f55bf2e6f4d263eaf4dc3cfc473d1d90baff6ed36ce4a \
+    --hash=sha256:eef9df1eefada2c09a5e7a40991b9fc6ac6ef20b1372abd48d2794a316dc0449 \
+    --hash=sha256:f058f6963fd82eb143c692cecdc89e075fa0828db2e5b291070485390b2f1c9c \
+    --hash=sha256:f25c229a6ba38a35ae6e25ca1264621cc25d4d38dca2942a7fce0b67a4efe918 \
+    --hash=sha256:f2a1d0fd4242bd8643ce6f98927cf9c04540af6efa92323e9d3124f57727bfc1 \
+    --hash=sha256:f7560358a6811e52e9c4d142d497f1a6e10103d3a6881f18d04dbce3729c0e2c \
+    --hash=sha256:f779d3ad205f108d14e99bb3859aa7dd8e9c68874617c72354d7ecaec2a054ac \
+    --hash=sha256:f87f746ee241d30d6ed93969de31e5ffd09a2961a051e60ae6bddde9ec3583aa
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   requests
@@ -279,13 +352,12 @@
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   pylint
-docutils==0.18 \
-    --hash=sha256:a31688b2ea858517fa54293e5d5df06fbb875fb1f7e4c64529271b77781ca8fc \
-    --hash=sha256:c1d5dab2b11d16397406a282e53953fe495a46d69ae329f55aa98a5c4e3c5fbb
+docutils==0.20.1 \
+    --hash=sha256:96f387a2c5562db4476f09f13bbab2192e764cac08ebbf3a34a95d9b1e4a59d6 \
+    --hash=sha256:f08a4e276c3a1583a86dce3e34aba3fe04d02bba2dd51ed16106244e8a923e3b
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   sphinx
-    #   sphinx-rtd-theme
 executing==1.2.0 \
     --hash=sha256:0314a69e37426e3608aada02473b4161d4caf5a4b244d1d0c48072b8fee7bacc \
     --hash=sha256:19da64c18d2d851112f09c287f8d3dbbdf725ab0e569077efb6cdcbd3497c107
@@ -411,15 +483,15 @@
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   coloredlogs
-idna==3.3 \
-    --hash=sha256:84d9dd047ffa80596e0f246e2eab0b391788b0503584e8945f2368256d2735ff \
-    --hash=sha256:9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d
+idna==3.4 \
+    --hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \
+    --hash=sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   requests
-imagesize==1.3.0 \
-    --hash=sha256:1db2f82529e53c3e929e8926a1fa9235aa82d0bd0c580359c67ec31b2fddaa8c \
-    --hash=sha256:cd1750d452385ca327479d45b64d9c7729ecf0b3969a58148298c77092261f9d
+imagesize==1.4.1 \
+    --hash=sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b \
+    --hash=sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   sphinx
@@ -446,9 +518,9 @@
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   ipython
     #   ptpython
-jinja2==3.0.3 \
-    --hash=sha256:077ce6014f7b40d03b47d1f1ca4b0fc8328a692bd284016f806ed0eaca390ad8 \
-    --hash=sha256:611bb273cd68f3b993fabdc4064fc858c5b47a973cb5aa7999ec1ba405c87cd7
+jinja2==3.1.2 \
+    --hash=sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852 \
+    --hash=sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   sphinx
@@ -562,33 +634,29 @@
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   pylint
-mypy==1.0.1 \
-    --hash=sha256:0af4f0e20706aadf4e6f8f8dc5ab739089146b83fd53cb4a7e0e850ef3de0bb6 \
-    --hash=sha256:15b5a824b58c7c822c51bc66308e759243c32631896743f030daf449fe3677f3 \
-    --hash=sha256:17455cda53eeee0a4adb6371a21dd3dbf465897de82843751cf822605d152c8c \
-    --hash=sha256:2013226d17f20468f34feddd6aae4635a55f79626549099354ce641bc7d40262 \
-    --hash=sha256:24189f23dc66f83b839bd1cce2dfc356020dfc9a8bae03978477b15be61b062e \
-    --hash=sha256:27a0f74a298769d9fdc8498fcb4f2beb86f0564bcdb1a37b58cbbe78e55cf8c0 \
-    --hash=sha256:28cea5a6392bb43d266782983b5a4216c25544cd7d80be681a155ddcdafd152d \
-    --hash=sha256:448de661536d270ce04f2d7dddaa49b2fdba6e3bd8a83212164d4174ff43aa65 \
-    --hash=sha256:48525aec92b47baed9b3380371ab8ab6e63a5aab317347dfe9e55e02aaad22e8 \
-    --hash=sha256:5bc8d6bd3b274dd3846597855d96d38d947aedba18776aa998a8d46fabdaed76 \
-    --hash=sha256:5deb252fd42a77add936b463033a59b8e48eb2eaec2976d76b6878d031933fe4 \
-    --hash=sha256:5f546ac34093c6ce33f6278f7c88f0f147a4849386d3bf3ae193702f4fe31407 \
-    --hash=sha256:5fdd63e4f50e3538617887e9aee91855368d9fc1dea30da743837b0df7373bc4 \
-    --hash=sha256:65b122a993d9c81ea0bfde7689b3365318a88bde952e4dfa1b3a8b4ac05d168b \
-    --hash=sha256:71a808334d3f41ef011faa5a5cd8153606df5fc0b56de5b2e89566c8093a0c9a \
-    --hash=sha256:920169f0184215eef19294fa86ea49ffd4635dedfdea2b57e45cb4ee85d5ccaf \
-    --hash=sha256:93a85495fb13dc484251b4c1fd7a5ac370cd0d812bbfc3b39c1bafefe95275d5 \
-    --hash=sha256:a2948c40a7dd46c1c33765718936669dc1f628f134013b02ff5ac6c7ef6942bf \
-    --hash=sha256:c6c2ccb7af7154673c591189c3687b013122c5a891bb5651eca3db8e6c6c55bd \
-    --hash=sha256:c96b8a0c019fe29040d520d9257d8c8f122a7343a8307bf8d6d4a43f5c5bfcc8 \
-    --hash=sha256:d42a98e76070a365a1d1c220fcac8aa4ada12ae0db679cb4d910fabefc88b994 \
-    --hash=sha256:dbeb24514c4acbc78d205f85dd0e800f34062efcc1f4a4857c57e4b4b8712bff \
-    --hash=sha256:e60d0b09f62ae97a94605c3f73fd952395286cf3e3b9e7b97f60b01ddfbbda88 \
-    --hash=sha256:e64f48c6176e243ad015e995de05af7f22bbe370dbb5b32bd6988438ec873919 \
-    --hash=sha256:e831662208055b006eef68392a768ff83596035ffd6d846786578ba1714ba8f6 \
-    --hash=sha256:eda5c8b9949ed411ff752b9a01adda31afe7eae1e53e946dbdf9db23865e66c4
+mypy==1.5.0 \
+    --hash=sha256:1fe816e26e676c1311b9e04fd576543b873576d39439f7c24c8e5c7728391ecf \
+    --hash=sha256:2c9d570f53908cbea326ad8f96028a673b814d9dca7515bf71d95fa662c3eb6f \
+    --hash=sha256:35b13335c6c46a386577a51f3d38b2b5d14aa619e9633bb756bd77205e4bd09f \
+    --hash=sha256:372fd97293ed0076d52695849f59acbbb8461c4ab447858cdaeaf734a396d823 \
+    --hash=sha256:42170e68adb1603ccdc55a30068f72bcfcde2ce650188e4c1b2a93018b826735 \
+    --hash=sha256:69b32d0dedd211b80f1b7435644e1ef83033a2af2ac65adcdc87c38db68a86be \
+    --hash=sha256:725b57a19b7408ef66a0fd9db59b5d3e528922250fb56e50bded27fea9ff28f0 \
+    --hash=sha256:769ddb6bfe55c2bd9c7d6d7020885a5ea14289619db7ee650e06b1ef0852c6f4 \
+    --hash=sha256:79c520aa24f21852206b5ff2cf746dc13020113aa73fa55af504635a96e62718 \
+    --hash=sha256:84cf9f7d8a8a22bb6a36444480f4cbf089c917a4179fbf7eea003ea931944a7f \
+    --hash=sha256:9166186c498170e1ff478a7f540846b2169243feb95bc228d39a67a1a450cdc6 \
+    --hash=sha256:a2500ad063413bc873ae102cf655bf49889e0763b260a3a7cf544a0cbbf7e70a \
+    --hash=sha256:a551ed0fc02455fe2c1fb0145160df8336b90ab80224739627b15ebe2b45e9dc \
+    --hash=sha256:ad3109bec37cc33654de8db30fe8ff3a1bb57ea65144167d68185e6dced9868d \
+    --hash=sha256:b4ea3a0241cb005b0ccdbd318fb99619b21ae51bcf1660b95fc22e0e7d3ba4a1 \
+    --hash=sha256:c36011320e452eb30bec38b9fd3ba20569dc9545d7d4540d967f3ea1fab9c374 \
+    --hash=sha256:c8a7444d6fcac7e2585b10abb91ad900a576da7af8f5cffffbff6065d9115813 \
+    --hash=sha256:cbf18f8db7e5f060d61c91e334d3b96d6bb624ddc9ee8a1cde407b737acbca2c \
+    --hash=sha256:d145b81a8214687cfc1f85c03663a5bbe736777410e5580e54d526e7e904f564 \
+    --hash=sha256:eec5c927aa4b3e8b4781840f1550079969926d0a22ce38075f6cfcf4b13e3eb4 \
+    --hash=sha256:f3460f34b3839b9bc84ee3ed65076eb827cd99ed13ed08d723f9083cada4a212 \
+    --hash=sha256:f3940cf5845b2512b3ab95463198b0cdf87975dfd17fdcc6ce9709a9abe09e69
     # via -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
 mypy-extensions==1.0.0 \
     --hash=sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d \
@@ -727,17 +795,17 @@
     --hash=sha256:5609aa6da1123fccfae2e8431a67b4146aa7fad5b3889f808df12b110f230937 \
     --hash=sha256:cde854e662774c5457d688ca41615f6594187ba7067af101232df889a6b7a66b
     # via -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
-pygments==2.14.0 \
-    --hash=sha256:b3ed06a9e8ac9a9aae5a6f5dbe78a8a58655d17b43b93c078f094ddc476ae297 \
-    --hash=sha256:fa7bd7bd2771287c0de303af8bfdfc731f51bd2c6a47ab69d117138893b82717
+pygments==2.16.1 \
+    --hash=sha256:13fc09fa63bc8d8671a6d247e1eb303c4b343eaee81d861f3404db2935653692 \
+    --hash=sha256:1daff0494820c69bc8941e407aa20f577374ee88364ee10a98fdbe0aece96e29
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   ipython
     #   ptpython
     #   sphinx
-pylint==2.16.2 \
-    --hash=sha256:13b2c805a404a9bf57d002cd5f054ca4d40b0b87542bdaba5e05321ae8262c84 \
-    --hash=sha256:ff22dde9c2128cd257c145cfd51adeff0be7df4d80d669055f24a962b351bbe4
+pylint==2.17.5 \
+    --hash=sha256:73995fb8216d3bed149c8d51bba25b2c52a8251a2c8ac846ec668ce38fab5413 \
+    --hash=sha256:f7b601cbc06fef7e62a754e2b41294c2aa31f1cb659624b9a85bcba29eaf8252
     # via -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
 pyperclip==1.8.2 \
     --hash=sha256:105254a8b04934f0bc84e9c24eb360a591aaf6535c9def5f29d92af107a9bf57
@@ -758,9 +826,9 @@
     --hash=sha256:3c77e014170dfffbd816e6ffc205e9842efb10be9f58ec16d3e8675b4925cddb \
     --hash=sha256:c4451db6ba391ca6ca299fb3ec7bae67a5c55dde170964c7a14ceefec02f2cf0
     # via -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
-pytz==2021.3 \
-    --hash=sha256:3672058bc3453457b622aab7a1c3bfd5ab0bdae451512f6cf25f64ed37f5b87c \
-    --hash=sha256:acad2d8b20a1af07d4e4c9d2e9285c5ed9104354062f275f3fcd88dcef4f1326
+pytz==2023.3 \
+    --hash=sha256:1d8ce29db189191fb55338ee6d0387d82ab59f3d00eac103412d64e0ebd0c588 \
+    --hash=sha256:a151b3abb88eda1d4e34a9814df37de2a80e301e68ba0fd856fb9b46bfbbbffb
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   babel
@@ -806,9 +874,9 @@
     --hash=sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d \
     --hash=sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f
     # via -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
-requests==2.27.1 \
-    --hash=sha256:68d7c56fd5a8999887728ef304a6d12edc7be74f1cfa47714fc8b414525c9a61 \
-    --hash=sha256:f22fa1e554c9ddfd16e6e41ac79759e17be9e492b3587efa038054674760e72d
+requests==2.31.0 \
+    --hash=sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f \
+    --hash=sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   google-api-core
@@ -833,30 +901,24 @@
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   sphinx
-sphinx==5.3.0 \
-    --hash=sha256:060ca5c9f7ba57a08a1219e547b269fadf125ae25b06b9fa7f66768efb652d6d \
-    --hash=sha256:51026de0a9ff9fc13c05d74913ad66047e104f56a129ff73e174eb5c3ee794b5
+sphinx==7.1.2 \
+    --hash=sha256:780f4d32f1d7d1126576e0e5ecc19dc32ab76cd24e950228dcf7b1f6d3d9e22f \
+    --hash=sha256:d170a81825b2fcacb6dfd5a0d7f578a053e45d3f2b153fecc948c37344eb4cbe
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   sphinx-argparse
     #   sphinx-design
-    #   sphinx-rtd-theme
-    #   sphinxcontrib-jquery
 sphinx-argparse==0.4.0 \
     --hash=sha256:73bee01f7276fae2bf621ccfe4d167af7306e7288e3482005405d9f826f9b037 \
     --hash=sha256:e0f34184eb56f12face774fbc87b880abdb9017a0998d1ec559b267e9697e449
     # via -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
-sphinx-design==0.3.0 \
-    --hash=sha256:7183fa1fae55b37ef01bda5125a21ee841f5bbcbf59a35382be598180c4cefba \
-    --hash=sha256:823c1dd74f31efb3285ec2f1254caefed29d762a40cd676f58413a1e4ed5cc96
+sphinx-design==0.5.0 \
+    --hash=sha256:1af1267b4cea2eedd6724614f19dcc88fe2e15aff65d06b2f6252cee9c4f4c1e \
+    --hash=sha256:e8e513acea6f92d15c6de3b34e954458f245b8e761b45b63950f65373352ab00
     # via -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
-sphinx-rtd-theme==1.2.0 \
-    --hash=sha256:a0d8bd1a2ed52e0b338cbe19c4b2eef3c5e7a048769753dac6a9f059c7b641b8 \
-    --hash=sha256:f823f7e71890abe0ac6aaa6013361ea2696fc8d3e1fa798f463e82bdb77eeff2
-    # via -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
-sphinxcontrib-applehelp==1.0.2 \
-    --hash=sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a \
-    --hash=sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58
+sphinxcontrib-applehelp==1.0.4 \
+    --hash=sha256:29d341f67fb0f6f586b23ad80e072c8e6ad0b48417db2bde114a4c9746feb228 \
+    --hash=sha256:828f867945bbe39817c210a1abfd1bc4895c8b73fcaade56d45357a348a07d7e
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   sphinx
@@ -866,18 +928,12 @@
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   sphinx
-sphinxcontrib-htmlhelp==2.0.0 \
-    --hash=sha256:d412243dfb797ae3ec2b59eca0e52dac12e75a241bf0e4eb861e450d06c6ed07 \
-    --hash=sha256:f5f8bb2d0d629f398bf47d0d69c07bc13b65f75a81ad9e2f71a63d4b7a2f6db2
+sphinxcontrib-htmlhelp==2.0.1 \
+    --hash=sha256:0cbdd302815330058422b98a113195c9249825d681e18f11e8b1f78a2f11efff \
+    --hash=sha256:c38cb46dccf316c79de6e5515e1770414b797162b23cd3d06e67020e1d2a6903
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   sphinx
-sphinxcontrib-jquery==4.1 \
-    --hash=sha256:1620739f04e36a2c779f1a131a2dfd49b2fd07351bf1968ced074365933abc7a \
-    --hash=sha256:f936030d7d0147dd026a4f2b5a57343d233f1fc7b363f68b3d4f1cb0993878ae
-    # via
-    #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
-    #   sphinx-rtd-theme
 sphinxcontrib-jsmath==1.0.1 \
     --hash=sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178 \
     --hash=sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8
@@ -934,9 +990,9 @@
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   ipython
     #   matplotlib-inline
-types-docutils==0.18.0 \
-    --hash=sha256:14f781eb28d89a1cd61f1c41bd0776ad3bb4e2333d317c37d2c67f2eaf5891fe \
-    --hash=sha256:f0305109169326edffd98c128f542f773be06a1502a17d96359c53e31082db19
+types-docutils==0.20.0.3 \
+    --hash=sha256:4928e790f42b99d5833990f99c8dd9fa9f16825f6ed30380ca981846d36870cd \
+    --hash=sha256:a930150d8e01a9170f9bca489f46808ddebccdd8bc1e47c07968a77e49fb9321
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   types-pygments
@@ -946,9 +1002,9 @@
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   mypy-protobuf
-types-pygments==2.9.13 \
-    --hash=sha256:589b87a6295c2b8b40566e46ff5ab9b5097735b051158f36648f64acc2e98dbe \
-    --hash=sha256:8b4ff9be1208b1dce639e406fc4d4e6a4682b6b253a021f2a7ffff9dbce16944
+types-pygments==2.16.0.0 \
+    --hash=sha256:4624a547d5ba73c971fac5d6fd327141e85e65f6123448bee76f0c8557652a71 \
+    --hash=sha256:aa93e4664e2d6cfea7570cde156e3966bf939f9c7d736cd179c4c8e94f7600b2
     # via -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
 types-pyserial==3.5.0.7 \
     --hash=sha256:22b665a336539b85108e2f5d61e6cde1b59818eae78324c17bfe64edc1a2bd66 \
@@ -958,9 +1014,9 @@
     --hash=sha256:7d340b19ca28cddfdba438ee638cd4084bde213e501a3978738543e27094775b \
     --hash=sha256:a461508f3096d1d5810ec5ab95d7eeecb651f3a15b71959999988942063bf01d
     # via -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
-types-requests==2.28.11.15 \
-    --hash=sha256:a05e4c7bc967518fba5789c341ea8b0c942776ee474c7873129a61161978e586 \
-    --hash=sha256:fc8eaa09cc014699c6b63c60c2e3add0c8b09a410c818b5ac6e65f92a26dde09
+types-requests==2.31.0.2 \
+    --hash=sha256:56d181c85b5925cbc59f4489a57e72a8b2166f18273fd8ba7b6fe0c0b986f12a \
+    --hash=sha256:6aa3f7faf0ea52d728bb18c0a0d1522d9bfd8c72d26ff6f61bfc3d06a411cf40
     # via -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
 types-setuptools==67.8.0.0 \
     --hash=sha256:6df73340d96b238a4188b7b7668814b37e8018168aef1eef94a3b1872e3f60ff \
@@ -988,9 +1044,9 @@
     #   ipython
     #   mypy
     #   pylint
-urllib3==1.26.14 \
-    --hash=sha256:076907bf8fd355cde77728471316625a4d2f7e713c125f51953bb5b3eecf4f72 \
-    --hash=sha256:75edcdc2f7d85b137124a6c3c9fc3933cdeaa12ecb9a6a959f22797a0feca7e1
+urllib3==2.0.4 \
+    --hash=sha256:8d22f86aae8ef5e410d4f539fde9ce6b2113a001bb4d189e0aed70642d602b11 \
+    --hash=sha256:de7df1803967d2c2a98e4b11bb7d6bd9210474c46e8a0401514e3a42a75ebde4
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   requests
@@ -1190,13 +1246,12 @@
     --hash=sha256:408fb9a2b254c302f49db83c59f9aa0b4b0fd0ec25be3a5c51181327922ff63d \
     --hash=sha256:e3a234ba8455fe201eaa649cdac872d590089a18b661e39bbac7020978dd9c2e
     # via -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
-zipp==3.15.0 \
-    --hash=sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b \
-    --hash=sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556
+zipp==3.16.2 \
+    --hash=sha256:679e51dd4403591b2d6838a48de3d283f3d188412a9782faadf845f298736ba0 \
+    --hash=sha256:ebc15946aa78bd63458992fc81ec3b6f7b1e92d51c35e6de1c3804e73b799147
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   importlib-metadata
-
 # The following packages are considered to be unsafe in a requirements file:
 pip==23.2 \
     --hash=sha256:78e5353a9dda374b462f2054f83a7b63f3f065c98236a68361845c1b0ee7e35f \
diff --git a/pw_env_setup/py/pw_env_setup/virtualenv_setup/pigweed_upstream_requirements.txt b/pw_env_setup/py/pw_env_setup/virtualenv_setup/pigweed_upstream_requirements.txt
index 3db53bf..16834a0 100644
--- a/pw_env_setup/py/pw_env_setup/virtualenv_setup/pigweed_upstream_requirements.txt
+++ b/pw_env_setup/py/pw_env_setup/virtualenv_setup/pigweed_upstream_requirements.txt
@@ -15,11 +15,9 @@
 # Pigweed upstream specific depenencies:
 # pigweed.dev Sphinx themes
 beautifulsoup4==4.12.2
-furo @ https://github.com/chadnorvell/furo/releases/download/2023.03.27/furo-2023.3.27.dev1-py3-none-any.whl
+furo==2023.8.19
 sphinx-copybutton==0.5.1
-sphinx-tabs==3.4.1
-myst-parser==0.18.1
-breathe==4.34.0
+breathe==4.35.0
 kconfiglib==14.1.0
 # Renode requirements
 psutil==5.9.4
diff --git a/pw_env_setup/py/pw_env_setup/virtualenv_setup/upstream_requirements_darwin_lock.txt b/pw_env_setup/py/pw_env_setup/virtualenv_setup/upstream_requirements_darwin_lock.txt
index 4f6a6bc..8989987 100644
--- a/pw_env_setup/py/pw_env_setup/virtualenv_setup/upstream_requirements_darwin_lock.txt
+++ b/pw_env_setup/py/pw_env_setup/virtualenv_setup/upstream_requirements_darwin_lock.txt
@@ -4,9 +4,9 @@
 #
 #    pip-compile --allow-unsafe --generate-hashes --output-file=/upstream_requirements_darwin_lock.txt --strip-extras /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
 #
-alabaster==0.7.12 \
-    --hash=sha256:446438bdcca0e05bd45ea2de1668c1d9b032e1a9154c2c259092d77031ddd359 \
-    --hash=sha256:a661d72d58e6ea8a57f7a86e37d86716863ee5e92788398526d58b26a4e4dc02
+alabaster==0.7.13 \
+    --hash=sha256:1ee19aca801bbabb5ba3f5f258e4422dfa86f82f3e9cefb0859b283cdd7f62a3 \
+    --hash=sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   sphinx
@@ -22,9 +22,9 @@
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   ipython
-astroid==2.14.2 \
-    --hash=sha256:0e0e3709d64fbffd3037e4ff403580550f14471fd3eaae9fa11cc9a5c7901153 \
-    --hash=sha256:a3cf9f02c53dd259144a7e8f3ccd75d67c9a8c716ef183e0c1f291bc5d7bb3cf
+astroid==2.15.6 \
+    --hash=sha256:389656ca57b6108f939cf5d2f9a2a825a3be50ba9d589670f393236e0a03b91c \
+    --hash=sha256:903f024859b7c7687d7a7f3a3f73b17301f8e42dfd9cc9df9d4418172d3e2dbd
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   pylint
@@ -34,9 +34,9 @@
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   stack-data
-babel==2.9.1 \
-    --hash=sha256:ab49e12b91d937cd11f0b67cb259a57ab4ad2b59ac7a3b41d6c06c0ac5b0def9 \
-    --hash=sha256:bc0c176f9f6a994582230df350aa6e05ba2ebe4b3ac317eab29d9be5d2768da0
+babel==2.12.1 \
+    --hash=sha256:b4246fb7677d3b98f501a39d43396d3cafdc8eadb045f4a31be01863f655c610 \
+    --hash=sha256:cc2d99999cd01d44420ae725a21c9e3711b3aadc7976d6147f622d8581963455
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   sphinx
@@ -79,9 +79,9 @@
     --hash=sha256:c91dfc2c2a4e50df0026f88d2215e166616e0c80e86004d0003ece0488db2739 \
     --hash=sha256:e6663f91b6feca5d06f2ccd49a10f254f9298cc1f7f49c46e498a0771b507104
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-breathe==4.34.0 \
-    --hash=sha256:48804dcf0e607a89fb6ad88c729ef12743a42db03ae9489be4ef8f7c4011774a \
-    --hash=sha256:ac0768a5e84addad3e632028fe67749c567aba2b29088493b64c2c1634bcdba1
+breathe==4.35.0 \
+    --hash=sha256:5165541c3c67b6c7adde8b3ecfe895c6f7844783c4076b6d8d287e4f33d62386 \
+    --hash=sha256:52c581f42ca4310737f9e435e3851c3d1f15446205a85fbc272f1f97ed74f5be
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
 build==0.10.0 \
     --hash=sha256:af266720050a66c893a6096a2f410989eeac74ff9a68ba194b3f6473e8e26171 \
@@ -169,9 +169,82 @@
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   cryptography
-charset-normalizer==2.0.10 \
-    --hash=sha256:876d180e9d7432c5d1dfd4c5d26b72f099d503e8fcc0feb7532c9289be60fcbd \
-    --hash=sha256:cb957888737fc0bbcd78e3df769addb41fd1ff8cf950dc9e7ad7793f1bf44455
+charset-normalizer==3.2.0 \
+    --hash=sha256:04e57ab9fbf9607b77f7d057974694b4f6b142da9ed4a199859d9d4d5c63fe96 \
+    --hash=sha256:09393e1b2a9461950b1c9a45d5fd251dc7c6f228acab64da1c9c0165d9c7765c \
+    --hash=sha256:0b87549028f680ca955556e3bd57013ab47474c3124dc069faa0b6545b6c9710 \
+    --hash=sha256:1000fba1057b92a65daec275aec30586c3de2401ccdcd41f8a5c1e2c87078706 \
+    --hash=sha256:1249cbbf3d3b04902ff081ffbb33ce3377fa6e4c7356f759f3cd076cc138d020 \
+    --hash=sha256:1920d4ff15ce893210c1f0c0e9d19bfbecb7983c76b33f046c13a8ffbd570252 \
+    --hash=sha256:193cbc708ea3aca45e7221ae58f0fd63f933753a9bfb498a3b474878f12caaad \
+    --hash=sha256:1a100c6d595a7f316f1b6f01d20815d916e75ff98c27a01ae817439ea7726329 \
+    --hash=sha256:1f30b48dd7fa1474554b0b0f3fdfdd4c13b5c737a3c6284d3cdc424ec0ffff3a \
+    --hash=sha256:203f0c8871d5a7987be20c72442488a0b8cfd0f43b7973771640fc593f56321f \
+    --hash=sha256:246de67b99b6851627d945db38147d1b209a899311b1305dd84916f2b88526c6 \
+    --hash=sha256:2dee8e57f052ef5353cf608e0b4c871aee320dd1b87d351c28764fc0ca55f9f4 \
+    --hash=sha256:2efb1bd13885392adfda4614c33d3b68dee4921fd0ac1d3988f8cbb7d589e72a \
+    --hash=sha256:2f4ac36d8e2b4cc1aa71df3dd84ff8efbe3bfb97ac41242fbcfc053c67434f46 \
+    --hash=sha256:3170c9399da12c9dc66366e9d14da8bf7147e1e9d9ea566067bbce7bb74bd9c2 \
+    --hash=sha256:3b1613dd5aee995ec6d4c69f00378bbd07614702a315a2cf6c1d21461fe17c23 \
+    --hash=sha256:3bb3d25a8e6c0aedd251753a79ae98a093c7e7b471faa3aa9a93a81431987ace \
+    --hash=sha256:3bb7fda7260735efe66d5107fb7e6af6a7c04c7fce9b2514e04b7a74b06bf5dd \
+    --hash=sha256:41b25eaa7d15909cf3ac4c96088c1f266a9a93ec44f87f1d13d4a0e86c81b982 \
+    --hash=sha256:45de3f87179c1823e6d9e32156fb14c1927fcc9aba21433f088fdfb555b77c10 \
+    --hash=sha256:46fb8c61d794b78ec7134a715a3e564aafc8f6b5e338417cb19fe9f57a5a9bf2 \
+    --hash=sha256:48021783bdf96e3d6de03a6e39a1171ed5bd7e8bb93fc84cc649d11490f87cea \
+    --hash=sha256:4957669ef390f0e6719db3613ab3a7631e68424604a7b448f079bee145da6e09 \
+    --hash=sha256:5e86d77b090dbddbe78867a0275cb4df08ea195e660f1f7f13435a4649e954e5 \
+    --hash=sha256:6339d047dab2780cc6220f46306628e04d9750f02f983ddb37439ca47ced7149 \
+    --hash=sha256:681eb3d7e02e3c3655d1b16059fbfb605ac464c834a0c629048a30fad2b27489 \
+    --hash=sha256:6c409c0deba34f147f77efaa67b8e4bb83d2f11c8806405f76397ae5b8c0d1c9 \
+    --hash=sha256:7095f6fbfaa55defb6b733cfeb14efaae7a29f0b59d8cf213be4e7ca0b857b80 \
+    --hash=sha256:70c610f6cbe4b9fce272c407dd9d07e33e6bf7b4aa1b7ffb6f6ded8e634e3592 \
+    --hash=sha256:72814c01533f51d68702802d74f77ea026b5ec52793c791e2da806a3844a46c3 \
+    --hash=sha256:7a4826ad2bd6b07ca615c74ab91f32f6c96d08f6fcc3902ceeedaec8cdc3bcd6 \
+    --hash=sha256:7c70087bfee18a42b4040bb9ec1ca15a08242cf5867c58726530bdf3945672ed \
+    --hash=sha256:855eafa5d5a2034b4621c74925d89c5efef61418570e5ef9b37717d9c796419c \
+    --hash=sha256:8700f06d0ce6f128de3ccdbc1acaea1ee264d2caa9ca05daaf492fde7c2a7200 \
+    --hash=sha256:89f1b185a01fe560bc8ae5f619e924407efca2191b56ce749ec84982fc59a32a \
+    --hash=sha256:8b2c760cfc7042b27ebdb4a43a4453bd829a5742503599144d54a032c5dc7e9e \
+    --hash=sha256:8c2f5e83493748286002f9369f3e6607c565a6a90425a3a1fef5ae32a36d749d \
+    --hash=sha256:8e098148dd37b4ce3baca71fb394c81dc5d9c7728c95df695d2dca218edf40e6 \
+    --hash=sha256:94aea8eff76ee6d1cdacb07dd2123a68283cb5569e0250feab1240058f53b623 \
+    --hash=sha256:95eb302ff792e12aba9a8b8f8474ab229a83c103d74a750ec0bd1c1eea32e669 \
+    --hash=sha256:9bd9b3b31adcb054116447ea22caa61a285d92e94d710aa5ec97992ff5eb7cf3 \
+    --hash=sha256:9e608aafdb55eb9f255034709e20d5a83b6d60c054df0802fa9c9883d0a937aa \
+    --hash=sha256:a103b3a7069b62f5d4890ae1b8f0597618f628b286b03d4bc9195230b154bfa9 \
+    --hash=sha256:a386ebe437176aab38c041de1260cd3ea459c6ce5263594399880bbc398225b2 \
+    --hash=sha256:a38856a971c602f98472050165cea2cdc97709240373041b69030be15047691f \
+    --hash=sha256:a401b4598e5d3f4a9a811f3daf42ee2291790c7f9d74b18d75d6e21dda98a1a1 \
+    --hash=sha256:a7647ebdfb9682b7bb97e2a5e7cb6ae735b1c25008a70b906aecca294ee96cf4 \
+    --hash=sha256:aaf63899c94de41fe3cf934601b0f7ccb6b428c6e4eeb80da72c58eab077b19a \
+    --hash=sha256:b0dac0ff919ba34d4df1b6131f59ce95b08b9065233446be7e459f95554c0dc8 \
+    --hash=sha256:baacc6aee0b2ef6f3d308e197b5d7a81c0e70b06beae1f1fcacffdbd124fe0e3 \
+    --hash=sha256:bf420121d4c8dce6b889f0e8e4ec0ca34b7f40186203f06a946fa0276ba54029 \
+    --hash=sha256:c04a46716adde8d927adb9457bbe39cf473e1e2c2f5d0a16ceb837e5d841ad4f \
+    --hash=sha256:c0b21078a4b56965e2b12f247467b234734491897e99c1d51cee628da9786959 \
+    --hash=sha256:c1c76a1743432b4b60ab3358c937a3fe1341c828ae6194108a94c69028247f22 \
+    --hash=sha256:c4983bf937209c57240cff65906b18bb35e64ae872da6a0db937d7b4af845dd7 \
+    --hash=sha256:c4fb39a81950ec280984b3a44f5bd12819953dc5fa3a7e6fa7a80db5ee853952 \
+    --hash=sha256:c57921cda3a80d0f2b8aec7e25c8aa14479ea92b5b51b6876d975d925a2ea346 \
+    --hash=sha256:c8063cf17b19661471ecbdb3df1c84f24ad2e389e326ccaf89e3fb2484d8dd7e \
+    --hash=sha256:ccd16eb18a849fd8dcb23e23380e2f0a354e8daa0c984b8a732d9cfaba3a776d \
+    --hash=sha256:cd6dbe0238f7743d0efe563ab46294f54f9bc8f4b9bcf57c3c666cc5bc9d1299 \
+    --hash=sha256:d62e51710986674142526ab9f78663ca2b0726066ae26b78b22e0f5e571238dd \
+    --hash=sha256:db901e2ac34c931d73054d9797383d0f8009991e723dab15109740a63e7f902a \
+    --hash=sha256:e03b8895a6990c9ab2cdcd0f2fe44088ca1c65ae592b8f795c3294af00a461c3 \
+    --hash=sha256:e1c8a2f4c69e08e89632defbfabec2feb8a8d99edc9f89ce33c4b9e36ab63037 \
+    --hash=sha256:e4b749b9cc6ee664a3300bb3a273c1ca8068c46be705b6c31cf5d276f8628a94 \
+    --hash=sha256:e6a5bf2cba5ae1bb80b154ed68a3cfa2fa00fde979a7f50d6598d3e17d9ac20c \
+    --hash=sha256:e857a2232ba53ae940d3456f7533ce6ca98b81917d47adc3c7fd55dad8fab858 \
+    --hash=sha256:ee4006268ed33370957f55bf2e6f4d263eaf4dc3cfc473d1d90baff6ed36ce4a \
+    --hash=sha256:eef9df1eefada2c09a5e7a40991b9fc6ac6ef20b1372abd48d2794a316dc0449 \
+    --hash=sha256:f058f6963fd82eb143c692cecdc89e075fa0828db2e5b291070485390b2f1c9c \
+    --hash=sha256:f25c229a6ba38a35ae6e25ca1264621cc25d4d38dca2942a7fce0b67a4efe918 \
+    --hash=sha256:f2a1d0fd4242bd8643ce6f98927cf9c04540af6efa92323e9d3124f57727bfc1 \
+    --hash=sha256:f7560358a6811e52e9c4d142d497f1a6e10103d3a6881f18d04dbce3729c0e2c \
+    --hash=sha256:f779d3ad205f108d14e99bb3859aa7dd8e9c68874617c72354d7ecaec2a054ac \
+    --hash=sha256:f87f746ee241d30d6ed93969de31e5ffd09a2961a051e60ae6bddde9ec3583aa
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   requests
@@ -285,24 +358,22 @@
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   pylint
-docutils==0.18 \
-    --hash=sha256:a31688b2ea858517fa54293e5d5df06fbb875fb1f7e4c64529271b77781ca8fc \
-    --hash=sha256:c1d5dab2b11d16397406a282e53953fe495a46d69ae329f55aa98a5c4e3c5fbb
+docutils==0.20.1 \
+    --hash=sha256:96f387a2c5562db4476f09f13bbab2192e764cac08ebbf3a34a95d9b1e4a59d6 \
+    --hash=sha256:f08a4e276c3a1583a86dce3e34aba3fe04d02bba2dd51ed16106244e8a923e3b
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   breathe
-    #   myst-parser
     #   sphinx
-    #   sphinx-rtd-theme
-    #   sphinx-tabs
 executing==1.2.0 \
     --hash=sha256:0314a69e37426e3608aada02473b4161d4caf5a4b244d1d0c48072b8fee7bacc \
     --hash=sha256:19da64c18d2d851112f09c287f8d3dbbdf725ab0e569077efb6cdcbd3497c107
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   stack-data
-furo @ https://github.com/chadnorvell/furo/releases/download/2023.03.27/furo-2023.3.27.dev1-py3-none-any.whl \
-    --hash=sha256:8cee0af40350696f8d9bd7a261f2d452d05b3a98c2af710c6e12d7badfe54865
+furo==2023.8.19 \
+    --hash=sha256:12f99f87a1873b6746228cfde18f77244e6c1ffb85d7fed95e638aae70d80590 \
+    --hash=sha256:e671ee638ab3f1b472f4033b0167f502ab407830e0db0f843b1c1028119c9cd1
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
 google-api-core==2.7.1 \
     --hash=sha256:6be1fc59e2a7ba9f66808bbc22f976f81e4c3e7ab20fa0620ce42686288787d0 \
@@ -423,15 +494,15 @@
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   coloredlogs
-idna==3.3 \
-    --hash=sha256:84d9dd047ffa80596e0f246e2eab0b391788b0503584e8945f2368256d2735ff \
-    --hash=sha256:9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d
+idna==3.4 \
+    --hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \
+    --hash=sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   requests
-imagesize==1.3.0 \
-    --hash=sha256:1db2f82529e53c3e929e8926a1fa9235aa82d0bd0c580359c67ec31b2fddaa8c \
-    --hash=sha256:cd1750d452385ca327479d45b64d9c7729ecf0b3969a58148298c77092261f9d
+imagesize==1.4.1 \
+    --hash=sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b \
+    --hash=sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   sphinx
@@ -458,12 +529,11 @@
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   ipython
     #   ptpython
-jinja2==3.0.3 \
-    --hash=sha256:077ce6014f7b40d03b47d1f1ca4b0fc8328a692bd284016f806ed0eaca390ad8 \
-    --hash=sha256:611bb273cd68f3b993fabdc4064fc858c5b47a973cb5aa7999ec1ba405c87cd7
+jinja2==3.1.2 \
+    --hash=sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852 \
+    --hash=sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-    #   myst-parser
     #   sphinx
 json5==0.9.11 \
     --hash=sha256:1aa54b80b5e507dfe31d12b7743a642e2ffa6f70bf73b8e3d7d1d5fba83d99bd \
@@ -513,13 +583,6 @@
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   astroid
-markdown-it-py==2.2.0 \
-    --hash=sha256:5a35f8d1870171d9acc47b99612dc146129b631baf04970128b568f190d0cc30 \
-    --hash=sha256:7c9a5e412688bc771c67432cbfebcdd686c93ce6484913dccf06cb5a0bea35a1
-    # via
-    #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-    #   mdit-py-plugins
-    #   myst-parser
 markupsafe==2.1.3 \
     --hash=sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e \
     --hash=sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e \
@@ -586,45 +649,29 @@
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   pylint
-mdit-py-plugins==0.3.5 \
-    --hash=sha256:ca9a0714ea59a24b2b044a1831f48d817dd0c817e84339f20e7889f392d77c4e \
-    --hash=sha256:eee0adc7195e5827e17e02d2a258a2ba159944a0748f59c5099a4a27f78fcf6a
-    # via
-    #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-    #   myst-parser
-mdurl==0.1.2 \
-    --hash=sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8 \
-    --hash=sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba
-    # via
-    #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-    #   markdown-it-py
-mypy==1.0.1 \
-    --hash=sha256:0af4f0e20706aadf4e6f8f8dc5ab739089146b83fd53cb4a7e0e850ef3de0bb6 \
-    --hash=sha256:15b5a824b58c7c822c51bc66308e759243c32631896743f030daf449fe3677f3 \
-    --hash=sha256:17455cda53eeee0a4adb6371a21dd3dbf465897de82843751cf822605d152c8c \
-    --hash=sha256:2013226d17f20468f34feddd6aae4635a55f79626549099354ce641bc7d40262 \
-    --hash=sha256:24189f23dc66f83b839bd1cce2dfc356020dfc9a8bae03978477b15be61b062e \
-    --hash=sha256:27a0f74a298769d9fdc8498fcb4f2beb86f0564bcdb1a37b58cbbe78e55cf8c0 \
-    --hash=sha256:28cea5a6392bb43d266782983b5a4216c25544cd7d80be681a155ddcdafd152d \
-    --hash=sha256:448de661536d270ce04f2d7dddaa49b2fdba6e3bd8a83212164d4174ff43aa65 \
-    --hash=sha256:48525aec92b47baed9b3380371ab8ab6e63a5aab317347dfe9e55e02aaad22e8 \
-    --hash=sha256:5bc8d6bd3b274dd3846597855d96d38d947aedba18776aa998a8d46fabdaed76 \
-    --hash=sha256:5deb252fd42a77add936b463033a59b8e48eb2eaec2976d76b6878d031933fe4 \
-    --hash=sha256:5f546ac34093c6ce33f6278f7c88f0f147a4849386d3bf3ae193702f4fe31407 \
-    --hash=sha256:5fdd63e4f50e3538617887e9aee91855368d9fc1dea30da743837b0df7373bc4 \
-    --hash=sha256:65b122a993d9c81ea0bfde7689b3365318a88bde952e4dfa1b3a8b4ac05d168b \
-    --hash=sha256:71a808334d3f41ef011faa5a5cd8153606df5fc0b56de5b2e89566c8093a0c9a \
-    --hash=sha256:920169f0184215eef19294fa86ea49ffd4635dedfdea2b57e45cb4ee85d5ccaf \
-    --hash=sha256:93a85495fb13dc484251b4c1fd7a5ac370cd0d812bbfc3b39c1bafefe95275d5 \
-    --hash=sha256:a2948c40a7dd46c1c33765718936669dc1f628f134013b02ff5ac6c7ef6942bf \
-    --hash=sha256:c6c2ccb7af7154673c591189c3687b013122c5a891bb5651eca3db8e6c6c55bd \
-    --hash=sha256:c96b8a0c019fe29040d520d9257d8c8f122a7343a8307bf8d6d4a43f5c5bfcc8 \
-    --hash=sha256:d42a98e76070a365a1d1c220fcac8aa4ada12ae0db679cb4d910fabefc88b994 \
-    --hash=sha256:dbeb24514c4acbc78d205f85dd0e800f34062efcc1f4a4857c57e4b4b8712bff \
-    --hash=sha256:e60d0b09f62ae97a94605c3f73fd952395286cf3e3b9e7b97f60b01ddfbbda88 \
-    --hash=sha256:e64f48c6176e243ad015e995de05af7f22bbe370dbb5b32bd6988438ec873919 \
-    --hash=sha256:e831662208055b006eef68392a768ff83596035ffd6d846786578ba1714ba8f6 \
-    --hash=sha256:eda5c8b9949ed411ff752b9a01adda31afe7eae1e53e946dbdf9db23865e66c4
+mypy==1.5.0 \
+    --hash=sha256:1fe816e26e676c1311b9e04fd576543b873576d39439f7c24c8e5c7728391ecf \
+    --hash=sha256:2c9d570f53908cbea326ad8f96028a673b814d9dca7515bf71d95fa662c3eb6f \
+    --hash=sha256:35b13335c6c46a386577a51f3d38b2b5d14aa619e9633bb756bd77205e4bd09f \
+    --hash=sha256:372fd97293ed0076d52695849f59acbbb8461c4ab447858cdaeaf734a396d823 \
+    --hash=sha256:42170e68adb1603ccdc55a30068f72bcfcde2ce650188e4c1b2a93018b826735 \
+    --hash=sha256:69b32d0dedd211b80f1b7435644e1ef83033a2af2ac65adcdc87c38db68a86be \
+    --hash=sha256:725b57a19b7408ef66a0fd9db59b5d3e528922250fb56e50bded27fea9ff28f0 \
+    --hash=sha256:769ddb6bfe55c2bd9c7d6d7020885a5ea14289619db7ee650e06b1ef0852c6f4 \
+    --hash=sha256:79c520aa24f21852206b5ff2cf746dc13020113aa73fa55af504635a96e62718 \
+    --hash=sha256:84cf9f7d8a8a22bb6a36444480f4cbf089c917a4179fbf7eea003ea931944a7f \
+    --hash=sha256:9166186c498170e1ff478a7f540846b2169243feb95bc228d39a67a1a450cdc6 \
+    --hash=sha256:a2500ad063413bc873ae102cf655bf49889e0763b260a3a7cf544a0cbbf7e70a \
+    --hash=sha256:a551ed0fc02455fe2c1fb0145160df8336b90ab80224739627b15ebe2b45e9dc \
+    --hash=sha256:ad3109bec37cc33654de8db30fe8ff3a1bb57ea65144167d68185e6dced9868d \
+    --hash=sha256:b4ea3a0241cb005b0ccdbd318fb99619b21ae51bcf1660b95fc22e0e7d3ba4a1 \
+    --hash=sha256:c36011320e452eb30bec38b9fd3ba20569dc9545d7d4540d967f3ea1fab9c374 \
+    --hash=sha256:c8a7444d6fcac7e2585b10abb91ad900a576da7af8f5cffffbff6065d9115813 \
+    --hash=sha256:cbf18f8db7e5f060d61c91e334d3b96d6bb624ddc9ee8a1cde407b737acbca2c \
+    --hash=sha256:d145b81a8214687cfc1f85c03663a5bbe736777410e5580e54d526e7e904f564 \
+    --hash=sha256:eec5c927aa4b3e8b4781840f1550079969926d0a22ce38075f6cfcf4b13e3eb4 \
+    --hash=sha256:f3460f34b3839b9bc84ee3ed65076eb827cd99ed13ed08d723f9083cada4a212 \
+    --hash=sha256:f3940cf5845b2512b3ab95463198b0cdf87975dfd17fdcc6ce9709a9abe09e69
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
 mypy-extensions==1.0.0 \
     --hash=sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d \
@@ -637,10 +684,6 @@
     --hash=sha256:15604f6943b16c05db646903261e3b3e775cf7f7990b7c37b03d043a907b650d \
     --hash=sha256:24f3b0aecb06656e983f58e07c732a90577b9d7af3e1066fc2b663bbf0370248
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-myst-parser==0.18.1 \
-    --hash=sha256:61b275b85d9f58aa327f370913ae1bec26ebad372cc99f3ab85c8ec3ee8d9fb8 \
-    --hash=sha256:79317f4bb2c13053dd6e64f9da1ba1da6cd9c40c8a430c447a7b146a594c246d
-    # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
 packaging==23.0 \
     --hash=sha256:714ac14496c3e68c99c29b00845f7a2b85f3bb6f1078fd9f72fd20f0570002b2 \
     --hash=sha256:b6ad297f8907de0fa2fe1ccbd26fdaf387f5f47c7275fedf8cce89f99446cf97
@@ -779,19 +822,18 @@
     --hash=sha256:5609aa6da1123fccfae2e8431a67b4146aa7fad5b3889f808df12b110f230937 \
     --hash=sha256:cde854e662774c5457d688ca41615f6594187ba7067af101232df889a6b7a66b
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-pygments==2.14.0 \
-    --hash=sha256:b3ed06a9e8ac9a9aae5a6f5dbe78a8a58655d17b43b93c078f094ddc476ae297 \
-    --hash=sha256:fa7bd7bd2771287c0de303af8bfdfc731f51bd2c6a47ab69d117138893b82717
+pygments==2.16.1 \
+    --hash=sha256:13fc09fa63bc8d8671a6d247e1eb303c4b343eaee81d861f3404db2935653692 \
+    --hash=sha256:1daff0494820c69bc8941e407aa20f577374ee88364ee10a98fdbe0aece96e29
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   furo
     #   ipython
     #   ptpython
     #   sphinx
-    #   sphinx-tabs
-pylint==2.16.2 \
-    --hash=sha256:13b2c805a404a9bf57d002cd5f054ca4d40b0b87542bdaba5e05321ae8262c84 \
-    --hash=sha256:ff22dde9c2128cd257c145cfd51adeff0be7df4d80d669055f24a962b351bbe4
+pylint==2.17.5 \
+    --hash=sha256:73995fb8216d3bed149c8d51bba25b2c52a8251a2c8ac846ec668ce38fab5413 \
+    --hash=sha256:f7b601cbc06fef7e62a754e2b41294c2aa31f1cb659624b9a85bcba29eaf8252
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
 pyperclip==1.8.2 \
     --hash=sha256:105254a8b04934f0bc84e9c24eb360a591aaf6535c9def5f29d92af107a9bf57
@@ -806,9 +848,9 @@
     --hash=sha256:3c77e014170dfffbd816e6ffc205e9842efb10be9f58ec16d3e8675b4925cddb \
     --hash=sha256:c4451db6ba391ca6ca299fb3ec7bae67a5c55dde170964c7a14ceefec02f2cf0
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-pytz==2021.3 \
-    --hash=sha256:3672058bc3453457b622aab7a1c3bfd5ab0bdae451512f6cf25f64ed37f5b87c \
-    --hash=sha256:acad2d8b20a1af07d4e4c9d2e9285c5ed9104354062f275f3fcd88dcef4f1326
+pytz==2023.3 \
+    --hash=sha256:1d8ce29db189191fb55338ee6d0387d82ab59f3d00eac103412d64e0ebd0c588 \
+    --hash=sha256:a151b3abb88eda1d4e34a9814df37de2a80e301e68ba0fd856fb9b46bfbbbffb
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   babel
@@ -857,12 +899,10 @@
     --hash=sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585 \
     --hash=sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d \
     --hash=sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f
-    # via
-    #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-    #   myst-parser
-requests==2.27.1 \
-    --hash=sha256:68d7c56fd5a8999887728ef304a6d12edc7be74f1cfa47714fc8b414525c9a61 \
-    --hash=sha256:f22fa1e554c9ddfd16e6e41ac79759e17be9e492b3587efa038054674760e72d
+    # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
+requests==2.31.0 \
+    --hash=sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f \
+    --hash=sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   google-api-core
@@ -897,21 +937,17 @@
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   beautifulsoup4
-sphinx==5.3.0 \
-    --hash=sha256:060ca5c9f7ba57a08a1219e547b269fadf125ae25b06b9fa7f66768efb652d6d \
-    --hash=sha256:51026de0a9ff9fc13c05d74913ad66047e104f56a129ff73e174eb5c3ee794b5
+sphinx==7.1.2 \
+    --hash=sha256:780f4d32f1d7d1126576e0e5ecc19dc32ab76cd24e950228dcf7b1f6d3d9e22f \
+    --hash=sha256:d170a81825b2fcacb6dfd5a0d7f578a053e45d3f2b153fecc948c37344eb4cbe
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   breathe
     #   furo
-    #   myst-parser
     #   sphinx-argparse
     #   sphinx-basic-ng
     #   sphinx-copybutton
     #   sphinx-design
-    #   sphinx-rtd-theme
-    #   sphinx-tabs
-    #   sphinxcontrib-jquery
 sphinx-argparse==0.4.0 \
     --hash=sha256:73bee01f7276fae2bf621ccfe4d167af7306e7288e3482005405d9f826f9b037 \
     --hash=sha256:e0f34184eb56f12face774fbc87b880abdb9017a0998d1ec559b267e9697e449
@@ -926,21 +962,13 @@
     --hash=sha256:0842851b5955087a7ec7fc870b622cb168618ad408dee42692e9a5c97d071da8 \
     --hash=sha256:366251e28a6f6041514bfb5439425210418d6c750e98d3a695b73e56866a677a
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-sphinx-design==0.3.0 \
-    --hash=sha256:7183fa1fae55b37ef01bda5125a21ee841f5bbcbf59a35382be598180c4cefba \
-    --hash=sha256:823c1dd74f31efb3285ec2f1254caefed29d762a40cd676f58413a1e4ed5cc96
+sphinx-design==0.5.0 \
+    --hash=sha256:1af1267b4cea2eedd6724614f19dcc88fe2e15aff65d06b2f6252cee9c4f4c1e \
+    --hash=sha256:e8e513acea6f92d15c6de3b34e954458f245b8e761b45b63950f65373352ab00
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-sphinx-rtd-theme==1.2.0 \
-    --hash=sha256:a0d8bd1a2ed52e0b338cbe19c4b2eef3c5e7a048769753dac6a9f059c7b641b8 \
-    --hash=sha256:f823f7e71890abe0ac6aaa6013361ea2696fc8d3e1fa798f463e82bdb77eeff2
-    # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-sphinx-tabs==3.4.1 \
-    --hash=sha256:7cea8942aeccc5d01a995789c01804b787334b55927f29b36ba16ed1e7cb27c6 \
-    --hash=sha256:d2a09f9e8316e400d57503f6df1c78005fdde220e5af589cc79d493159e1b832
-    # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-sphinxcontrib-applehelp==1.0.2 \
-    --hash=sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a \
-    --hash=sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58
+sphinxcontrib-applehelp==1.0.4 \
+    --hash=sha256:29d341f67fb0f6f586b23ad80e072c8e6ad0b48417db2bde114a4c9746feb228 \
+    --hash=sha256:828f867945bbe39817c210a1abfd1bc4895c8b73fcaade56d45357a348a07d7e
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   sphinx
@@ -950,18 +978,12 @@
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   sphinx
-sphinxcontrib-htmlhelp==2.0.0 \
-    --hash=sha256:d412243dfb797ae3ec2b59eca0e52dac12e75a241bf0e4eb861e450d06c6ed07 \
-    --hash=sha256:f5f8bb2d0d629f398bf47d0d69c07bc13b65f75a81ad9e2f71a63d4b7a2f6db2
+sphinxcontrib-htmlhelp==2.0.1 \
+    --hash=sha256:0cbdd302815330058422b98a113195c9249825d681e18f11e8b1f78a2f11efff \
+    --hash=sha256:c38cb46dccf316c79de6e5515e1770414b797162b23cd3d06e67020e1d2a6903
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   sphinx
-sphinxcontrib-jquery==4.1 \
-    --hash=sha256:1620739f04e36a2c779f1a131a2dfd49b2fd07351bf1968ced074365933abc7a \
-    --hash=sha256:f936030d7d0147dd026a4f2b5a57343d233f1fc7b363f68b3d4f1cb0993878ae
-    # via
-    #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-    #   sphinx-rtd-theme
 sphinxcontrib-jsmath==1.0.1 \
     --hash=sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178 \
     --hash=sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8
@@ -1018,9 +1040,9 @@
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   ipython
     #   matplotlib-inline
-types-docutils==0.18.0 \
-    --hash=sha256:14f781eb28d89a1cd61f1c41bd0776ad3bb4e2333d317c37d2c67f2eaf5891fe \
-    --hash=sha256:f0305109169326edffd98c128f542f773be06a1502a17d96359c53e31082db19
+types-docutils==0.20.0.3 \
+    --hash=sha256:4928e790f42b99d5833990f99c8dd9fa9f16825f6ed30380ca981846d36870cd \
+    --hash=sha256:a930150d8e01a9170f9bca489f46808ddebccdd8bc1e47c07968a77e49fb9321
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   types-pygments
@@ -1030,9 +1052,9 @@
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   mypy-protobuf
-types-pygments==2.9.13 \
-    --hash=sha256:589b87a6295c2b8b40566e46ff5ab9b5097735b051158f36648f64acc2e98dbe \
-    --hash=sha256:8b4ff9be1208b1dce639e406fc4d4e6a4682b6b253a021f2a7ffff9dbce16944
+types-pygments==2.16.0.0 \
+    --hash=sha256:4624a547d5ba73c971fac5d6fd327141e85e65f6123448bee76f0c8557652a71 \
+    --hash=sha256:aa93e4664e2d6cfea7570cde156e3966bf939f9c7d736cd179c4c8e94f7600b2
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
 types-pyserial==3.5.0.7 \
     --hash=sha256:22b665a336539b85108e2f5d61e6cde1b59818eae78324c17bfe64edc1a2bd66 \
@@ -1042,9 +1064,9 @@
     --hash=sha256:7d340b19ca28cddfdba438ee638cd4084bde213e501a3978738543e27094775b \
     --hash=sha256:a461508f3096d1d5810ec5ab95d7eeecb651f3a15b71959999988942063bf01d
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-types-requests==2.28.11.15 \
-    --hash=sha256:a05e4c7bc967518fba5789c341ea8b0c942776ee474c7873129a61161978e586 \
-    --hash=sha256:fc8eaa09cc014699c6b63c60c2e3add0c8b09a410c818b5ac6e65f92a26dde09
+types-requests==2.31.0.2 \
+    --hash=sha256:56d181c85b5925cbc59f4489a57e72a8b2166f18273fd8ba7b6fe0c0b986f12a \
+    --hash=sha256:6aa3f7faf0ea52d728bb18c0a0d1522d9bfd8c72d26ff6f61bfc3d06a411cf40
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
 types-setuptools==67.8.0.0 \
     --hash=sha256:6df73340d96b238a4188b7b7668814b37e8018168aef1eef94a3b1872e3f60ff \
@@ -1071,11 +1093,10 @@
     #   black
     #   ipython
     #   mypy
-    #   myst-parser
     #   pylint
-urllib3==1.26.14 \
-    --hash=sha256:076907bf8fd355cde77728471316625a4d2f7e713c125f51953bb5b3eecf4f72 \
-    --hash=sha256:75edcdc2f7d85b137124a6c3c9fc3933cdeaa12ecb9a6a959f22797a0feca7e1
+urllib3==2.0.4 \
+    --hash=sha256:8d22f86aae8ef5e410d4f539fde9ce6b2113a001bb4d189e0aed70642d602b11 \
+    --hash=sha256:de7df1803967d2c2a98e4b11bb7d6bd9210474c46e8a0401514e3a42a75ebde4
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   requests
@@ -1275,13 +1296,12 @@
     --hash=sha256:408fb9a2b254c302f49db83c59f9aa0b4b0fd0ec25be3a5c51181327922ff63d \
     --hash=sha256:e3a234ba8455fe201eaa649cdac872d590089a18b661e39bbac7020978dd9c2e
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-zipp==3.15.0 \
-    --hash=sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b \
-    --hash=sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556
+zipp==3.16.2 \
+    --hash=sha256:679e51dd4403591b2d6838a48de3d283f3d188412a9782faadf845f298736ba0 \
+    --hash=sha256:ebc15946aa78bd63458992fc81ec3b6f7b1e92d51c35e6de1c3804e73b799147
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   importlib-metadata
-
 # The following packages are considered to be unsafe in a requirements file:
 pip==23.2 \
     --hash=sha256:78e5353a9dda374b462f2054f83a7b63f3f065c98236a68361845c1b0ee7e35f \
diff --git a/pw_env_setup/py/pw_env_setup/virtualenv_setup/upstream_requirements_linux_lock.txt b/pw_env_setup/py/pw_env_setup/virtualenv_setup/upstream_requirements_linux_lock.txt
index db8c61c..b733308 100644
--- a/pw_env_setup/py/pw_env_setup/virtualenv_setup/upstream_requirements_linux_lock.txt
+++ b/pw_env_setup/py/pw_env_setup/virtualenv_setup/upstream_requirements_linux_lock.txt
@@ -4,9 +4,9 @@
 #
 #    pip-compile --allow-unsafe --generate-hashes --output-file=/upstream_requirements_linux_lock.txt --strip-extras /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
 #
-alabaster==0.7.12 \
-    --hash=sha256:446438bdcca0e05bd45ea2de1668c1d9b032e1a9154c2c259092d77031ddd359 \
-    --hash=sha256:a661d72d58e6ea8a57f7a86e37d86716863ee5e92788398526d58b26a4e4dc02
+alabaster==0.7.13 \
+    --hash=sha256:1ee19aca801bbabb5ba3f5f258e4422dfa86f82f3e9cefb0859b283cdd7f62a3 \
+    --hash=sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   sphinx
@@ -16,9 +16,9 @@
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   ptpython
-astroid==2.14.2 \
-    --hash=sha256:0e0e3709d64fbffd3037e4ff403580550f14471fd3eaae9fa11cc9a5c7901153 \
-    --hash=sha256:a3cf9f02c53dd259144a7e8f3ccd75d67c9a8c716ef183e0c1f291bc5d7bb3cf
+astroid==2.15.6 \
+    --hash=sha256:389656ca57b6108f939cf5d2f9a2a825a3be50ba9d589670f393236e0a03b91c \
+    --hash=sha256:903f024859b7c7687d7a7f3a3f73b17301f8e42dfd9cc9df9d4418172d3e2dbd
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   pylint
@@ -28,9 +28,9 @@
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   stack-data
-babel==2.9.1 \
-    --hash=sha256:ab49e12b91d937cd11f0b67cb259a57ab4ad2b59ac7a3b41d6c06c0ac5b0def9 \
-    --hash=sha256:bc0c176f9f6a994582230df350aa6e05ba2ebe4b3ac317eab29d9be5d2768da0
+babel==2.12.1 \
+    --hash=sha256:b4246fb7677d3b98f501a39d43396d3cafdc8eadb045f4a31be01863f655c610 \
+    --hash=sha256:cc2d99999cd01d44420ae725a21c9e3711b3aadc7976d6147f622d8581963455
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   sphinx
@@ -73,9 +73,9 @@
     --hash=sha256:c91dfc2c2a4e50df0026f88d2215e166616e0c80e86004d0003ece0488db2739 \
     --hash=sha256:e6663f91b6feca5d06f2ccd49a10f254f9298cc1f7f49c46e498a0771b507104
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-breathe==4.34.0 \
-    --hash=sha256:48804dcf0e607a89fb6ad88c729ef12743a42db03ae9489be4ef8f7c4011774a \
-    --hash=sha256:ac0768a5e84addad3e632028fe67749c567aba2b29088493b64c2c1634bcdba1
+breathe==4.35.0 \
+    --hash=sha256:5165541c3c67b6c7adde8b3ecfe895c6f7844783c4076b6d8d287e4f33d62386 \
+    --hash=sha256:52c581f42ca4310737f9e435e3851c3d1f15446205a85fbc272f1f97ed74f5be
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
 build==0.10.0 \
     --hash=sha256:af266720050a66c893a6096a2f410989eeac74ff9a68ba194b3f6473e8e26171 \
@@ -163,9 +163,82 @@
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   cryptography
-charset-normalizer==2.0.10 \
-    --hash=sha256:876d180e9d7432c5d1dfd4c5d26b72f099d503e8fcc0feb7532c9289be60fcbd \
-    --hash=sha256:cb957888737fc0bbcd78e3df769addb41fd1ff8cf950dc9e7ad7793f1bf44455
+charset-normalizer==3.2.0 \
+    --hash=sha256:04e57ab9fbf9607b77f7d057974694b4f6b142da9ed4a199859d9d4d5c63fe96 \
+    --hash=sha256:09393e1b2a9461950b1c9a45d5fd251dc7c6f228acab64da1c9c0165d9c7765c \
+    --hash=sha256:0b87549028f680ca955556e3bd57013ab47474c3124dc069faa0b6545b6c9710 \
+    --hash=sha256:1000fba1057b92a65daec275aec30586c3de2401ccdcd41f8a5c1e2c87078706 \
+    --hash=sha256:1249cbbf3d3b04902ff081ffbb33ce3377fa6e4c7356f759f3cd076cc138d020 \
+    --hash=sha256:1920d4ff15ce893210c1f0c0e9d19bfbecb7983c76b33f046c13a8ffbd570252 \
+    --hash=sha256:193cbc708ea3aca45e7221ae58f0fd63f933753a9bfb498a3b474878f12caaad \
+    --hash=sha256:1a100c6d595a7f316f1b6f01d20815d916e75ff98c27a01ae817439ea7726329 \
+    --hash=sha256:1f30b48dd7fa1474554b0b0f3fdfdd4c13b5c737a3c6284d3cdc424ec0ffff3a \
+    --hash=sha256:203f0c8871d5a7987be20c72442488a0b8cfd0f43b7973771640fc593f56321f \
+    --hash=sha256:246de67b99b6851627d945db38147d1b209a899311b1305dd84916f2b88526c6 \
+    --hash=sha256:2dee8e57f052ef5353cf608e0b4c871aee320dd1b87d351c28764fc0ca55f9f4 \
+    --hash=sha256:2efb1bd13885392adfda4614c33d3b68dee4921fd0ac1d3988f8cbb7d589e72a \
+    --hash=sha256:2f4ac36d8e2b4cc1aa71df3dd84ff8efbe3bfb97ac41242fbcfc053c67434f46 \
+    --hash=sha256:3170c9399da12c9dc66366e9d14da8bf7147e1e9d9ea566067bbce7bb74bd9c2 \
+    --hash=sha256:3b1613dd5aee995ec6d4c69f00378bbd07614702a315a2cf6c1d21461fe17c23 \
+    --hash=sha256:3bb3d25a8e6c0aedd251753a79ae98a093c7e7b471faa3aa9a93a81431987ace \
+    --hash=sha256:3bb7fda7260735efe66d5107fb7e6af6a7c04c7fce9b2514e04b7a74b06bf5dd \
+    --hash=sha256:41b25eaa7d15909cf3ac4c96088c1f266a9a93ec44f87f1d13d4a0e86c81b982 \
+    --hash=sha256:45de3f87179c1823e6d9e32156fb14c1927fcc9aba21433f088fdfb555b77c10 \
+    --hash=sha256:46fb8c61d794b78ec7134a715a3e564aafc8f6b5e338417cb19fe9f57a5a9bf2 \
+    --hash=sha256:48021783bdf96e3d6de03a6e39a1171ed5bd7e8bb93fc84cc649d11490f87cea \
+    --hash=sha256:4957669ef390f0e6719db3613ab3a7631e68424604a7b448f079bee145da6e09 \
+    --hash=sha256:5e86d77b090dbddbe78867a0275cb4df08ea195e660f1f7f13435a4649e954e5 \
+    --hash=sha256:6339d047dab2780cc6220f46306628e04d9750f02f983ddb37439ca47ced7149 \
+    --hash=sha256:681eb3d7e02e3c3655d1b16059fbfb605ac464c834a0c629048a30fad2b27489 \
+    --hash=sha256:6c409c0deba34f147f77efaa67b8e4bb83d2f11c8806405f76397ae5b8c0d1c9 \
+    --hash=sha256:7095f6fbfaa55defb6b733cfeb14efaae7a29f0b59d8cf213be4e7ca0b857b80 \
+    --hash=sha256:70c610f6cbe4b9fce272c407dd9d07e33e6bf7b4aa1b7ffb6f6ded8e634e3592 \
+    --hash=sha256:72814c01533f51d68702802d74f77ea026b5ec52793c791e2da806a3844a46c3 \
+    --hash=sha256:7a4826ad2bd6b07ca615c74ab91f32f6c96d08f6fcc3902ceeedaec8cdc3bcd6 \
+    --hash=sha256:7c70087bfee18a42b4040bb9ec1ca15a08242cf5867c58726530bdf3945672ed \
+    --hash=sha256:855eafa5d5a2034b4621c74925d89c5efef61418570e5ef9b37717d9c796419c \
+    --hash=sha256:8700f06d0ce6f128de3ccdbc1acaea1ee264d2caa9ca05daaf492fde7c2a7200 \
+    --hash=sha256:89f1b185a01fe560bc8ae5f619e924407efca2191b56ce749ec84982fc59a32a \
+    --hash=sha256:8b2c760cfc7042b27ebdb4a43a4453bd829a5742503599144d54a032c5dc7e9e \
+    --hash=sha256:8c2f5e83493748286002f9369f3e6607c565a6a90425a3a1fef5ae32a36d749d \
+    --hash=sha256:8e098148dd37b4ce3baca71fb394c81dc5d9c7728c95df695d2dca218edf40e6 \
+    --hash=sha256:94aea8eff76ee6d1cdacb07dd2123a68283cb5569e0250feab1240058f53b623 \
+    --hash=sha256:95eb302ff792e12aba9a8b8f8474ab229a83c103d74a750ec0bd1c1eea32e669 \
+    --hash=sha256:9bd9b3b31adcb054116447ea22caa61a285d92e94d710aa5ec97992ff5eb7cf3 \
+    --hash=sha256:9e608aafdb55eb9f255034709e20d5a83b6d60c054df0802fa9c9883d0a937aa \
+    --hash=sha256:a103b3a7069b62f5d4890ae1b8f0597618f628b286b03d4bc9195230b154bfa9 \
+    --hash=sha256:a386ebe437176aab38c041de1260cd3ea459c6ce5263594399880bbc398225b2 \
+    --hash=sha256:a38856a971c602f98472050165cea2cdc97709240373041b69030be15047691f \
+    --hash=sha256:a401b4598e5d3f4a9a811f3daf42ee2291790c7f9d74b18d75d6e21dda98a1a1 \
+    --hash=sha256:a7647ebdfb9682b7bb97e2a5e7cb6ae735b1c25008a70b906aecca294ee96cf4 \
+    --hash=sha256:aaf63899c94de41fe3cf934601b0f7ccb6b428c6e4eeb80da72c58eab077b19a \
+    --hash=sha256:b0dac0ff919ba34d4df1b6131f59ce95b08b9065233446be7e459f95554c0dc8 \
+    --hash=sha256:baacc6aee0b2ef6f3d308e197b5d7a81c0e70b06beae1f1fcacffdbd124fe0e3 \
+    --hash=sha256:bf420121d4c8dce6b889f0e8e4ec0ca34b7f40186203f06a946fa0276ba54029 \
+    --hash=sha256:c04a46716adde8d927adb9457bbe39cf473e1e2c2f5d0a16ceb837e5d841ad4f \
+    --hash=sha256:c0b21078a4b56965e2b12f247467b234734491897e99c1d51cee628da9786959 \
+    --hash=sha256:c1c76a1743432b4b60ab3358c937a3fe1341c828ae6194108a94c69028247f22 \
+    --hash=sha256:c4983bf937209c57240cff65906b18bb35e64ae872da6a0db937d7b4af845dd7 \
+    --hash=sha256:c4fb39a81950ec280984b3a44f5bd12819953dc5fa3a7e6fa7a80db5ee853952 \
+    --hash=sha256:c57921cda3a80d0f2b8aec7e25c8aa14479ea92b5b51b6876d975d925a2ea346 \
+    --hash=sha256:c8063cf17b19661471ecbdb3df1c84f24ad2e389e326ccaf89e3fb2484d8dd7e \
+    --hash=sha256:ccd16eb18a849fd8dcb23e23380e2f0a354e8daa0c984b8a732d9cfaba3a776d \
+    --hash=sha256:cd6dbe0238f7743d0efe563ab46294f54f9bc8f4b9bcf57c3c666cc5bc9d1299 \
+    --hash=sha256:d62e51710986674142526ab9f78663ca2b0726066ae26b78b22e0f5e571238dd \
+    --hash=sha256:db901e2ac34c931d73054d9797383d0f8009991e723dab15109740a63e7f902a \
+    --hash=sha256:e03b8895a6990c9ab2cdcd0f2fe44088ca1c65ae592b8f795c3294af00a461c3 \
+    --hash=sha256:e1c8a2f4c69e08e89632defbfabec2feb8a8d99edc9f89ce33c4b9e36ab63037 \
+    --hash=sha256:e4b749b9cc6ee664a3300bb3a273c1ca8068c46be705b6c31cf5d276f8628a94 \
+    --hash=sha256:e6a5bf2cba5ae1bb80b154ed68a3cfa2fa00fde979a7f50d6598d3e17d9ac20c \
+    --hash=sha256:e857a2232ba53ae940d3456f7533ce6ca98b81917d47adc3c7fd55dad8fab858 \
+    --hash=sha256:ee4006268ed33370957f55bf2e6f4d263eaf4dc3cfc473d1d90baff6ed36ce4a \
+    --hash=sha256:eef9df1eefada2c09a5e7a40991b9fc6ac6ef20b1372abd48d2794a316dc0449 \
+    --hash=sha256:f058f6963fd82eb143c692cecdc89e075fa0828db2e5b291070485390b2f1c9c \
+    --hash=sha256:f25c229a6ba38a35ae6e25ca1264621cc25d4d38dca2942a7fce0b67a4efe918 \
+    --hash=sha256:f2a1d0fd4242bd8643ce6f98927cf9c04540af6efa92323e9d3124f57727bfc1 \
+    --hash=sha256:f7560358a6811e52e9c4d142d497f1a6e10103d3a6881f18d04dbce3729c0e2c \
+    --hash=sha256:f779d3ad205f108d14e99bb3859aa7dd8e9c68874617c72354d7ecaec2a054ac \
+    --hash=sha256:f87f746ee241d30d6ed93969de31e5ffd09a2961a051e60ae6bddde9ec3583aa
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   requests
@@ -279,24 +352,22 @@
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   pylint
-docutils==0.18 \
-    --hash=sha256:a31688b2ea858517fa54293e5d5df06fbb875fb1f7e4c64529271b77781ca8fc \
-    --hash=sha256:c1d5dab2b11d16397406a282e53953fe495a46d69ae329f55aa98a5c4e3c5fbb
+docutils==0.20.1 \
+    --hash=sha256:96f387a2c5562db4476f09f13bbab2192e764cac08ebbf3a34a95d9b1e4a59d6 \
+    --hash=sha256:f08a4e276c3a1583a86dce3e34aba3fe04d02bba2dd51ed16106244e8a923e3b
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   breathe
-    #   myst-parser
     #   sphinx
-    #   sphinx-rtd-theme
-    #   sphinx-tabs
 executing==1.2.0 \
     --hash=sha256:0314a69e37426e3608aada02473b4161d4caf5a4b244d1d0c48072b8fee7bacc \
     --hash=sha256:19da64c18d2d851112f09c287f8d3dbbdf725ab0e569077efb6cdcbd3497c107
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   stack-data
-furo @ https://github.com/chadnorvell/furo/releases/download/2023.03.27/furo-2023.3.27.dev1-py3-none-any.whl \
-    --hash=sha256:8cee0af40350696f8d9bd7a261f2d452d05b3a98c2af710c6e12d7badfe54865
+furo==2023.8.19 \
+    --hash=sha256:12f99f87a1873b6746228cfde18f77244e6c1ffb85d7fed95e638aae70d80590 \
+    --hash=sha256:e671ee638ab3f1b472f4033b0167f502ab407830e0db0f843b1c1028119c9cd1
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
 google-api-core==2.7.1 \
     --hash=sha256:6be1fc59e2a7ba9f66808bbc22f976f81e4c3e7ab20fa0620ce42686288787d0 \
@@ -417,15 +488,15 @@
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   coloredlogs
-idna==3.3 \
-    --hash=sha256:84d9dd047ffa80596e0f246e2eab0b391788b0503584e8945f2368256d2735ff \
-    --hash=sha256:9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d
+idna==3.4 \
+    --hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \
+    --hash=sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   requests
-imagesize==1.3.0 \
-    --hash=sha256:1db2f82529e53c3e929e8926a1fa9235aa82d0bd0c580359c67ec31b2fddaa8c \
-    --hash=sha256:cd1750d452385ca327479d45b64d9c7729ecf0b3969a58148298c77092261f9d
+imagesize==1.4.1 \
+    --hash=sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b \
+    --hash=sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   sphinx
@@ -452,12 +523,11 @@
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   ipython
     #   ptpython
-jinja2==3.0.3 \
-    --hash=sha256:077ce6014f7b40d03b47d1f1ca4b0fc8328a692bd284016f806ed0eaca390ad8 \
-    --hash=sha256:611bb273cd68f3b993fabdc4064fc858c5b47a973cb5aa7999ec1ba405c87cd7
+jinja2==3.1.2 \
+    --hash=sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852 \
+    --hash=sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-    #   myst-parser
     #   sphinx
 json5==0.9.11 \
     --hash=sha256:1aa54b80b5e507dfe31d12b7743a642e2ffa6f70bf73b8e3d7d1d5fba83d99bd \
@@ -507,13 +577,6 @@
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   astroid
-markdown-it-py==2.2.0 \
-    --hash=sha256:5a35f8d1870171d9acc47b99612dc146129b631baf04970128b568f190d0cc30 \
-    --hash=sha256:7c9a5e412688bc771c67432cbfebcdd686c93ce6484913dccf06cb5a0bea35a1
-    # via
-    #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-    #   mdit-py-plugins
-    #   myst-parser
 markupsafe==2.1.3 \
     --hash=sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e \
     --hash=sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e \
@@ -580,45 +643,29 @@
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   pylint
-mdit-py-plugins==0.3.5 \
-    --hash=sha256:ca9a0714ea59a24b2b044a1831f48d817dd0c817e84339f20e7889f392d77c4e \
-    --hash=sha256:eee0adc7195e5827e17e02d2a258a2ba159944a0748f59c5099a4a27f78fcf6a
-    # via
-    #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-    #   myst-parser
-mdurl==0.1.2 \
-    --hash=sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8 \
-    --hash=sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba
-    # via
-    #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-    #   markdown-it-py
-mypy==1.0.1 \
-    --hash=sha256:0af4f0e20706aadf4e6f8f8dc5ab739089146b83fd53cb4a7e0e850ef3de0bb6 \
-    --hash=sha256:15b5a824b58c7c822c51bc66308e759243c32631896743f030daf449fe3677f3 \
-    --hash=sha256:17455cda53eeee0a4adb6371a21dd3dbf465897de82843751cf822605d152c8c \
-    --hash=sha256:2013226d17f20468f34feddd6aae4635a55f79626549099354ce641bc7d40262 \
-    --hash=sha256:24189f23dc66f83b839bd1cce2dfc356020dfc9a8bae03978477b15be61b062e \
-    --hash=sha256:27a0f74a298769d9fdc8498fcb4f2beb86f0564bcdb1a37b58cbbe78e55cf8c0 \
-    --hash=sha256:28cea5a6392bb43d266782983b5a4216c25544cd7d80be681a155ddcdafd152d \
-    --hash=sha256:448de661536d270ce04f2d7dddaa49b2fdba6e3bd8a83212164d4174ff43aa65 \
-    --hash=sha256:48525aec92b47baed9b3380371ab8ab6e63a5aab317347dfe9e55e02aaad22e8 \
-    --hash=sha256:5bc8d6bd3b274dd3846597855d96d38d947aedba18776aa998a8d46fabdaed76 \
-    --hash=sha256:5deb252fd42a77add936b463033a59b8e48eb2eaec2976d76b6878d031933fe4 \
-    --hash=sha256:5f546ac34093c6ce33f6278f7c88f0f147a4849386d3bf3ae193702f4fe31407 \
-    --hash=sha256:5fdd63e4f50e3538617887e9aee91855368d9fc1dea30da743837b0df7373bc4 \
-    --hash=sha256:65b122a993d9c81ea0bfde7689b3365318a88bde952e4dfa1b3a8b4ac05d168b \
-    --hash=sha256:71a808334d3f41ef011faa5a5cd8153606df5fc0b56de5b2e89566c8093a0c9a \
-    --hash=sha256:920169f0184215eef19294fa86ea49ffd4635dedfdea2b57e45cb4ee85d5ccaf \
-    --hash=sha256:93a85495fb13dc484251b4c1fd7a5ac370cd0d812bbfc3b39c1bafefe95275d5 \
-    --hash=sha256:a2948c40a7dd46c1c33765718936669dc1f628f134013b02ff5ac6c7ef6942bf \
-    --hash=sha256:c6c2ccb7af7154673c591189c3687b013122c5a891bb5651eca3db8e6c6c55bd \
-    --hash=sha256:c96b8a0c019fe29040d520d9257d8c8f122a7343a8307bf8d6d4a43f5c5bfcc8 \
-    --hash=sha256:d42a98e76070a365a1d1c220fcac8aa4ada12ae0db679cb4d910fabefc88b994 \
-    --hash=sha256:dbeb24514c4acbc78d205f85dd0e800f34062efcc1f4a4857c57e4b4b8712bff \
-    --hash=sha256:e60d0b09f62ae97a94605c3f73fd952395286cf3e3b9e7b97f60b01ddfbbda88 \
-    --hash=sha256:e64f48c6176e243ad015e995de05af7f22bbe370dbb5b32bd6988438ec873919 \
-    --hash=sha256:e831662208055b006eef68392a768ff83596035ffd6d846786578ba1714ba8f6 \
-    --hash=sha256:eda5c8b9949ed411ff752b9a01adda31afe7eae1e53e946dbdf9db23865e66c4
+mypy==1.5.0 \
+    --hash=sha256:1fe816e26e676c1311b9e04fd576543b873576d39439f7c24c8e5c7728391ecf \
+    --hash=sha256:2c9d570f53908cbea326ad8f96028a673b814d9dca7515bf71d95fa662c3eb6f \
+    --hash=sha256:35b13335c6c46a386577a51f3d38b2b5d14aa619e9633bb756bd77205e4bd09f \
+    --hash=sha256:372fd97293ed0076d52695849f59acbbb8461c4ab447858cdaeaf734a396d823 \
+    --hash=sha256:42170e68adb1603ccdc55a30068f72bcfcde2ce650188e4c1b2a93018b826735 \
+    --hash=sha256:69b32d0dedd211b80f1b7435644e1ef83033a2af2ac65adcdc87c38db68a86be \
+    --hash=sha256:725b57a19b7408ef66a0fd9db59b5d3e528922250fb56e50bded27fea9ff28f0 \
+    --hash=sha256:769ddb6bfe55c2bd9c7d6d7020885a5ea14289619db7ee650e06b1ef0852c6f4 \
+    --hash=sha256:79c520aa24f21852206b5ff2cf746dc13020113aa73fa55af504635a96e62718 \
+    --hash=sha256:84cf9f7d8a8a22bb6a36444480f4cbf089c917a4179fbf7eea003ea931944a7f \
+    --hash=sha256:9166186c498170e1ff478a7f540846b2169243feb95bc228d39a67a1a450cdc6 \
+    --hash=sha256:a2500ad063413bc873ae102cf655bf49889e0763b260a3a7cf544a0cbbf7e70a \
+    --hash=sha256:a551ed0fc02455fe2c1fb0145160df8336b90ab80224739627b15ebe2b45e9dc \
+    --hash=sha256:ad3109bec37cc33654de8db30fe8ff3a1bb57ea65144167d68185e6dced9868d \
+    --hash=sha256:b4ea3a0241cb005b0ccdbd318fb99619b21ae51bcf1660b95fc22e0e7d3ba4a1 \
+    --hash=sha256:c36011320e452eb30bec38b9fd3ba20569dc9545d7d4540d967f3ea1fab9c374 \
+    --hash=sha256:c8a7444d6fcac7e2585b10abb91ad900a576da7af8f5cffffbff6065d9115813 \
+    --hash=sha256:cbf18f8db7e5f060d61c91e334d3b96d6bb624ddc9ee8a1cde407b737acbca2c \
+    --hash=sha256:d145b81a8214687cfc1f85c03663a5bbe736777410e5580e54d526e7e904f564 \
+    --hash=sha256:eec5c927aa4b3e8b4781840f1550079969926d0a22ce38075f6cfcf4b13e3eb4 \
+    --hash=sha256:f3460f34b3839b9bc84ee3ed65076eb827cd99ed13ed08d723f9083cada4a212 \
+    --hash=sha256:f3940cf5845b2512b3ab95463198b0cdf87975dfd17fdcc6ce9709a9abe09e69
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
 mypy-extensions==1.0.0 \
     --hash=sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d \
@@ -631,10 +678,6 @@
     --hash=sha256:15604f6943b16c05db646903261e3b3e775cf7f7990b7c37b03d043a907b650d \
     --hash=sha256:24f3b0aecb06656e983f58e07c732a90577b9d7af3e1066fc2b663bbf0370248
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-myst-parser==0.18.1 \
-    --hash=sha256:61b275b85d9f58aa327f370913ae1bec26ebad372cc99f3ab85c8ec3ee8d9fb8 \
-    --hash=sha256:79317f4bb2c13053dd6e64f9da1ba1da6cd9c40c8a430c447a7b146a594c246d
-    # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
 packaging==23.0 \
     --hash=sha256:714ac14496c3e68c99c29b00845f7a2b85f3bb6f1078fd9f72fd20f0570002b2 \
     --hash=sha256:b6ad297f8907de0fa2fe1ccbd26fdaf387f5f47c7275fedf8cce89f99446cf97
@@ -773,19 +816,18 @@
     --hash=sha256:5609aa6da1123fccfae2e8431a67b4146aa7fad5b3889f808df12b110f230937 \
     --hash=sha256:cde854e662774c5457d688ca41615f6594187ba7067af101232df889a6b7a66b
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-pygments==2.14.0 \
-    --hash=sha256:b3ed06a9e8ac9a9aae5a6f5dbe78a8a58655d17b43b93c078f094ddc476ae297 \
-    --hash=sha256:fa7bd7bd2771287c0de303af8bfdfc731f51bd2c6a47ab69d117138893b82717
+pygments==2.16.1 \
+    --hash=sha256:13fc09fa63bc8d8671a6d247e1eb303c4b343eaee81d861f3404db2935653692 \
+    --hash=sha256:1daff0494820c69bc8941e407aa20f577374ee88364ee10a98fdbe0aece96e29
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   furo
     #   ipython
     #   ptpython
     #   sphinx
-    #   sphinx-tabs
-pylint==2.16.2 \
-    --hash=sha256:13b2c805a404a9bf57d002cd5f054ca4d40b0b87542bdaba5e05321ae8262c84 \
-    --hash=sha256:ff22dde9c2128cd257c145cfd51adeff0be7df4d80d669055f24a962b351bbe4
+pylint==2.17.5 \
+    --hash=sha256:73995fb8216d3bed149c8d51bba25b2c52a8251a2c8ac846ec668ce38fab5413 \
+    --hash=sha256:f7b601cbc06fef7e62a754e2b41294c2aa31f1cb659624b9a85bcba29eaf8252
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
 pyperclip==1.8.2 \
     --hash=sha256:105254a8b04934f0bc84e9c24eb360a591aaf6535c9def5f29d92af107a9bf57
@@ -800,9 +842,9 @@
     --hash=sha256:3c77e014170dfffbd816e6ffc205e9842efb10be9f58ec16d3e8675b4925cddb \
     --hash=sha256:c4451db6ba391ca6ca299fb3ec7bae67a5c55dde170964c7a14ceefec02f2cf0
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-pytz==2021.3 \
-    --hash=sha256:3672058bc3453457b622aab7a1c3bfd5ab0bdae451512f6cf25f64ed37f5b87c \
-    --hash=sha256:acad2d8b20a1af07d4e4c9d2e9285c5ed9104354062f275f3fcd88dcef4f1326
+pytz==2023.3 \
+    --hash=sha256:1d8ce29db189191fb55338ee6d0387d82ab59f3d00eac103412d64e0ebd0c588 \
+    --hash=sha256:a151b3abb88eda1d4e34a9814df37de2a80e301e68ba0fd856fb9b46bfbbbffb
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   babel
@@ -851,12 +893,10 @@
     --hash=sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585 \
     --hash=sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d \
     --hash=sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f
-    # via
-    #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-    #   myst-parser
-requests==2.27.1 \
-    --hash=sha256:68d7c56fd5a8999887728ef304a6d12edc7be74f1cfa47714fc8b414525c9a61 \
-    --hash=sha256:f22fa1e554c9ddfd16e6e41ac79759e17be9e492b3587efa038054674760e72d
+    # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
+requests==2.31.0 \
+    --hash=sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f \
+    --hash=sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   google-api-core
@@ -891,21 +931,17 @@
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   beautifulsoup4
-sphinx==5.3.0 \
-    --hash=sha256:060ca5c9f7ba57a08a1219e547b269fadf125ae25b06b9fa7f66768efb652d6d \
-    --hash=sha256:51026de0a9ff9fc13c05d74913ad66047e104f56a129ff73e174eb5c3ee794b5
+sphinx==7.1.2 \
+    --hash=sha256:780f4d32f1d7d1126576e0e5ecc19dc32ab76cd24e950228dcf7b1f6d3d9e22f \
+    --hash=sha256:d170a81825b2fcacb6dfd5a0d7f578a053e45d3f2b153fecc948c37344eb4cbe
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   breathe
     #   furo
-    #   myst-parser
     #   sphinx-argparse
     #   sphinx-basic-ng
     #   sphinx-copybutton
     #   sphinx-design
-    #   sphinx-rtd-theme
-    #   sphinx-tabs
-    #   sphinxcontrib-jquery
 sphinx-argparse==0.4.0 \
     --hash=sha256:73bee01f7276fae2bf621ccfe4d167af7306e7288e3482005405d9f826f9b037 \
     --hash=sha256:e0f34184eb56f12face774fbc87b880abdb9017a0998d1ec559b267e9697e449
@@ -920,21 +956,13 @@
     --hash=sha256:0842851b5955087a7ec7fc870b622cb168618ad408dee42692e9a5c97d071da8 \
     --hash=sha256:366251e28a6f6041514bfb5439425210418d6c750e98d3a695b73e56866a677a
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-sphinx-design==0.3.0 \
-    --hash=sha256:7183fa1fae55b37ef01bda5125a21ee841f5bbcbf59a35382be598180c4cefba \
-    --hash=sha256:823c1dd74f31efb3285ec2f1254caefed29d762a40cd676f58413a1e4ed5cc96
+sphinx-design==0.5.0 \
+    --hash=sha256:1af1267b4cea2eedd6724614f19dcc88fe2e15aff65d06b2f6252cee9c4f4c1e \
+    --hash=sha256:e8e513acea6f92d15c6de3b34e954458f245b8e761b45b63950f65373352ab00
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-sphinx-rtd-theme==1.2.0 \
-    --hash=sha256:a0d8bd1a2ed52e0b338cbe19c4b2eef3c5e7a048769753dac6a9f059c7b641b8 \
-    --hash=sha256:f823f7e71890abe0ac6aaa6013361ea2696fc8d3e1fa798f463e82bdb77eeff2
-    # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-sphinx-tabs==3.4.1 \
-    --hash=sha256:7cea8942aeccc5d01a995789c01804b787334b55927f29b36ba16ed1e7cb27c6 \
-    --hash=sha256:d2a09f9e8316e400d57503f6df1c78005fdde220e5af589cc79d493159e1b832
-    # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-sphinxcontrib-applehelp==1.0.2 \
-    --hash=sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a \
-    --hash=sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58
+sphinxcontrib-applehelp==1.0.4 \
+    --hash=sha256:29d341f67fb0f6f586b23ad80e072c8e6ad0b48417db2bde114a4c9746feb228 \
+    --hash=sha256:828f867945bbe39817c210a1abfd1bc4895c8b73fcaade56d45357a348a07d7e
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   sphinx
@@ -944,18 +972,12 @@
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   sphinx
-sphinxcontrib-htmlhelp==2.0.0 \
-    --hash=sha256:d412243dfb797ae3ec2b59eca0e52dac12e75a241bf0e4eb861e450d06c6ed07 \
-    --hash=sha256:f5f8bb2d0d629f398bf47d0d69c07bc13b65f75a81ad9e2f71a63d4b7a2f6db2
+sphinxcontrib-htmlhelp==2.0.1 \
+    --hash=sha256:0cbdd302815330058422b98a113195c9249825d681e18f11e8b1f78a2f11efff \
+    --hash=sha256:c38cb46dccf316c79de6e5515e1770414b797162b23cd3d06e67020e1d2a6903
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   sphinx
-sphinxcontrib-jquery==4.1 \
-    --hash=sha256:1620739f04e36a2c779f1a131a2dfd49b2fd07351bf1968ced074365933abc7a \
-    --hash=sha256:f936030d7d0147dd026a4f2b5a57343d233f1fc7b363f68b3d4f1cb0993878ae
-    # via
-    #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-    #   sphinx-rtd-theme
 sphinxcontrib-jsmath==1.0.1 \
     --hash=sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178 \
     --hash=sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8
@@ -1012,9 +1034,9 @@
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   ipython
     #   matplotlib-inline
-types-docutils==0.18.0 \
-    --hash=sha256:14f781eb28d89a1cd61f1c41bd0776ad3bb4e2333d317c37d2c67f2eaf5891fe \
-    --hash=sha256:f0305109169326edffd98c128f542f773be06a1502a17d96359c53e31082db19
+types-docutils==0.20.0.3 \
+    --hash=sha256:4928e790f42b99d5833990f99c8dd9fa9f16825f6ed30380ca981846d36870cd \
+    --hash=sha256:a930150d8e01a9170f9bca489f46808ddebccdd8bc1e47c07968a77e49fb9321
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   types-pygments
@@ -1024,9 +1046,9 @@
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   mypy-protobuf
-types-pygments==2.9.13 \
-    --hash=sha256:589b87a6295c2b8b40566e46ff5ab9b5097735b051158f36648f64acc2e98dbe \
-    --hash=sha256:8b4ff9be1208b1dce639e406fc4d4e6a4682b6b253a021f2a7ffff9dbce16944
+types-pygments==2.16.0.0 \
+    --hash=sha256:4624a547d5ba73c971fac5d6fd327141e85e65f6123448bee76f0c8557652a71 \
+    --hash=sha256:aa93e4664e2d6cfea7570cde156e3966bf939f9c7d736cd179c4c8e94f7600b2
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
 types-pyserial==3.5.0.7 \
     --hash=sha256:22b665a336539b85108e2f5d61e6cde1b59818eae78324c17bfe64edc1a2bd66 \
@@ -1036,9 +1058,9 @@
     --hash=sha256:7d340b19ca28cddfdba438ee638cd4084bde213e501a3978738543e27094775b \
     --hash=sha256:a461508f3096d1d5810ec5ab95d7eeecb651f3a15b71959999988942063bf01d
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-types-requests==2.28.11.15 \
-    --hash=sha256:a05e4c7bc967518fba5789c341ea8b0c942776ee474c7873129a61161978e586 \
-    --hash=sha256:fc8eaa09cc014699c6b63c60c2e3add0c8b09a410c818b5ac6e65f92a26dde09
+types-requests==2.31.0.2 \
+    --hash=sha256:56d181c85b5925cbc59f4489a57e72a8b2166f18273fd8ba7b6fe0c0b986f12a \
+    --hash=sha256:6aa3f7faf0ea52d728bb18c0a0d1522d9bfd8c72d26ff6f61bfc3d06a411cf40
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
 types-setuptools==67.8.0.0 \
     --hash=sha256:6df73340d96b238a4188b7b7668814b37e8018168aef1eef94a3b1872e3f60ff \
@@ -1065,11 +1087,10 @@
     #   black
     #   ipython
     #   mypy
-    #   myst-parser
     #   pylint
-urllib3==1.26.14 \
-    --hash=sha256:076907bf8fd355cde77728471316625a4d2f7e713c125f51953bb5b3eecf4f72 \
-    --hash=sha256:75edcdc2f7d85b137124a6c3c9fc3933cdeaa12ecb9a6a959f22797a0feca7e1
+urllib3==2.0.4 \
+    --hash=sha256:8d22f86aae8ef5e410d4f539fde9ce6b2113a001bb4d189e0aed70642d602b11 \
+    --hash=sha256:de7df1803967d2c2a98e4b11bb7d6bd9210474c46e8a0401514e3a42a75ebde4
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   requests
@@ -1269,13 +1290,12 @@
     --hash=sha256:408fb9a2b254c302f49db83c59f9aa0b4b0fd0ec25be3a5c51181327922ff63d \
     --hash=sha256:e3a234ba8455fe201eaa649cdac872d590089a18b661e39bbac7020978dd9c2e
     # via -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
-zipp==3.15.0 \
-    --hash=sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b \
-    --hash=sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556
+zipp==3.16.2 \
+    --hash=sha256:679e51dd4403591b2d6838a48de3d283f3d188412a9782faadf845f298736ba0 \
+    --hash=sha256:ebc15946aa78bd63458992fc81ec3b6f7b1e92d51c35e6de1c3804e73b799147
     # via
     #   -r /python/gen/pw_env_setup/pigweed_build_venv/compiled_requirements.txt
     #   importlib-metadata
-
 # The following packages are considered to be unsafe in a requirements file:
 pip==23.2 \
     --hash=sha256:78e5353a9dda374b462f2054f83a7b63f3f065c98236a68361845c1b0ee7e35f \
diff --git a/pw_env_setup/py/pw_env_setup/virtualenv_setup/upstream_requirements_windows_lock.txt b/pw_env_setup/py/pw_env_setup/virtualenv_setup/upstream_requirements_windows_lock.txt
index 6ac63eb..98865ed 100644
--- a/pw_env_setup/py/pw_env_setup/virtualenv_setup/upstream_requirements_windows_lock.txt
+++ b/pw_env_setup/py/pw_env_setup/virtualenv_setup/upstream_requirements_windows_lock.txt
@@ -4,9 +4,9 @@
 #
 #    pip-compile --allow-unsafe --generate-hashes --output-file='\upstream_requirements_windows_lock.txt' --strip-extras '\python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt'
 #
-alabaster==0.7.12 \
-    --hash=sha256:446438bdcca0e05bd45ea2de1668c1d9b032e1a9154c2c259092d77031ddd359 \
-    --hash=sha256:a661d72d58e6ea8a57f7a86e37d86716863ee5e92788398526d58b26a4e4dc02
+alabaster==0.7.13 \
+    --hash=sha256:1ee19aca801bbabb5ba3f5f258e4422dfa86f82f3e9cefb0859b283cdd7f62a3 \
+    --hash=sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   sphinx
@@ -16,9 +16,9 @@
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   ptpython
-astroid==2.14.2 \
-    --hash=sha256:0e0e3709d64fbffd3037e4ff403580550f14471fd3eaae9fa11cc9a5c7901153 \
-    --hash=sha256:a3cf9f02c53dd259144a7e8f3ccd75d67c9a8c716ef183e0c1f291bc5d7bb3cf
+astroid==2.15.6 \
+    --hash=sha256:389656ca57b6108f939cf5d2f9a2a825a3be50ba9d589670f393236e0a03b91c \
+    --hash=sha256:903f024859b7c7687d7a7f3a3f73b17301f8e42dfd9cc9df9d4418172d3e2dbd
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   pylint
@@ -28,9 +28,9 @@
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   stack-data
-babel==2.9.1 \
-    --hash=sha256:ab49e12b91d937cd11f0b67cb259a57ab4ad2b59ac7a3b41d6c06c0ac5b0def9 \
-    --hash=sha256:bc0c176f9f6a994582230df350aa6e05ba2ebe4b3ac317eab29d9be5d2768da0
+babel==2.12.1 \
+    --hash=sha256:b4246fb7677d3b98f501a39d43396d3cafdc8eadb045f4a31be01863f655c610 \
+    --hash=sha256:cc2d99999cd01d44420ae725a21c9e3711b3aadc7976d6147f622d8581963455
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   sphinx
@@ -73,9 +73,9 @@
     --hash=sha256:c91dfc2c2a4e50df0026f88d2215e166616e0c80e86004d0003ece0488db2739 \
     --hash=sha256:e6663f91b6feca5d06f2ccd49a10f254f9298cc1f7f49c46e498a0771b507104
     # via -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
-breathe==4.34.0 \
-    --hash=sha256:48804dcf0e607a89fb6ad88c729ef12743a42db03ae9489be4ef8f7c4011774a \
-    --hash=sha256:ac0768a5e84addad3e632028fe67749c567aba2b29088493b64c2c1634bcdba1
+breathe==4.35.0 \
+    --hash=sha256:5165541c3c67b6c7adde8b3ecfe895c6f7844783c4076b6d8d287e4f33d62386 \
+    --hash=sha256:52c581f42ca4310737f9e435e3851c3d1f15446205a85fbc272f1f97ed74f5be
     # via -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
 build==0.10.0 \
     --hash=sha256:af266720050a66c893a6096a2f410989eeac74ff9a68ba194b3f6473e8e26171 \
@@ -163,9 +163,82 @@
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   cryptography
-charset-normalizer==2.0.10 \
-    --hash=sha256:876d180e9d7432c5d1dfd4c5d26b72f099d503e8fcc0feb7532c9289be60fcbd \
-    --hash=sha256:cb957888737fc0bbcd78e3df769addb41fd1ff8cf950dc9e7ad7793f1bf44455
+charset-normalizer==3.2.0 \
+    --hash=sha256:04e57ab9fbf9607b77f7d057974694b4f6b142da9ed4a199859d9d4d5c63fe96 \
+    --hash=sha256:09393e1b2a9461950b1c9a45d5fd251dc7c6f228acab64da1c9c0165d9c7765c \
+    --hash=sha256:0b87549028f680ca955556e3bd57013ab47474c3124dc069faa0b6545b6c9710 \
+    --hash=sha256:1000fba1057b92a65daec275aec30586c3de2401ccdcd41f8a5c1e2c87078706 \
+    --hash=sha256:1249cbbf3d3b04902ff081ffbb33ce3377fa6e4c7356f759f3cd076cc138d020 \
+    --hash=sha256:1920d4ff15ce893210c1f0c0e9d19bfbecb7983c76b33f046c13a8ffbd570252 \
+    --hash=sha256:193cbc708ea3aca45e7221ae58f0fd63f933753a9bfb498a3b474878f12caaad \
+    --hash=sha256:1a100c6d595a7f316f1b6f01d20815d916e75ff98c27a01ae817439ea7726329 \
+    --hash=sha256:1f30b48dd7fa1474554b0b0f3fdfdd4c13b5c737a3c6284d3cdc424ec0ffff3a \
+    --hash=sha256:203f0c8871d5a7987be20c72442488a0b8cfd0f43b7973771640fc593f56321f \
+    --hash=sha256:246de67b99b6851627d945db38147d1b209a899311b1305dd84916f2b88526c6 \
+    --hash=sha256:2dee8e57f052ef5353cf608e0b4c871aee320dd1b87d351c28764fc0ca55f9f4 \
+    --hash=sha256:2efb1bd13885392adfda4614c33d3b68dee4921fd0ac1d3988f8cbb7d589e72a \
+    --hash=sha256:2f4ac36d8e2b4cc1aa71df3dd84ff8efbe3bfb97ac41242fbcfc053c67434f46 \
+    --hash=sha256:3170c9399da12c9dc66366e9d14da8bf7147e1e9d9ea566067bbce7bb74bd9c2 \
+    --hash=sha256:3b1613dd5aee995ec6d4c69f00378bbd07614702a315a2cf6c1d21461fe17c23 \
+    --hash=sha256:3bb3d25a8e6c0aedd251753a79ae98a093c7e7b471faa3aa9a93a81431987ace \
+    --hash=sha256:3bb7fda7260735efe66d5107fb7e6af6a7c04c7fce9b2514e04b7a74b06bf5dd \
+    --hash=sha256:41b25eaa7d15909cf3ac4c96088c1f266a9a93ec44f87f1d13d4a0e86c81b982 \
+    --hash=sha256:45de3f87179c1823e6d9e32156fb14c1927fcc9aba21433f088fdfb555b77c10 \
+    --hash=sha256:46fb8c61d794b78ec7134a715a3e564aafc8f6b5e338417cb19fe9f57a5a9bf2 \
+    --hash=sha256:48021783bdf96e3d6de03a6e39a1171ed5bd7e8bb93fc84cc649d11490f87cea \
+    --hash=sha256:4957669ef390f0e6719db3613ab3a7631e68424604a7b448f079bee145da6e09 \
+    --hash=sha256:5e86d77b090dbddbe78867a0275cb4df08ea195e660f1f7f13435a4649e954e5 \
+    --hash=sha256:6339d047dab2780cc6220f46306628e04d9750f02f983ddb37439ca47ced7149 \
+    --hash=sha256:681eb3d7e02e3c3655d1b16059fbfb605ac464c834a0c629048a30fad2b27489 \
+    --hash=sha256:6c409c0deba34f147f77efaa67b8e4bb83d2f11c8806405f76397ae5b8c0d1c9 \
+    --hash=sha256:7095f6fbfaa55defb6b733cfeb14efaae7a29f0b59d8cf213be4e7ca0b857b80 \
+    --hash=sha256:70c610f6cbe4b9fce272c407dd9d07e33e6bf7b4aa1b7ffb6f6ded8e634e3592 \
+    --hash=sha256:72814c01533f51d68702802d74f77ea026b5ec52793c791e2da806a3844a46c3 \
+    --hash=sha256:7a4826ad2bd6b07ca615c74ab91f32f6c96d08f6fcc3902ceeedaec8cdc3bcd6 \
+    --hash=sha256:7c70087bfee18a42b4040bb9ec1ca15a08242cf5867c58726530bdf3945672ed \
+    --hash=sha256:855eafa5d5a2034b4621c74925d89c5efef61418570e5ef9b37717d9c796419c \
+    --hash=sha256:8700f06d0ce6f128de3ccdbc1acaea1ee264d2caa9ca05daaf492fde7c2a7200 \
+    --hash=sha256:89f1b185a01fe560bc8ae5f619e924407efca2191b56ce749ec84982fc59a32a \
+    --hash=sha256:8b2c760cfc7042b27ebdb4a43a4453bd829a5742503599144d54a032c5dc7e9e \
+    --hash=sha256:8c2f5e83493748286002f9369f3e6607c565a6a90425a3a1fef5ae32a36d749d \
+    --hash=sha256:8e098148dd37b4ce3baca71fb394c81dc5d9c7728c95df695d2dca218edf40e6 \
+    --hash=sha256:94aea8eff76ee6d1cdacb07dd2123a68283cb5569e0250feab1240058f53b623 \
+    --hash=sha256:95eb302ff792e12aba9a8b8f8474ab229a83c103d74a750ec0bd1c1eea32e669 \
+    --hash=sha256:9bd9b3b31adcb054116447ea22caa61a285d92e94d710aa5ec97992ff5eb7cf3 \
+    --hash=sha256:9e608aafdb55eb9f255034709e20d5a83b6d60c054df0802fa9c9883d0a937aa \
+    --hash=sha256:a103b3a7069b62f5d4890ae1b8f0597618f628b286b03d4bc9195230b154bfa9 \
+    --hash=sha256:a386ebe437176aab38c041de1260cd3ea459c6ce5263594399880bbc398225b2 \
+    --hash=sha256:a38856a971c602f98472050165cea2cdc97709240373041b69030be15047691f \
+    --hash=sha256:a401b4598e5d3f4a9a811f3daf42ee2291790c7f9d74b18d75d6e21dda98a1a1 \
+    --hash=sha256:a7647ebdfb9682b7bb97e2a5e7cb6ae735b1c25008a70b906aecca294ee96cf4 \
+    --hash=sha256:aaf63899c94de41fe3cf934601b0f7ccb6b428c6e4eeb80da72c58eab077b19a \
+    --hash=sha256:b0dac0ff919ba34d4df1b6131f59ce95b08b9065233446be7e459f95554c0dc8 \
+    --hash=sha256:baacc6aee0b2ef6f3d308e197b5d7a81c0e70b06beae1f1fcacffdbd124fe0e3 \
+    --hash=sha256:bf420121d4c8dce6b889f0e8e4ec0ca34b7f40186203f06a946fa0276ba54029 \
+    --hash=sha256:c04a46716adde8d927adb9457bbe39cf473e1e2c2f5d0a16ceb837e5d841ad4f \
+    --hash=sha256:c0b21078a4b56965e2b12f247467b234734491897e99c1d51cee628da9786959 \
+    --hash=sha256:c1c76a1743432b4b60ab3358c937a3fe1341c828ae6194108a94c69028247f22 \
+    --hash=sha256:c4983bf937209c57240cff65906b18bb35e64ae872da6a0db937d7b4af845dd7 \
+    --hash=sha256:c4fb39a81950ec280984b3a44f5bd12819953dc5fa3a7e6fa7a80db5ee853952 \
+    --hash=sha256:c57921cda3a80d0f2b8aec7e25c8aa14479ea92b5b51b6876d975d925a2ea346 \
+    --hash=sha256:c8063cf17b19661471ecbdb3df1c84f24ad2e389e326ccaf89e3fb2484d8dd7e \
+    --hash=sha256:ccd16eb18a849fd8dcb23e23380e2f0a354e8daa0c984b8a732d9cfaba3a776d \
+    --hash=sha256:cd6dbe0238f7743d0efe563ab46294f54f9bc8f4b9bcf57c3c666cc5bc9d1299 \
+    --hash=sha256:d62e51710986674142526ab9f78663ca2b0726066ae26b78b22e0f5e571238dd \
+    --hash=sha256:db901e2ac34c931d73054d9797383d0f8009991e723dab15109740a63e7f902a \
+    --hash=sha256:e03b8895a6990c9ab2cdcd0f2fe44088ca1c65ae592b8f795c3294af00a461c3 \
+    --hash=sha256:e1c8a2f4c69e08e89632defbfabec2feb8a8d99edc9f89ce33c4b9e36ab63037 \
+    --hash=sha256:e4b749b9cc6ee664a3300bb3a273c1ca8068c46be705b6c31cf5d276f8628a94 \
+    --hash=sha256:e6a5bf2cba5ae1bb80b154ed68a3cfa2fa00fde979a7f50d6598d3e17d9ac20c \
+    --hash=sha256:e857a2232ba53ae940d3456f7533ce6ca98b81917d47adc3c7fd55dad8fab858 \
+    --hash=sha256:ee4006268ed33370957f55bf2e6f4d263eaf4dc3cfc473d1d90baff6ed36ce4a \
+    --hash=sha256:eef9df1eefada2c09a5e7a40991b9fc6ac6ef20b1372abd48d2794a316dc0449 \
+    --hash=sha256:f058f6963fd82eb143c692cecdc89e075fa0828db2e5b291070485390b2f1c9c \
+    --hash=sha256:f25c229a6ba38a35ae6e25ca1264621cc25d4d38dca2942a7fce0b67a4efe918 \
+    --hash=sha256:f2a1d0fd4242bd8643ce6f98927cf9c04540af6efa92323e9d3124f57727bfc1 \
+    --hash=sha256:f7560358a6811e52e9c4d142d497f1a6e10103d3a6881f18d04dbce3729c0e2c \
+    --hash=sha256:f779d3ad205f108d14e99bb3859aa7dd8e9c68874617c72354d7ecaec2a054ac \
+    --hash=sha256:f87f746ee241d30d6ed93969de31e5ffd09a2961a051e60ae6bddde9ec3583aa
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   requests
@@ -289,24 +362,22 @@
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   pylint
-docutils==0.18 \
-    --hash=sha256:a31688b2ea858517fa54293e5d5df06fbb875fb1f7e4c64529271b77781ca8fc \
-    --hash=sha256:c1d5dab2b11d16397406a282e53953fe495a46d69ae329f55aa98a5c4e3c5fbb
+docutils==0.20.1 \
+    --hash=sha256:96f387a2c5562db4476f09f13bbab2192e764cac08ebbf3a34a95d9b1e4a59d6 \
+    --hash=sha256:f08a4e276c3a1583a86dce3e34aba3fe04d02bba2dd51ed16106244e8a923e3b
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   breathe
-    #   myst-parser
     #   sphinx
-    #   sphinx-rtd-theme
-    #   sphinx-tabs
 executing==1.2.0 \
     --hash=sha256:0314a69e37426e3608aada02473b4161d4caf5a4b244d1d0c48072b8fee7bacc \
     --hash=sha256:19da64c18d2d851112f09c287f8d3dbbdf725ab0e569077efb6cdcbd3497c107
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   stack-data
-furo @ https://github.com/chadnorvell/furo/releases/download/2023.03.27/furo-2023.3.27.dev1-py3-none-any.whl \
-    --hash=sha256:8cee0af40350696f8d9bd7a261f2d452d05b3a98c2af710c6e12d7badfe54865
+furo==2023.8.19 \
+    --hash=sha256:12f99f87a1873b6746228cfde18f77244e6c1ffb85d7fed95e638aae70d80590 \
+    --hash=sha256:e671ee638ab3f1b472f4033b0167f502ab407830e0db0f843b1c1028119c9cd1
     # via -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
 google-api-core==2.7.1 \
     --hash=sha256:6be1fc59e2a7ba9f66808bbc22f976f81e4c3e7ab20fa0620ce42686288787d0 \
@@ -427,15 +498,15 @@
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   coloredlogs
-idna==3.3 \
-    --hash=sha256:84d9dd047ffa80596e0f246e2eab0b391788b0503584e8945f2368256d2735ff \
-    --hash=sha256:9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d
+idna==3.4 \
+    --hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \
+    --hash=sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   requests
-imagesize==1.3.0 \
-    --hash=sha256:1db2f82529e53c3e929e8926a1fa9235aa82d0bd0c580359c67ec31b2fddaa8c \
-    --hash=sha256:cd1750d452385ca327479d45b64d9c7729ecf0b3969a58148298c77092261f9d
+imagesize==1.4.1 \
+    --hash=sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b \
+    --hash=sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   sphinx
@@ -462,12 +533,11 @@
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   ipython
     #   ptpython
-jinja2==3.0.3 \
-    --hash=sha256:077ce6014f7b40d03b47d1f1ca4b0fc8328a692bd284016f806ed0eaca390ad8 \
-    --hash=sha256:611bb273cd68f3b993fabdc4064fc858c5b47a973cb5aa7999ec1ba405c87cd7
+jinja2==3.1.2 \
+    --hash=sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852 \
+    --hash=sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
-    #   myst-parser
     #   sphinx
 json5==0.9.11 \
     --hash=sha256:1aa54b80b5e507dfe31d12b7743a642e2ffa6f70bf73b8e3d7d1d5fba83d99bd \
@@ -517,13 +587,6 @@
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   astroid
-markdown-it-py==2.2.0 \
-    --hash=sha256:5a35f8d1870171d9acc47b99612dc146129b631baf04970128b568f190d0cc30 \
-    --hash=sha256:7c9a5e412688bc771c67432cbfebcdd686c93ce6484913dccf06cb5a0bea35a1
-    # via
-    #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
-    #   mdit-py-plugins
-    #   myst-parser
 markupsafe==2.1.3 \
     --hash=sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e \
     --hash=sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e \
@@ -590,45 +653,29 @@
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   pylint
-mdit-py-plugins==0.3.5 \
-    --hash=sha256:ca9a0714ea59a24b2b044a1831f48d817dd0c817e84339f20e7889f392d77c4e \
-    --hash=sha256:eee0adc7195e5827e17e02d2a258a2ba159944a0748f59c5099a4a27f78fcf6a
-    # via
-    #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
-    #   myst-parser
-mdurl==0.1.2 \
-    --hash=sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8 \
-    --hash=sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba
-    # via
-    #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
-    #   markdown-it-py
-mypy==1.0.1 \
-    --hash=sha256:0af4f0e20706aadf4e6f8f8dc5ab739089146b83fd53cb4a7e0e850ef3de0bb6 \
-    --hash=sha256:15b5a824b58c7c822c51bc66308e759243c32631896743f030daf449fe3677f3 \
-    --hash=sha256:17455cda53eeee0a4adb6371a21dd3dbf465897de82843751cf822605d152c8c \
-    --hash=sha256:2013226d17f20468f34feddd6aae4635a55f79626549099354ce641bc7d40262 \
-    --hash=sha256:24189f23dc66f83b839bd1cce2dfc356020dfc9a8bae03978477b15be61b062e \
-    --hash=sha256:27a0f74a298769d9fdc8498fcb4f2beb86f0564bcdb1a37b58cbbe78e55cf8c0 \
-    --hash=sha256:28cea5a6392bb43d266782983b5a4216c25544cd7d80be681a155ddcdafd152d \
-    --hash=sha256:448de661536d270ce04f2d7dddaa49b2fdba6e3bd8a83212164d4174ff43aa65 \
-    --hash=sha256:48525aec92b47baed9b3380371ab8ab6e63a5aab317347dfe9e55e02aaad22e8 \
-    --hash=sha256:5bc8d6bd3b274dd3846597855d96d38d947aedba18776aa998a8d46fabdaed76 \
-    --hash=sha256:5deb252fd42a77add936b463033a59b8e48eb2eaec2976d76b6878d031933fe4 \
-    --hash=sha256:5f546ac34093c6ce33f6278f7c88f0f147a4849386d3bf3ae193702f4fe31407 \
-    --hash=sha256:5fdd63e4f50e3538617887e9aee91855368d9fc1dea30da743837b0df7373bc4 \
-    --hash=sha256:65b122a993d9c81ea0bfde7689b3365318a88bde952e4dfa1b3a8b4ac05d168b \
-    --hash=sha256:71a808334d3f41ef011faa5a5cd8153606df5fc0b56de5b2e89566c8093a0c9a \
-    --hash=sha256:920169f0184215eef19294fa86ea49ffd4635dedfdea2b57e45cb4ee85d5ccaf \
-    --hash=sha256:93a85495fb13dc484251b4c1fd7a5ac370cd0d812bbfc3b39c1bafefe95275d5 \
-    --hash=sha256:a2948c40a7dd46c1c33765718936669dc1f628f134013b02ff5ac6c7ef6942bf \
-    --hash=sha256:c6c2ccb7af7154673c591189c3687b013122c5a891bb5651eca3db8e6c6c55bd \
-    --hash=sha256:c96b8a0c019fe29040d520d9257d8c8f122a7343a8307bf8d6d4a43f5c5bfcc8 \
-    --hash=sha256:d42a98e76070a365a1d1c220fcac8aa4ada12ae0db679cb4d910fabefc88b994 \
-    --hash=sha256:dbeb24514c4acbc78d205f85dd0e800f34062efcc1f4a4857c57e4b4b8712bff \
-    --hash=sha256:e60d0b09f62ae97a94605c3f73fd952395286cf3e3b9e7b97f60b01ddfbbda88 \
-    --hash=sha256:e64f48c6176e243ad015e995de05af7f22bbe370dbb5b32bd6988438ec873919 \
-    --hash=sha256:e831662208055b006eef68392a768ff83596035ffd6d846786578ba1714ba8f6 \
-    --hash=sha256:eda5c8b9949ed411ff752b9a01adda31afe7eae1e53e946dbdf9db23865e66c4
+mypy==1.5.0 \
+    --hash=sha256:1fe816e26e676c1311b9e04fd576543b873576d39439f7c24c8e5c7728391ecf \
+    --hash=sha256:2c9d570f53908cbea326ad8f96028a673b814d9dca7515bf71d95fa662c3eb6f \
+    --hash=sha256:35b13335c6c46a386577a51f3d38b2b5d14aa619e9633bb756bd77205e4bd09f \
+    --hash=sha256:372fd97293ed0076d52695849f59acbbb8461c4ab447858cdaeaf734a396d823 \
+    --hash=sha256:42170e68adb1603ccdc55a30068f72bcfcde2ce650188e4c1b2a93018b826735 \
+    --hash=sha256:69b32d0dedd211b80f1b7435644e1ef83033a2af2ac65adcdc87c38db68a86be \
+    --hash=sha256:725b57a19b7408ef66a0fd9db59b5d3e528922250fb56e50bded27fea9ff28f0 \
+    --hash=sha256:769ddb6bfe55c2bd9c7d6d7020885a5ea14289619db7ee650e06b1ef0852c6f4 \
+    --hash=sha256:79c520aa24f21852206b5ff2cf746dc13020113aa73fa55af504635a96e62718 \
+    --hash=sha256:84cf9f7d8a8a22bb6a36444480f4cbf089c917a4179fbf7eea003ea931944a7f \
+    --hash=sha256:9166186c498170e1ff478a7f540846b2169243feb95bc228d39a67a1a450cdc6 \
+    --hash=sha256:a2500ad063413bc873ae102cf655bf49889e0763b260a3a7cf544a0cbbf7e70a \
+    --hash=sha256:a551ed0fc02455fe2c1fb0145160df8336b90ab80224739627b15ebe2b45e9dc \
+    --hash=sha256:ad3109bec37cc33654de8db30fe8ff3a1bb57ea65144167d68185e6dced9868d \
+    --hash=sha256:b4ea3a0241cb005b0ccdbd318fb99619b21ae51bcf1660b95fc22e0e7d3ba4a1 \
+    --hash=sha256:c36011320e452eb30bec38b9fd3ba20569dc9545d7d4540d967f3ea1fab9c374 \
+    --hash=sha256:c8a7444d6fcac7e2585b10abb91ad900a576da7af8f5cffffbff6065d9115813 \
+    --hash=sha256:cbf18f8db7e5f060d61c91e334d3b96d6bb624ddc9ee8a1cde407b737acbca2c \
+    --hash=sha256:d145b81a8214687cfc1f85c03663a5bbe736777410e5580e54d526e7e904f564 \
+    --hash=sha256:eec5c927aa4b3e8b4781840f1550079969926d0a22ce38075f6cfcf4b13e3eb4 \
+    --hash=sha256:f3460f34b3839b9bc84ee3ed65076eb827cd99ed13ed08d723f9083cada4a212 \
+    --hash=sha256:f3940cf5845b2512b3ab95463198b0cdf87975dfd17fdcc6ce9709a9abe09e69
     # via -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
 mypy-extensions==1.0.0 \
     --hash=sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d \
@@ -641,10 +688,6 @@
     --hash=sha256:15604f6943b16c05db646903261e3b3e775cf7f7990b7c37b03d043a907b650d \
     --hash=sha256:24f3b0aecb06656e983f58e07c732a90577b9d7af3e1066fc2b663bbf0370248
     # via -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
-myst-parser==0.18.1 \
-    --hash=sha256:61b275b85d9f58aa327f370913ae1bec26ebad372cc99f3ab85c8ec3ee8d9fb8 \
-    --hash=sha256:79317f4bb2c13053dd6e64f9da1ba1da6cd9c40c8a430c447a7b146a594c246d
-    # via -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
 packaging==23.0 \
     --hash=sha256:714ac14496c3e68c99c29b00845f7a2b85f3bb6f1078fd9f72fd20f0570002b2 \
     --hash=sha256:b6ad297f8907de0fa2fe1ccbd26fdaf387f5f47c7275fedf8cce89f99446cf97
@@ -771,19 +814,18 @@
     --hash=sha256:5609aa6da1123fccfae2e8431a67b4146aa7fad5b3889f808df12b110f230937 \
     --hash=sha256:cde854e662774c5457d688ca41615f6594187ba7067af101232df889a6b7a66b
     # via -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
-pygments==2.14.0 \
-    --hash=sha256:b3ed06a9e8ac9a9aae5a6f5dbe78a8a58655d17b43b93c078f094ddc476ae297 \
-    --hash=sha256:fa7bd7bd2771287c0de303af8bfdfc731f51bd2c6a47ab69d117138893b82717
+pygments==2.16.1 \
+    --hash=sha256:13fc09fa63bc8d8671a6d247e1eb303c4b343eaee81d861f3404db2935653692 \
+    --hash=sha256:1daff0494820c69bc8941e407aa20f577374ee88364ee10a98fdbe0aece96e29
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   furo
     #   ipython
     #   ptpython
     #   sphinx
-    #   sphinx-tabs
-pylint==2.16.2 \
-    --hash=sha256:13b2c805a404a9bf57d002cd5f054ca4d40b0b87542bdaba5e05321ae8262c84 \
-    --hash=sha256:ff22dde9c2128cd257c145cfd51adeff0be7df4d80d669055f24a962b351bbe4
+pylint==2.17.5 \
+    --hash=sha256:73995fb8216d3bed149c8d51bba25b2c52a8251a2c8ac846ec668ce38fab5413 \
+    --hash=sha256:f7b601cbc06fef7e62a754e2b41294c2aa31f1cb659624b9a85bcba29eaf8252
     # via -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
 pyperclip==1.8.2 \
     --hash=sha256:105254a8b04934f0bc84e9c24eb360a591aaf6535c9def5f29d92af107a9bf57
@@ -804,9 +846,9 @@
     --hash=sha256:3c77e014170dfffbd816e6ffc205e9842efb10be9f58ec16d3e8675b4925cddb \
     --hash=sha256:c4451db6ba391ca6ca299fb3ec7bae67a5c55dde170964c7a14ceefec02f2cf0
     # via -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
-pytz==2021.3 \
-    --hash=sha256:3672058bc3453457b622aab7a1c3bfd5ab0bdae451512f6cf25f64ed37f5b87c \
-    --hash=sha256:acad2d8b20a1af07d4e4c9d2e9285c5ed9104354062f275f3fcd88dcef4f1326
+pytz==2023.3 \
+    --hash=sha256:1d8ce29db189191fb55338ee6d0387d82ab59f3d00eac103412d64e0ebd0c588 \
+    --hash=sha256:a151b3abb88eda1d4e34a9814df37de2a80e301e68ba0fd856fb9b46bfbbbffb
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   babel
@@ -855,12 +897,10 @@
     --hash=sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585 \
     --hash=sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d \
     --hash=sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f
-    # via
-    #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
-    #   myst-parser
-requests==2.27.1 \
-    --hash=sha256:68d7c56fd5a8999887728ef304a6d12edc7be74f1cfa47714fc8b414525c9a61 \
-    --hash=sha256:f22fa1e554c9ddfd16e6e41ac79759e17be9e492b3587efa038054674760e72d
+    # via -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
+requests==2.31.0 \
+    --hash=sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f \
+    --hash=sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   google-api-core
@@ -895,21 +935,17 @@
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   beautifulsoup4
-sphinx==5.3.0 \
-    --hash=sha256:060ca5c9f7ba57a08a1219e547b269fadf125ae25b06b9fa7f66768efb652d6d \
-    --hash=sha256:51026de0a9ff9fc13c05d74913ad66047e104f56a129ff73e174eb5c3ee794b5
+sphinx==7.1.2 \
+    --hash=sha256:780f4d32f1d7d1126576e0e5ecc19dc32ab76cd24e950228dcf7b1f6d3d9e22f \
+    --hash=sha256:d170a81825b2fcacb6dfd5a0d7f578a053e45d3f2b153fecc948c37344eb4cbe
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   breathe
     #   furo
-    #   myst-parser
     #   sphinx-argparse
     #   sphinx-basic-ng
     #   sphinx-copybutton
     #   sphinx-design
-    #   sphinx-rtd-theme
-    #   sphinx-tabs
-    #   sphinxcontrib-jquery
 sphinx-argparse==0.4.0 \
     --hash=sha256:73bee01f7276fae2bf621ccfe4d167af7306e7288e3482005405d9f826f9b037 \
     --hash=sha256:e0f34184eb56f12face774fbc87b880abdb9017a0998d1ec559b267e9697e449
@@ -924,21 +960,13 @@
     --hash=sha256:0842851b5955087a7ec7fc870b622cb168618ad408dee42692e9a5c97d071da8 \
     --hash=sha256:366251e28a6f6041514bfb5439425210418d6c750e98d3a695b73e56866a677a
     # via -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
-sphinx-design==0.3.0 \
-    --hash=sha256:7183fa1fae55b37ef01bda5125a21ee841f5bbcbf59a35382be598180c4cefba \
-    --hash=sha256:823c1dd74f31efb3285ec2f1254caefed29d762a40cd676f58413a1e4ed5cc96
+sphinx-design==0.5.0 \
+    --hash=sha256:1af1267b4cea2eedd6724614f19dcc88fe2e15aff65d06b2f6252cee9c4f4c1e \
+    --hash=sha256:e8e513acea6f92d15c6de3b34e954458f245b8e761b45b63950f65373352ab00
     # via -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
-sphinx-rtd-theme==1.2.0 \
-    --hash=sha256:a0d8bd1a2ed52e0b338cbe19c4b2eef3c5e7a048769753dac6a9f059c7b641b8 \
-    --hash=sha256:f823f7e71890abe0ac6aaa6013361ea2696fc8d3e1fa798f463e82bdb77eeff2
-    # via -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
-sphinx-tabs==3.4.1 \
-    --hash=sha256:7cea8942aeccc5d01a995789c01804b787334b55927f29b36ba16ed1e7cb27c6 \
-    --hash=sha256:d2a09f9e8316e400d57503f6df1c78005fdde220e5af589cc79d493159e1b832
-    # via -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
-sphinxcontrib-applehelp==1.0.2 \
-    --hash=sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a \
-    --hash=sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58
+sphinxcontrib-applehelp==1.0.4 \
+    --hash=sha256:29d341f67fb0f6f586b23ad80e072c8e6ad0b48417db2bde114a4c9746feb228 \
+    --hash=sha256:828f867945bbe39817c210a1abfd1bc4895c8b73fcaade56d45357a348a07d7e
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   sphinx
@@ -948,18 +976,12 @@
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   sphinx
-sphinxcontrib-htmlhelp==2.0.0 \
-    --hash=sha256:d412243dfb797ae3ec2b59eca0e52dac12e75a241bf0e4eb861e450d06c6ed07 \
-    --hash=sha256:f5f8bb2d0d629f398bf47d0d69c07bc13b65f75a81ad9e2f71a63d4b7a2f6db2
+sphinxcontrib-htmlhelp==2.0.1 \
+    --hash=sha256:0cbdd302815330058422b98a113195c9249825d681e18f11e8b1f78a2f11efff \
+    --hash=sha256:c38cb46dccf316c79de6e5515e1770414b797162b23cd3d06e67020e1d2a6903
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   sphinx
-sphinxcontrib-jquery==4.1 \
-    --hash=sha256:1620739f04e36a2c779f1a131a2dfd49b2fd07351bf1968ced074365933abc7a \
-    --hash=sha256:f936030d7d0147dd026a4f2b5a57343d233f1fc7b363f68b3d4f1cb0993878ae
-    # via
-    #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
-    #   sphinx-rtd-theme
 sphinxcontrib-jsmath==1.0.1 \
     --hash=sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178 \
     --hash=sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8
@@ -1016,9 +1038,9 @@
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   ipython
     #   matplotlib-inline
-types-docutils==0.18.0 \
-    --hash=sha256:14f781eb28d89a1cd61f1c41bd0776ad3bb4e2333d317c37d2c67f2eaf5891fe \
-    --hash=sha256:f0305109169326edffd98c128f542f773be06a1502a17d96359c53e31082db19
+types-docutils==0.20.0.3 \
+    --hash=sha256:4928e790f42b99d5833990f99c8dd9fa9f16825f6ed30380ca981846d36870cd \
+    --hash=sha256:a930150d8e01a9170f9bca489f46808ddebccdd8bc1e47c07968a77e49fb9321
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   types-pygments
@@ -1028,9 +1050,9 @@
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   mypy-protobuf
-types-pygments==2.9.13 \
-    --hash=sha256:589b87a6295c2b8b40566e46ff5ab9b5097735b051158f36648f64acc2e98dbe \
-    --hash=sha256:8b4ff9be1208b1dce639e406fc4d4e6a4682b6b253a021f2a7ffff9dbce16944
+types-pygments==2.16.0.0 \
+    --hash=sha256:4624a547d5ba73c971fac5d6fd327141e85e65f6123448bee76f0c8557652a71 \
+    --hash=sha256:aa93e4664e2d6cfea7570cde156e3966bf939f9c7d736cd179c4c8e94f7600b2
     # via -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
 types-pyserial==3.5.0.7 \
     --hash=sha256:22b665a336539b85108e2f5d61e6cde1b59818eae78324c17bfe64edc1a2bd66 \
@@ -1040,9 +1062,9 @@
     --hash=sha256:7d340b19ca28cddfdba438ee638cd4084bde213e501a3978738543e27094775b \
     --hash=sha256:a461508f3096d1d5810ec5ab95d7eeecb651f3a15b71959999988942063bf01d
     # via -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
-types-requests==2.28.11.15 \
-    --hash=sha256:a05e4c7bc967518fba5789c341ea8b0c942776ee474c7873129a61161978e586 \
-    --hash=sha256:fc8eaa09cc014699c6b63c60c2e3add0c8b09a410c818b5ac6e65f92a26dde09
+types-requests==2.31.0.2 \
+    --hash=sha256:56d181c85b5925cbc59f4489a57e72a8b2166f18273fd8ba7b6fe0c0b986f12a \
+    --hash=sha256:6aa3f7faf0ea52d728bb18c0a0d1522d9bfd8c72d26ff6f61bfc3d06a411cf40
     # via -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
 types-setuptools==67.8.0.0 \
     --hash=sha256:6df73340d96b238a4188b7b7668814b37e8018168aef1eef94a3b1872e3f60ff \
@@ -1069,11 +1091,10 @@
     #   black
     #   ipython
     #   mypy
-    #   myst-parser
     #   pylint
-urllib3==1.26.14 \
-    --hash=sha256:076907bf8fd355cde77728471316625a4d2f7e713c125f51953bb5b3eecf4f72 \
-    --hash=sha256:75edcdc2f7d85b137124a6c3c9fc3933cdeaa12ecb9a6a959f22797a0feca7e1
+urllib3==2.0.4 \
+    --hash=sha256:8d22f86aae8ef5e410d4f539fde9ce6b2113a001bb4d189e0aed70642d602b11 \
+    --hash=sha256:de7df1803967d2c2a98e4b11bb7d6bd9210474c46e8a0401514e3a42a75ebde4
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   requests
@@ -1273,13 +1294,12 @@
     --hash=sha256:408fb9a2b254c302f49db83c59f9aa0b4b0fd0ec25be3a5c51181327922ff63d \
     --hash=sha256:e3a234ba8455fe201eaa649cdac872d590089a18b661e39bbac7020978dd9c2e
     # via -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
-zipp==3.15.0 \
-    --hash=sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b \
-    --hash=sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556
+zipp==3.16.2 \
+    --hash=sha256:679e51dd4403591b2d6838a48de3d283f3d188412a9782faadf845f298736ba0 \
+    --hash=sha256:ebc15946aa78bd63458992fc81ec3b6f7b1e92d51c35e6de1c3804e73b799147
     # via
     #   -r \python\gen\pw_env_setup\pigweed_build_venv\compiled_requirements.txt
     #   importlib-metadata
-
 # The following packages are considered to be unsafe in a requirements file:
 pip==23.2 \
     --hash=sha256:78e5353a9dda374b462f2054f83a7b63f3f065c98236a68361845c1b0ee7e35f \
diff --git a/pw_env_setup/py/pw_env_setup/windows_env_start.py b/pw_env_setup/py/pw_env_setup/windows_env_start.py
index 6524960..bce9af5 100644
--- a/pw_env_setup/py/pw_env_setup/windows_env_start.py
+++ b/pw_env_setup/py/pw_env_setup/windows_env_start.py
@@ -26,7 +26,12 @@
 import os
 import sys
 
-from .colors import Color, enable_colors  # type: ignore
+try:
+    from pw_env_setup.colors import Color, enable_colors
+except ImportError:
+    # Load from this directory if pw_env_setup is not available.
+    from colors import Color, enable_colors  # type: ignore
+
 
 _PIGWEED_BANNER = u'''
  ▒█████▄   █▓  ▄███▒  ▒█    ▒█ ░▓████▒ ░▓████▒ ▒▓████▄
@@ -42,7 +47,21 @@
     enable_colors()
 
     print(Color.green('\n  WELCOME TO...'))
-    print(Color.magenta(_PIGWEED_BANNER))
+
+    banner_file = os.environ.get('PW_BRANDING_BANNER', None)
+    banner_str = None
+    if banner_file:
+        try:
+            banner_str = open(
+                banner_file, 'r', encoding='utf-8', errors='replace'
+            ).read()
+        except FileNotFoundError:
+            pass
+    if banner_str:
+        print()
+        print(banner_str, end='')
+    else:
+        print(Color.magenta(_PIGWEED_BANNER), end='')
 
     if bootstrap:
         print(
diff --git a/pw_file/BUILD.bazel b/pw_file/BUILD.bazel
index 4b16226..f616629 100644
--- a/pw_file/BUILD.bazel
+++ b/pw_file/BUILD.bazel
@@ -17,8 +17,8 @@
     "pw_cc_library",
     "pw_cc_test",
 )
-load("//pw_protobuf_compiler:pw_proto_library.bzl", "pw_proto_library")
 load("//pw_build/bazel_internal:py_proto_library.bzl", "py_proto_library")
+load("//pw_protobuf_compiler:pw_proto_library.bzl", "pw_proto_library")
 
 package(default_visibility = ["//visibility:public"])
 
diff --git a/pw_file/BUILD.gn b/pw_file/BUILD.gn
index f9ee8a6..280c7d1 100644
--- a/pw_file/BUILD.gn
+++ b/pw_file/BUILD.gn
@@ -26,6 +26,7 @@
   sources = [ "file.proto" ]
   prefix = "pw_file"
   deps = [ "$dir_pw_protobuf:common_protos" ]
+  python_package = "py"
 }
 
 pw_source_set("flat_file_system") {
diff --git a/pw_file/CMakeLists.txt b/pw_file/CMakeLists.txt
index 9bb56d1..671f340 100644
--- a/pw_file/CMakeLists.txt
+++ b/pw_file/CMakeLists.txt
@@ -16,6 +16,8 @@
 include($ENV{PW_ROOT}/pw_protobuf_compiler/proto.cmake)
 
 pw_add_library(pw_file.flat_file_system INTERFACE
+  PUBLIC_INCLUDES
+    public
   PUBLIC_DEPS
     pw_file.proto.pwpb
     pw_file.proto.raw_rpc
diff --git a/pw_file/py/BUILD.gn b/pw_file/py/BUILD.gn
new file mode 100644
index 0000000..f42bcfe
--- /dev/null
+++ b/pw_file/py/BUILD.gn
@@ -0,0 +1,31 @@
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/python.gni")
+
+pw_python_package("py") {
+  generate_setup = {
+    metadata = {
+      name = "pw_file"
+      version = "0.0.1"
+    }
+  }
+  sources = [ "pw_file/__init__.py" ]
+  python_deps = []
+  pylintrc = "$dir_pigweed/.pylintrc"
+  mypy_ini = "$dir_pigweed/.mypy.ini"
+  proto_library = "..:proto"
+}
diff --git a/pw_file/py/pw_file/__init__.py b/pw_file/py/pw_file/__init__.py
new file mode 100644
index 0000000..c3e1bdb
--- /dev/null
+++ b/pw_file/py/pw_file/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
diff --git a/pw_function/docs.rst b/pw_function/docs.rst
index 18041ef..a3e4157 100644
--- a/pw_function/docs.rst
+++ b/pw_function/docs.rst
@@ -23,36 +23,36 @@
 
 .. code-block:: c++
 
-  int Add(int a, int b) { return a + b; }
+   int Add(int a, int b) { return a + b; }
 
-  // Construct a Function object from a function pointer.
-  pw::Function<int(int, int)> add_function(Add);
+   // Construct a Function object from a function pointer.
+   pw::Function<int(int, int)> add_function(Add);
 
-  // Invoke the function object.
-  int result = add_function(3, 5);
-  EXPECT_EQ(result, 8);
+   // Invoke the function object.
+   int result = add_function(3, 5);
+   EXPECT_EQ(result, 8);
 
-  // Construct a function from a lambda.
-  pw::Function<int(int)> negate([](int value) { return -value; });
-  EXPECT_EQ(negate(27), -27);
+   // Construct a function from a lambda.
+   pw::Function<int(int)> negate([](int value) { return -value; });
+   EXPECT_EQ(negate(27), -27);
 
 Functions are nullable. Invoking a null function triggers a runtime assert.
 
 .. code-block:: c++
 
-  // A function initialized without a callable is implicitly null.
-  pw::Function<void()> null_function;
+   // A function initialized without a callable is implicitly null.
+   pw::Function<void()> null_function;
 
-  // Null functions may also be explicitly created or set.
-  pw::Function<void()> explicit_null_function(nullptr);
+   // Null functions may also be explicitly created or set.
+   pw::Function<void()> explicit_null_function(nullptr);
 
-  pw::Function<void()> function([]() {});  // Valid (non-null) function.
-  function = nullptr;  // Set to null, clearing the stored callable.
+   pw::Function<void()> function([]() {});  // Valid (non-null) function.
+   function = nullptr;  // Set to null, clearing the stored callable.
 
-  // Functions are comparable to nullptr.
-  if (function != nullptr) {
-    function();
-  }
+   // Functions are comparable to nullptr.
+   if (function != nullptr) {
+     function();
+   }
 
 :cpp:type:`pw::Function`'s default constructor is ``constexpr``, so
 default-constructed functions may be used in classes with ``constexpr``
@@ -60,16 +60,16 @@
 
 .. code-block:: c++
 
-  class MyClass {
-   public:
-    // Default construction of a pw::Function is constexpr.
-    constexpr MyClass() { ... }
+   class MyClass {
+    public:
+     // Default construction of a pw::Function is constexpr.
+     constexpr MyClass() { ... }
 
-    pw::Function<void(int)> my_function;
-  };
+     pw::Function<void(int)> my_function;
+   };
 
-  // pw::Function and classes that use it may be constant initialized.
-  constinit MyClass instance;
+   // pw::Function and classes that use it may be constant initialized.
+   constinit MyClass instance;
 
 Storage
 =======
@@ -88,36 +88,38 @@
 
 .. admonition:: Inline storage size
 
-  The default inline size of two pointers is sufficient to store most common
-  callable objects, including function pointers, simple non-capturing and
-  capturing lambdas, and lightweight custom classes.
+   The default inline size of two pointers is sufficient to store most common
+   callable objects, including function pointers, simple non-capturing and
+   capturing lambdas, and lightweight custom classes.
 
 .. code-block:: c++
 
-  // The lambda is moved into the function's internal storage.
-  pw::Function<int(int, int)> subtract([](int a, int b) { return a - b; });
+   // The lambda is moved into the function's internal storage.
+   pw::Function<int(int, int)> subtract([](int a, int b) { return a - b; });
 
-  // Functions can be also be constructed from custom classes that implement
-  // operator(). This particular object is large (8 ints of space).
-  class MyCallable {
-   public:
-    int operator()(int value);
+   // Functions can be also be constructed from custom classes that implement
+   // operator(). This particular object is large (8 ints of space).
+   class MyCallable {
+    public:
+     int operator()(int value);
 
-   private:
-    int data_[8];
-  };
+    private:
+     int data_[8];
+   };
 
-  // Compiler error: sizeof(MyCallable) exceeds function's inline storage size.
-  pw::Function<int(int)> function((MyCallable()));
+   // Compiler error: sizeof(MyCallable) exceeds function's inline storage size.
+   pw::Function<int(int)> function((MyCallable()));
 
 .. admonition:: Dynamic allocation
 
-  When ``PW_FUNCTION_ENABLE_DYNAMIC_ALLOCATION`` is enabled, a ``Function``
-  will use dynamic allocation to store callables that exceed the inline size.
-  When it is enabled but a compile-time check for the inlining is still required
-  ``pw::InlineFunction`` can be used.
+   When ``PW_FUNCTION_ENABLE_DYNAMIC_ALLOCATION`` is enabled, a ``Function``
+   will use dynamic allocation to store callables that exceed the inline size.
+   An Allocator can be optionally supplied as a template argument. When dynamic
+   allocation is enabled but a compile-time check for the inlining is still
+   required ``pw::InlineFunction`` can be used.
 
 .. warning::
+
    If ``PW_FUNCTION_ENABLE_DYNAMIC_ALLOCATION`` is enabled then attempts to cast
    from `:cpp:type:`pw::InlineFunction` to a regular :cpp:type:`pw::Function`
    will **ALWAYS** allocate memory.
@@ -132,6 +134,7 @@
 .. doxygentypedef:: pw::InlineFunction
 .. doxygentypedef:: pw::Callback
 .. doxygentypedef:: pw::InlineCallback
+.. doxygenfunction:: pw::bind_member
 
 ``pw::Function`` as a function parameter
 ========================================
@@ -140,12 +143,12 @@
 
 .. code-block:: c++
 
-  // Before:
-  void DoTheThing(int arg, void (*callback)(int result));
+   // Before:
+   void DoTheThing(int arg, void (*callback)(int result));
 
-  // After. Note that it is possible to have parameter names within the function
-  // signature template for clarity.
-  void DoTheThing(int arg, const pw::Function<void(int result)>& callback);
+   // After. Note that it is possible to have parameter names within the function
+   // signature template for clarity.
+   void DoTheThing(int arg, const pw::Function<void(int result)>& callback);
 
 :cpp:type:`pw::Function` is movable, but not copyable, so APIs must accept
 :cpp:type:`pw::Function` objects either by const reference (``const
@@ -157,29 +160,30 @@
 
 .. code-block:: c++
 
-  // This function calls a pw::Function but doesn't store it, so it takes a
-  // const reference.
-  void CallTheCallback(const pw::Function<void(int)>& callback) {
-    callback(123);
-  }
+   // This function calls a pw::Function but doesn't store it, so it takes a
+   // const reference.
+   void CallTheCallback(const pw::Function<void(int)>& callback) {
+     callback(123);
+   }
 
-  // This function move-assigns a pw::Function to another variable, so it takes
-  // an rvalue reference.
-  void StoreTheCallback(pw::Function<void(int)>&& callback) {
-    stored_callback_ = std::move(callback);
-  }
+   // This function move-assigns a pw::Function to another variable, so it takes
+   // an rvalue reference.
+   void StoreTheCallback(pw::Function<void(int)>&& callback) {
+     stored_callback_ = std::move(callback);
+   }
 
 .. admonition:: Rules of thumb for passing a :cpp:type:`pw::Function` to a function
 
    * **Pass by value**: Never.
-
      This results in unnecessary :cpp:type:`pw::Function` instances and move
      operations.
+
    * **Pass by const reference** (``const pw::Function&``): When the
      :cpp:type:`pw::Function` is only invoked.
 
      When a :cpp:type:`pw::Function` is called or inspected, but not moved, take
      a const reference to avoid copies and support temporaries.
+
    * **Pass by rvalue reference** (``pw::Function&&``): When the
      :cpp:type:`pw::Function` is moved.
 
@@ -190,6 +194,7 @@
      :cpp:type:`pw::Function` variable, which makes the transfer of ownership
      explicit. It is possible to move-assign from an lvalue reference, but this
      fails to make it obvious to the caller that the object is no longer valid.
+
    * **Pass by non-const reference** (``pw::Function&``): Rarely, when modifying
      a variable.
 
@@ -206,11 +211,11 @@
 
 .. code-block:: c++
 
-  // Implicitly creates a pw::Function from a capturing lambda and calls it.
-  CallTheCallback([this](int result) { result_ = result; });
+   // Implicitly creates a pw::Function from a capturing lambda and calls it.
+   CallTheCallback([this](int result) { result_ = result; });
 
-  // Implicitly creates a pw::Function from a capturing lambda and stores it.
-  StoreTheCallback([this](int result) { result_ = result; });
+   // Implicitly creates a pw::Function from a capturing lambda and stores it.
+   StoreTheCallback([this](int result) { result_ = result; });
 
 When working with an existing :cpp:type:`pw::Function` variable, the variable
 can be passed directly to functions that take a const reference. If the function
@@ -219,11 +224,11 @@
 
 .. code-block:: c++
 
-  // Accepts the pw::Function by const reference.
-  CallTheCallback(my_function_);
+   // Accepts the pw::Function by const reference.
+   CallTheCallback(my_function_);
 
-  // Takes ownership of the pw::Function.
-  void StoreTheCallback(std::move(my_function));
+   // Takes ownership of the pw::Function.
+   void StoreTheCallback(std::move(my_function));
 
 ``pw::Callback`` for one-shot functions
 =======================================
@@ -235,6 +240,11 @@
 
 Invoking ``pw::Function`` from a C-style API
 ============================================
+.. _trampoline layers: https://en.wikipedia.org/wiki/Trampoline_(computing)
+
+One use case for invoking ``pw_function`` from a C-style API is to automate
+the generation of `trampoline layers`_.
+
 .. doxygenfile:: pw_function/pointer.h
    :sections: detaileddescription
 
@@ -247,7 +257,7 @@
 ScopeGuard
 ----------
 .. doxygenclass:: pw::ScopeGuard
-    :members:
+   :members:
 
 ------------
 Size reports
@@ -271,10 +281,25 @@
 Design
 ------
 :cpp:type:`pw::Function` is an alias of
-`fit::function <https://cs.opensource.google/fuchsia/fuchsia/+/main:sdk/lib/fit/include/lib/fit/function.h;drc=f66f54fca0c11a1168d790bcc3d8a5a3d940218d>`_.
+`fit::function_impl <https://cs.opensource.google/fuchsia/fuchsia/+/main:sdk/lib/fit/include/lib/fit/function.h>`_.
 
 :cpp:type:`pw::Callback` is an alias of
-`fit::callback <https://cs.opensource.google/fuchsia/fuchsia/+/main:sdk/lib/fit/include/lib/fit/function.h;drc=f66f54fca0c11a1168d790bcc3d8a5a3d940218d>`_.
+`fit::callback_impl <https://cs.opensource.google/fuchsia/fuchsia/+/main:sdk/lib/fit/include/lib/fit/function.h>`_.
+
+.. _module-pw_function-non-literal:
+
+Why pw::Function is not a literal
+=================================
+The default constructor for ``pw::Function`` is ``constexpr`` but
+``pw::Function`` is not a literal type. Instances can be declared ``constinit``
+but can't be used in ``constexpr`` contexts. There are a few reasons for this:
+
+* ``pw::Function`` supports wrapping any callable type, and the wrapped type
+  might not be a literal type.
+* ``pw::Function`` stores inline callables in a bytes array, which is not
+  ``constexpr``-friendly.
+* ``pw::Function`` optionally uses dynamic allocation, which doesn't work in
+  ``constexpr`` contexts (at least before C++20).
 
 ------
 Zephyr
diff --git a/pw_function/public/pw_function/function.h b/pw_function/public/pw_function/function.h
index 99842b3..807b272 100644
--- a/pw_function/public/pw_function/function.h
+++ b/pw_function/public/pw_function/function.h
@@ -13,6 +13,8 @@
 // the License.
 #pragma once
 
+#include <cstddef>
+
 #include "lib/fit/function.h"
 #include "pw_function/config.h"
 
@@ -46,13 +48,19 @@
 ///   }
 ///
 /// @endcode
-template <typename Callable,
-          size_t inline_target_size =
-              function_internal::config::kInlineCallableSize>
+///
+/// @tparam Allocator The Allocator used to dynamically allocate the callable,
+/// if it exceeds `inline_target_size` and dynamic allocation is enabled. Its
+/// `value_type` is irrelevant, since it must support rebinding.
+template <typename FunctionType,
+          std::size_t inline_target_size =
+              function_internal::config::kInlineCallableSize,
+          typename Allocator = fit::default_callable_allocator>
 using Function = fit::function_impl<
     inline_target_size,
     /*require_inline=*/!function_internal::config::kEnableDynamicAllocation,
-    Callable>;
+    FunctionType,
+    Allocator>;
 
 /// Version of `pw::Function` that exclusively uses inline storage.
 ///
@@ -62,10 +70,10 @@
 ///
 // TODO(b/252852651): Remove warning above when conversion from
 // `fit::inline_function` to `fit::function` doesn't allocate anymore.
-template <typename Callable,
-          size_t inline_target_size =
+template <typename FunctionType,
+          std::size_t inline_target_size =
               function_internal::config::kInlineCallableSize>
-using InlineFunction = fit::inline_function<Callable, inline_target_size>;
+using InlineFunction = fit::inline_function<FunctionType, inline_target_size>;
 
 using Closure = Function<void()>;
 
@@ -79,18 +87,33 @@
 ///
 /// A `pw::Callback` in the "already called" state has the same state as a
 /// `pw::Callback` that has been assigned to `nullptr`.
-template <typename Callable,
-          size_t inline_target_size =
-              function_internal::config::kInlineCallableSize>
+template <typename FunctionType,
+          std::size_t inline_target_size =
+              function_internal::config::kInlineCallableSize,
+          typename Allocator = fit::default_callable_allocator>
 using Callback = fit::callback_impl<
     inline_target_size,
     /*require_inline=*/!function_internal::config::kEnableDynamicAllocation,
-    Callable>;
+    FunctionType,
+    Allocator>;
 
 /// Version of `pw::Callback` that exclusively uses inline storage.
-template <typename Callable,
-          size_t inline_target_size =
+template <typename FunctionType,
+          std::size_t inline_target_size =
               function_internal::config::kInlineCallableSize>
-using InlineCallback = fit::inline_callback<Callable, inline_target_size>;
+using InlineCallback = fit::inline_callback<FunctionType, inline_target_size>;
+
+/// Returns a `Callable` which, when called, invokes `method` on `instance`
+/// using the arguments provided.
+///
+/// This is useful for binding the `this` argument of a callable.
+///
+/// `pw::bind_member<&T::MethodName>(instance)` is roughly equivalent to
+/// `[instance](Arg arg1, ...) { instance->MethodName(arg1, ...) }`, albeit with
+/// proper support for overloads and argument forwarding.
+template <auto method, typename T>
+auto bind_member(T* instance) {
+  return fit::bind_member<method, T>(instance);
+}
 
 }  // namespace pw
diff --git a/pw_fuzzer/BUILD.gn b/pw_fuzzer/BUILD.gn
index 7bf7607..3b81538 100644
--- a/pw_fuzzer/BUILD.gn
+++ b/pw_fuzzer/BUILD.gn
@@ -115,9 +115,14 @@
   deps = [ ":fuzztest" ]
 }
 
-# This target should only be when defining a fuzzing toolchain, e.g. to set
+# This target should only be used when defining a fuzzing toolchain, e.g. to set
 # `pw_unit_test_GOOGLETEST_BACKEND = "$dir_pw_fuzzer:gtest"
-if (dir_pw_third_party_googletest != "") {
+# TODO: b/295961502 - Support running FuzzTest-based fuzzers on OSS-Fuzz.
+if (pw_toolchain_OSS_FUZZ_ENABLED) {
+  group("gtest") {
+    public_deps = [ "$dir_pw_unit_test:light" ]
+  }
+} else if (dir_pw_third_party_googletest != "") {
   group("gtest") {
     public_deps = [ "$dir_pw_third_party/googletest" ]
   }
@@ -130,9 +135,14 @@
   }
 }
 
-# This target should only be when defining a fuzzing toolchain, e.g. to set
+# This target should only be used when defining a fuzzing toolchain, e.g. to set
 # `pw_unit_test_MAIN = "$dir_pw_fuzzer:fuzztest_main"
-if (dir_pw_third_party_fuzztest != "") {
+# TODO: b/295961502 - Support running FuzzTest-based fuzzers on OSS-Fuzz.
+if (pw_toolchain_OSS_FUZZ_ENABLED) {
+  group("fuzztest_main") {
+    deps = [ "$dir_pw_unit_test:simple_printing_main" ]
+  }
+} else if (dir_pw_third_party_fuzztest != "") {
   group("fuzztest_main") {
     deps = [ "$dir_pw_third_party/fuzztest/fuzztest:fuzztest_gtest_main" ]
   }
@@ -162,7 +172,15 @@
 
     # OSS-Fuzz sets "-stdlib=libc++", which conflicts with the "-nostdinc++" set
     # by `pw_minimal_cpp_stdlib`.
-    cflags_cc += [ "-Wno-unused-command-line-argument" ]
+    if (cflags_cc + [ "-stdlib=libc++" ] - [ "-stdlib=libc++" ] != cflags_cc) {
+      cflags_cc += [ "-Wno-unused-command-line-argument" ]
+    }
+
+    # Disable UBSan vptr when the target is built with -fno-rtti.
+    if (cflags_cc + [ "-fno-rtti" ] - [ "-fno-rtti" ] != cflags_cc) {
+      cflags_cc += [ "-fno-sanitize=vptr" ]
+    }
+    cflags_cc += [ "-fcoverage-compilation-dir=" + getenv("PW_ROOT") ]
   } else {
     cflags = [ "-fsanitize=fuzzer-no-link" ]
   }
@@ -171,13 +189,18 @@
 # Add flags for linking against compiler-rt's libFuzzer. This is added
 # automatically by `pw_fuzzer`.
 config("libfuzzer_config") {
+  ldflags = []
+  engine = ""
   if (pw_toolchain_OSS_FUZZ_ENABLED) {
     # OSS-Fuzz manipulates linker flags directly. See
     # google.github.io/oss-fuzz/getting-started/new-project-guide/#Requirements.
-    ldflags = string_split(getenv("LDFLAGS")) + [ getenv("LIB_FUZZING_ENGINE") ]
-  } else {
-    ldflags = [ "-fsanitize=fuzzer" ]
+    ldflags = string_split(getenv("LDFLAGS"))
+    engine = getenv("LIB_FUZZING_ENGINE")
   }
+  if (engine == "") {
+    engine = "-fsanitize=fuzzer"
+  }
+  ldflags += [ engine ]
 }
 
 # Includes wrapper's for LLVM's libFuzzer compiler runtime library.
diff --git a/pw_fuzzer/fuzz_test.gni b/pw_fuzzer/fuzz_test.gni
new file mode 100644
index 0000000..ecaa46a
--- /dev/null
+++ b/pw_fuzzer/fuzz_test.gni
@@ -0,0 +1,42 @@
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_unit_test/test.gni")
+
+# Creates a unit test that may also include fuzzing test cases.
+#
+# This template merely updates test metadata and adds a dependency on FuzzTest
+# before forwarding to `pw_test`. The resulting test can always be run as a unit
+# test. If `dir_pw_third_party_fuzztest` is set and the toolchain used to build
+# the test supports fuzzing, it can also be run as a fuzzer.
+#
+# As with `pw_test`, targets defined using this template will produce test
+# metadata with a `test_type` of "unit_test" and an additional `test_directory`
+# value describing the location of the test binary within the build output.
+# These tests can be distinguished from other unit tests by additionally having
+# a "fuzztest" tag.
+#
+# Args:
+#   - All of the `pw_test` args are accepted.
+template("pw_fuzz_test") {
+  pw_test(target_name) {
+    deps = []
+    tags = []
+    forward_variables_from(invoker, "*")
+    deps += [ "$dir_pw_fuzzer:fuzztest" ]
+    tags += [ "fuzztest" ]
+  }
+}
diff --git a/pw_fuzzer/fuzzer.bzl b/pw_fuzzer/fuzzer.bzl
index 8080370..7d37dae 100644
--- a/pw_fuzzer/fuzzer.bzl
+++ b/pw_fuzzer/fuzzer.bzl
@@ -30,7 +30,7 @@
     # TODO(b/234877642): Remove this implicit dependency once we have a better
     # way to handle the facades without introducing a circular dependency into
     # the build.
-    kwargs["deps"].append("@pigweed_config//:pw_assert_backend")
+    kwargs["deps"].append("@pigweed_config//:pw_assert_backend_impl")
 
     # TODO(b/292628774): Only linux is supported for now.
     kwargs["target_compatible_with"] = ["@platforms//os:linux"]
diff --git a/pw_fuzzer/fuzzer.gni b/pw_fuzzer/fuzzer.gni
index c328ef4..aa1bdfa 100644
--- a/pw_fuzzer/fuzzer.gni
+++ b/pw_fuzzer/fuzzer.gni
@@ -15,9 +15,7 @@
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/error.gni")
-import("$dir_pw_third_party/abseil-cpp/abseil-cpp.gni")
-import("$dir_pw_third_party/fuzztest/fuzztest.gni")
-import("$dir_pw_third_party/re2/re2.gni")
+import("$dir_pw_build/test_info.gni")
 import("$dir_pw_toolchain/host_clang/toolchains.gni")
 import("$dir_pw_unit_test/test.gni")
 
@@ -36,8 +34,7 @@
 #
 # Args:
 #   - enable_test_if: (optional) Passed as `enable_if` to the unit test.
-#   Remaining arguments are the same as `pw_executable`.
-#
+#   - All of the `pw_executable` args are accepted.
 template("pw_fuzzer") {
   if (!pw_toolchain_FUZZING_ENABLED) {
     pw_error(target_name) {
@@ -50,6 +47,30 @@
     }
     not_needed(invoker, "*")
   } else {
+    # Metadata for this test when used as part of a pw_test_group target.
+    _fuzzer_target_name = target_name
+    _fuzzer_output_dir = "${target_out_dir}/bin"
+    if (defined(invoker.output_dir)) {
+      _fuzzer_output_dir = invoker.output_dir
+    }
+
+    _tags = [ "libfuzzer" ]
+    if (defined(invoker.tags)) {
+      _tags += invoker.tags
+    }
+
+    _test_metadata = "${target_name}.metadata"
+    _extra_metadata = {
+      forward_variables_from(invoker, [ "extra_metadata" ])
+      test_directory = rebase_path(_fuzzer_output_dir, root_build_dir)
+    }
+    pw_test_info(_test_metadata) {
+      test_type = "fuzz_test"
+      test_name = _fuzzer_target_name
+      tags = _tags
+      extra_metadata = _extra_metadata
+    }
+
     pw_executable(target_name) {
       configs = []
       deps = []
@@ -61,10 +82,20 @@
                              ])
       forward_variables_from(invoker, [ "visibility" ])
       configs += [ "$dir_pw_fuzzer:libfuzzer_config" ]
-      deps += [ "$dir_pw_fuzzer:libfuzzer" ]
+      deps += [
+        ":$_test_metadata",
+        "$dir_pw_fuzzer:libfuzzer",
+      ]
+      output_dir = _fuzzer_output_dir
+      metadata = {
+        test_barrier = [ ":$_test_metadata" ]
+      }
     }
   }
 
+  group(target_name + ".run") {
+  }
+
   pw_test("${target_name}_test") {
     deps = []
     forward_variables_from(invoker, "*", [ "visibility" ])
@@ -73,3 +104,43 @@
     enable_if = !defined(enable_test_if) || enable_test_if
   }
 }
+
+# Defines a related collection of fuzzers.
+#
+# This template wraps `pw_test_group` to collect a set of libFuzzer-based fuzzer
+# tests. These unit tests do not perform fuzzing. Instead, they execute the fuzz
+# target function with a set of fixed inputs to verify the fuzzer can be built
+# and run.
+#
+# If and only if the current toolchain supports fuzzing, this template will also
+# include the fuzzers themselves.
+#
+# As with `pw_test_group`, targets defined using this template will produce test
+# metadata with a `test_type` of "test_group" and an additional `deps` list
+# describing the tests collected by this target.
+#
+# Args:
+#   - fuzzers: List of `pw_fuzzer` targets for each of the fuzzers in the group.
+#
+#   - The following args have the same meaning as for `pw_python_action`:
+#         group_deps
+#         enable_if
+#         output_metadata
+template("pw_fuzzer_group") {
+  _with_fuzzers = pw_toolchain_FUZZING_ENABLED && pw_toolchain_SANITIZERS != []
+  pw_test_group(target_name) {
+    forward_variables_from(invoker,
+                           "*",
+                           [
+                             "fuzzers",
+                             "tests",
+                           ])
+    tests = []
+    foreach(fuzzer, invoker.fuzzers) {
+      if (_with_fuzzers) {
+        tests += [ fuzzer ]
+      }
+      tests += [ fuzzer + "_test" ]
+    }
+  }
+}
diff --git a/pw_fuzzer/guides/fuzztest.rst b/pw_fuzzer/guides/fuzztest.rst
index 0a6ad97..eab4140 100644
--- a/pw_fuzzer/guides/fuzztest.rst
+++ b/pw_fuzzer/guides/fuzztest.rst
@@ -196,14 +196,16 @@
 ------------------------------------
 Step 4: Add the fuzzer to your build
 ------------------------------------
-Indicate that the unit test includes one or more fuzz tests by adding a
-dependency on FuzzTest.
+Next, indicate that the unit test includes one or more fuzz tests.
 
 .. tab-set::
 
    .. tab-item:: GN
       :sync: gn
 
+      The ``pw_fuzz_test`` template can be used to add the necessary FuzzTest
+      dependency and generate test metadata.
+
       For example, consider the following ``BUILD.gn``:
 
       .. literalinclude:: ../examples/fuzztest/BUILD.gn
@@ -214,6 +216,9 @@
    .. tab-item:: CMake
       :sync: cmake
 
+      Unit tests can support fuzz tests by simply adding a dependency on
+      FuzzTest.
+
       For example, consider the following ``CMakeLists.txt``:
 
       .. literalinclude:: ../examples/fuzztest/CMakeLists.txt
@@ -224,6 +229,9 @@
    .. tab-item:: Bazel
       :sync: bazel
 
+      Unit tests can support fuzz tests by simply adding a dependency on
+      FuzzTest.
+
       For example, consider the following ``BUILD.bazel``:
 
       .. literalinclude:: ../examples/fuzztest/BUILD.bazel
@@ -242,13 +250,14 @@
       Build using ``ninja`` on a target that includes your fuzzer with a
       :ref:`fuzzing toolchain<module-pw_fuzzer-guides-using_fuzztest-toolchain>`.
 
-      For example, Pigweed itself includes a ``//:host_clang_fuzz`` target that
-      builds all tests, including those with fuzzers, using a fuzzing toolchain:
+      Pigweed includes a ``//:fuzzers`` target that builds all tests, including
+      those with fuzzers, using a fuzzing toolchain. You may wish to add a
+      similar top-level target to your project. For example:
 
       .. code-block::
 
-         group("host_clang_fuzz") {
-           deps = [ ":pigweed_default($_internal_toolchains:pw_strict_host_clang_fuzz)" ]
+         group("fuzzers") {
+           deps = [ ":pw_module_tests.run($dir_pigweed/targets/host:host_clang_fuzz)" ]
          }
 
    .. tab-item:: CMake
@@ -292,6 +301,15 @@
       will include the fuzzers, but only run them for a limited time. This makes
       them suitable for automated testing as in CQ.
 
+      If you used the top-level ``//:fuzzers`` target described in the previous
+      section, you can find available fuzzers using the generated JSON test
+      metadata file:
+
+      .. code-block:: sh
+
+         jq '.[] | select(contains({tags: ["fuzztest"]}))' \
+           out/host_clang_fuzz/obj/pw_module_tests.testinfo.json
+
       To run a fuzz with different options, you can pass additional flags to the
       fuzzer binary. This binary will be in a subdirectory related to the
       toolchain. For example:
@@ -337,12 +355,12 @@
 
       .. code-block:: sh
 
-         bazel run //my_module:metrics_test --config=fuzztest
+         bazel run //my_module:metrics_test --config=fuzztest \
            --fuzz=MetricsTest.Roundtrip
 
 Running the fuzzer should produce output similar to the following:
 
-.. code::
+.. code-block::
 
    [.] Sanitizer coverage enabled. Counter map size: 21290, Cmp map size: 262144
    Note: Google Test filter = MetricsTest.Roundtrip
diff --git a/pw_fuzzer/guides/libfuzzer.rst b/pw_fuzzer/guides/libfuzzer.rst
index 2cddd22..a0c51a4 100644
--- a/pw_fuzzer/guides/libfuzzer.rst
+++ b/pw_fuzzer/guides/libfuzzer.rst
@@ -98,7 +98,7 @@
 To write a fuzzer, a developer needs to write a `fuzz target function`_
 following the guidelines given by libFuzzer:
 
-.. code:: cpp
+.. code-block:: cpp
 
   extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
     DoSomethingInterestingWithMyAPI(data, size);
@@ -245,7 +245,7 @@
 
       Select a sanitizer runtime. See LLVM for `valid options`_.
 
-      .. code:: sh
+      .. code-block:: sh
 
         $ gn gen out --args='pw_toolchain_SANITIZERS=["address"]'
 
@@ -254,7 +254,7 @@
 
       Build the fuzzers using ``ninja`` directly.
 
-      .. code:: sh
+      .. code-block:: sh
 
         $ ninja -C out fuzzers
 
@@ -271,7 +271,7 @@
       :ref:`fuzzing toolchain<module-pw_fuzzer-guides-using_libfuzzer-toolchain>`
       via a ``--config`` when building fuzzers.
 
-      .. code:: sh
+      .. code-block:: sh
 
         $ bazel build //my_module:my_fuzzer --config=asan-libfuzzer
 
@@ -287,7 +287,7 @@
       Additional `libFuzzer options`_ and `corpus`_ arguments can be passed on
       the command line. For example:
 
-      .. code:: sh
+      .. code-block:: sh
 
         $ out/host_clang_fuzz/obj/my_module/bin/my_fuzzer -seed=1 path/to/corpus
 
@@ -308,14 +308,14 @@
       `libFuzzer options`_ and `corpus`_ arguments can be passed on the command
       line. For example:
 
-      .. code:: sh
+      .. code-block:: sh
 
         $ bazel run //my_module:my_fuzzer --config=asan-libfuzzer -- \
           -seed=1 path/to/corpus
 
 Running the fuzzer should produce output similar to the following:
 
-.. code::
+.. code-block::
 
    INFO: Seed: 305325345
    INFO: Loaded 1 modules   (46 inline 8-bit counters): 46 [0x38dfc0, 0x38dfee),
diff --git a/pw_fuzzer/guides/reproducing_oss_fuzz_bugs.rst b/pw_fuzzer/guides/reproducing_oss_fuzz_bugs.rst
index 42dd182..bec831c 100644
--- a/pw_fuzzer/guides/reproducing_oss_fuzz_bugs.rst
+++ b/pw_fuzzer/guides/reproducing_oss_fuzz_bugs.rst
@@ -1,7 +1,7 @@
 .. _module-pw_fuzzer-guides-reproducing_oss_fuzz_bugs:
 
 =============================================
-pw_fuzzer: Reproducing Bugs Found by OSS-Fuzz
+pw_fuzzer: Using OSS-Fuzz
 =============================================
 .. pigweed-module-subpage::
    :name: pw_fuzzer
@@ -14,6 +14,10 @@
 automatically start being run within a day or so of appearing in the git
 repository.
 
+-------------------------
+Reproducing Bugs Directly
+-------------------------
+
 Bugs produced by OSS-Fuzz can be found in its `Monorail instance`_. These bugs
 include:
 
@@ -24,41 +28,89 @@
 
 To reproduce a bug:
 
-#. Build the fuzzers.
-#. Download the minimized testcase.
+#. Build the fuzzers in a local source checkout using one of the
+   :ref:`module-pw_fuzzer-guides`.
+#. Download the minimized testcase from the OSS-Fuzz bug.
 #. Run the fuzzer with the testcase as an argument.
 
 For example, if the testcase is saved as ``~/Downloads/testcase``
 and the fuzzer is the same as in the examples above, you could run:
 
-.. code::
+.. code-block::
 
-  $ ./out/host/obj/pw_fuzzer/toy_fuzzer ~/Downloads/testcase
+   $ ./out/host/obj/pw_fuzzer/toy_fuzzer ~/Downloads/testcase
 
-If you need to recreate the OSS-Fuzz environment locally, you can use its
-documentation on `reproducing`_ issues.
+As noted in OSS-Fuzz's documentation on `timeouts and OOMs`_, you may want to
+add a ``-timeout=25`` or ``-rss_limit_mb=2560`` argument to reproduce timeouts or
+OOMs, respectively.
+
+---------------------------------
+Using an OSS-Fuzz Docker Instance
+---------------------------------
+
+If Pigweed fails to build for OSS-Fuzz, or if a fuzzer only triggers a bug in
+OSS-Fuzz and not when run directly, you may want to recreate the OSS-Fuzz
+environment locally using Docker. You can do so using OSS-Fuzz's documentation
+on `reproducing`_ issues.
 
 In particular, you can recreate the OSS-Fuzz environment using:
 
-.. code::
+.. code-block::
 
-  $ python infra/helper.py pull_images
-  $ python infra/helper.py build_image pigweed
-  $ python infra/helper.py build_fuzzers --sanitizer <address/undefined> pigweed
+   $ python infra/helper.py pull_images
+   $ python infra/helper.py build_image pigweed
+   $ python infra/helper.py build_fuzzers --sanitizer <address/undefined> pigweed
 
-With that environment, you can run the reproduce bugs using:
+Using a Local Source Checkout
+=============================
 
-.. code::
+When addressing build failures or issues related to specific fuzzers, it is
+very useful to have an OSS-Fuzz instance use a local source checkout with edits
+rather than pull from a public repo. Unfortunately, the normal workflow for
+`using a local source checkout`_ **does not work** for Pigweed. Pigweed provides
+an embedded development environment along with source code for individual
+modules, and this environment includes checks that conflict with the way
+OSS-Fuzz tries to remap and change ownership of the source code.
 
-  python infra/helper.py reproduce pigweed <pw_module>_<fuzzer_name> ~/Downloads/testcase
+To work around this, a helper script is provided as part of the ``pigweed``
+project on OSS-Fuzz that wraps the usual ``infra/helper.py``. For commands that
+take a local source path, the wrapper instead provides a ``--local`` flag. This
+flag will use the ``PW_ROOT`` environment variable to find the source checkout,
+and attempt to mount it in the correct location and set specific environment
+variables in order to present a working development environment to the OSS-Fuzz
+instance. Also, the ``pigweed`` project is implied:
 
-You can even verify fixes in your local source checkout:
+.. code-block::
 
-.. code::
+   $ python project/pigweed/helper.py build_fuzzers --sanitizer <sanitizer> --local
 
-  $ python infra/helper.py build_fuzzers --sanitizer <address/undefined> pigweed $PW_ROOT
-  $ python infra/helper.py reproduce pigweed <pw_module>_<fuzzer_name> ~/Downloads/testcase
+The ``sanitizer`` value is one of the usual values passed to Clang via
+``-fsanitize=...``, e.g. "address" or "undefined".
 
-.. _Monorail instance: https://bugs.chromium.org/p/oss-fuzz
+After building with a local source checkout, you can verify an issue previously
+found by a fuzzer is fixed:
+
+.. code-block::
+
+   $ python project/pigweed/helper.py reproduce <fuzzer> ~/Downloads/testcase
+
+For libFuzzer-based fuzzers, ``fuzzer`` will be of the form
+``{module_name}_{fuzzer_name}``, e.g. ``pw_protobuf_encoder_fuzzer``.
+
+For FuzzTest-based fuzzers, ``fuzzer`` will additionally include the test case
+and be of the form ``{module_name}_{fuzzer_name}@{test_case}``, e.g.
+``pw_hdlc_decoder_test@Decoder.ProcessNeverCrashes``.
+
+The helper script attempts to restore proper ownership of the source checkout to
+the current user on completion. This can also be triggered manually using:
+
+.. code-block::
+
+   $ python project/pigweed/helper.py reset_local
+
+
+.. _Monorail instance: https://bugs.chromium.org/p/oss-fuzz/issues/list?q=pigweed
 .. _OSS-Fuzz: https://github.com/google/oss-fuzz
 .. _reproducing: https://google.github.io/oss-fuzz/advanced-topics/reproducing/
+.. _timeouts and OOMs: https://google.github.io/oss-fuzz/faq/#how-do-you-handle-timeouts-and-ooms
+.. _using a local source checkout: https://google.github.io/oss-fuzz/advanced-topics/reproducing/#reproduce-using-local-source-checkout
diff --git a/pw_hdlc/BUILD.gn b/pw_hdlc/BUILD.gn
index 7ce7dc9..5b742dd 100644
--- a/pw_hdlc/BUILD.gn
+++ b/pw_hdlc/BUILD.gn
@@ -18,6 +18,7 @@
 import("$dir_pw_build/python.gni")
 import("$dir_pw_build/target_types.gni")
 import("$dir_pw_docgen/docs.gni")
+import("$dir_pw_fuzzer/fuzz_test.gni")
 import("$dir_pw_unit_test/test.gni")
 
 config("default_config") {
@@ -184,11 +185,8 @@
   ]
 }
 
-pw_test("decoder_test") {
-  deps = [
-    ":pw_hdlc",
-    "$dir_pw_fuzzer:fuzztest",
-  ]
+pw_fuzz_test("decoder_test") {
+  deps = [ ":pw_hdlc" ]
   source_gen_deps = [ ":generate_decoder_test" ]
   sources = [ "decoder_test.cc" ]
 
diff --git a/pw_hdlc/api.rst b/pw_hdlc/api.rst
index 18ed106..c90df8b 100644
--- a/pw_hdlc/api.rst
+++ b/pw_hdlc/api.rst
@@ -17,9 +17,9 @@
 The Encoder API provides a single function that encodes data as an HDLC
 unnumbered information frame.
 
-.. tabs::
+.. tab-set::
 
-   .. group-tab:: C++
+   .. tab-item:: C++
 
       .. doxygenfunction:: pw::hdlc::WriteUIFrame(uint64_t address, ConstByteSpan data, stream::Writer &writer)
 
@@ -44,7 +44,7 @@
            }
          }
 
-   .. group-tab:: Python
+   .. tab-item:: Python
 
       .. automodule:: pw_hdlc.encode
          :members:
@@ -65,7 +65,7 @@
          address = 123
          ser.write(encode.ui_frame(address, b'your data here!'))
 
-   .. group-tab:: TypeScript
+   .. tab-item:: TypeScript
 
       The Encoder class provides a way to build complete, escaped HDLC UI frames.
 
@@ -82,9 +82,9 @@
 =======
 
 
-.. tabs::
+.. tab-set::
 
-   .. group-tab:: C++
+   .. tab-item:: C++
 
       .. doxygenclass:: pw::hdlc::Decoder
          :members:
@@ -114,7 +114,7 @@
            }
          }
 
-   .. group-tab:: Python
+   .. tab-item:: Python
 
       .. autoclass:: pw_hdlc.decode.FrameDecoder
          :members:
@@ -149,7 +149,7 @@
         :members:
         :noindex:
 
-   .. group-tab:: TypeScript
+   .. tab-item:: TypeScript
 
       The decoder class unescapes received bytes and adds them to a buffer. Complete,
       valid HDLC frames are yielded as they are received.
@@ -172,9 +172,9 @@
 RPC
 ===
 
-.. tabs::
+.. tab-set::
 
-   .. group-tab:: C++
+   .. tab-item:: C++
 
       .. autoclass:: pw_hdlc.rpc.HdlcRpcClient
          :members:
diff --git a/pw_hdlc/design.rst b/pw_hdlc/design.rst
index 06e970d..d20d774 100644
--- a/pw_hdlc/design.rst
+++ b/pw_hdlc/design.rst
@@ -22,16 +22,16 @@
 
 .. code-block:: text
 
-    _________________________________________
-    | | | |                          |    | |...
-    | | | |                          |    | |... [More frames]
-    |_|_|_|__________________________|____|_|...
-     F A C       Payload              FCS  F
+   _________________________________________
+   | | | |                          |    | |...
+   | | | |                          |    | |... [More frames]
+   |_|_|_|__________________________|____|_|...
+    F A C       Payload              FCS  F
 
-     F = flag byte (0x7e, the ~ character)
-     A = address field
-     C = control field
-     FCS = frame check sequence (CRC-32)
+    F = flag byte (0x7e, the ~ character)
+    A = address field
+    C = control field
+    FCS = frame check sequence (CRC-32)
 
 
 Encoding and sending data
@@ -40,13 +40,13 @@
 beginning of the frame. Before sending any of the payload data through serial,
 the special bytes are escaped:
 
-            +-------------------------+-----------------------+
-            | Unescaped Special Bytes | Escaped Special Bytes |
-            +=========================+=======================+
-            |           7E            |        7D 5E          |
-            +-------------------------+-----------------------+
-            |           7D            |        7D 5D          |
-            +-------------------------+-----------------------+
++-------------------------+-----------------------+
+| Unescaped Special Bytes | Escaped Special Bytes |
++=========================+=======================+
+|           7E            |        7D 5E          |
++-------------------------+-----------------------+
+|           7D            |        7D 5D          |
++-------------------------+-----------------------+
 
 The bytes of the payload are escaped and written in a single pass. The
 frame check sequence is calculated, escaped, and written after. After this, a
diff --git a/pw_hdlc/guide.rst b/pw_hdlc/guide.rst
index f259f48..2029acf 100644
--- a/pw_hdlc/guide.rst
+++ b/pw_hdlc/guide.rst
@@ -13,9 +13,9 @@
 --------
 Encoding
 --------
-.. tabs::
+.. tab-set::
 
-   .. group-tab:: C++
+   .. tab-item:: C++
 
       ..
         TODO(b/279648188): Share this code between api.rst and guide.rst.
@@ -37,7 +37,7 @@
            }
          }
 
-   .. group-tab:: Python
+   .. tab-item:: Python
 
       ..
         TODO(b/279648188): Share this code between api.rst and guide.rst.
@@ -55,9 +55,9 @@
 
 Allocating buffers when encoding
 ================================
-.. tabs::
+.. tab-set::
 
-   .. group-tab:: C++
+   .. tab-item:: C++
 
       Since HDLC's encoding overhead changes with payload size and what data is being
       encoded, this module provides helper functions that are useful for determining
@@ -86,9 +86,9 @@
 --------
 Decoding
 --------
-.. tabs::
+.. tab-set::
 
-   .. group-tab:: C++
+   .. tab-item:: C++
 
       ..
         TODO(b/279648188): Share this code between api.rst and guide.rst.
@@ -114,7 +114,7 @@
            }
          }
 
-   .. group-tab:: Python
+   .. tab-item:: Python
 
       ..
         TODO(b/279648188): Share this code between api.rst and guide.rst.
@@ -135,9 +135,9 @@
 
 Allocating buffers when decoding
 ================================
-.. tabs::
+.. tab-set::
 
-   .. group-tab:: C++
+   .. tab-item:: C++
 
       The HDLC ``Decoder`` has its own helper for allocating a buffer since it doesn't
       need the entire escaped frame in-memory to decode, and therefore has slightly
diff --git a/pw_hdlc/public/pw_hdlc/decoder.h b/pw_hdlc/public/pw_hdlc/decoder.h
index 30956c8..8806850 100644
--- a/pw_hdlc/public/pw_hdlc/decoder.h
+++ b/pw_hdlc/public/pw_hdlc/decoder.h
@@ -74,6 +74,8 @@
 
   Decoder(const Decoder&) = delete;
   Decoder& operator=(const Decoder&) = delete;
+  Decoder(Decoder&&) = default;
+  Decoder& operator=(Decoder&&) = default;
 
   /// @brief Parses a single byte of an HDLC stream.
   ///
@@ -141,7 +143,7 @@
 
   bool VerifyFrameCheckSequence() const;
 
-  const ByteSpan buffer_;
+  ByteSpan buffer_;
 
   // Ring buffer of the last four bytes read into the current frame, to allow
   // calculating the frame's CRC incrementally. As data is evicted from this
diff --git a/pw_hdlc/py/pw_hdlc/rpc.py b/pw_hdlc/py/pw_hdlc/rpc.py
index a60c4b8..d8acb29 100644
--- a/pw_hdlc/py/pw_hdlc/rpc.py
+++ b/pw_hdlc/py/pw_hdlc/rpc.py
@@ -42,7 +42,7 @@
 from pw_hdlc.decode import Frame, FrameDecoder
 from pw_hdlc import encode
 
-_LOG = logging.getLogger(__name__)
+_LOG = logging.getLogger('pw_hdlc.rpc')
 
 STDOUT_ADDRESS = 1
 DEFAULT_ADDRESS = ord('R')
diff --git a/pw_hdlc/rpc_example/docs.rst b/pw_hdlc/rpc_example/docs.rst
index b2dfaee..e65e247 100644
--- a/pw_hdlc/rpc_example/docs.rst
+++ b/pw_hdlc/rpc_example/docs.rst
@@ -65,7 +65,7 @@
 is in the ``pw.rpc`` package. To invoke it synchronously, call
 ``rpcs.pw.rpc.EchoService.Echo``:
 
-.. code:: pycon
+.. code-block:: pycon
 
    >>> device.rpcs.pw.rpc.EchoService.Echo(msg='Hello, world!')
    (Status.OK, pw.rpc.EchoMessage(msg='Hello, world!'))
@@ -138,7 +138,7 @@
 
 Then you can invoke RPCs from the interactive console on the client side.
 
-.. code:: pycon
+.. code-block:: pycon
 
    >>> device.rpcs.pw.rpc.EchoService.Echo(msg='Hello, world!')
    (Status.OK, pw.rpc.EchoMessage(msg='Hello, world!'))
diff --git a/pw_i2c/BUILD.bazel b/pw_i2c/BUILD.bazel
index d17d9ea..212ae79 100644
--- a/pw_i2c/BUILD.bazel
+++ b/pw_i2c/BUILD.bazel
@@ -12,6 +12,7 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+load("@rules_proto//proto:defs.bzl", "proto_library")
 load(
     "//pw_build:pigweed.bzl",
     "pw_cc_library",
@@ -22,7 +23,6 @@
     "pw_proto_filegroup",
     "pw_proto_library",
 )
-load("@rules_proto//proto:defs.bzl", "proto_library")
 
 package(default_visibility = ["//visibility:public"])
 
diff --git a/pw_i2c/docs.rst b/pw_i2c/docs.rst
index 20da554..4ebddcf 100644
--- a/pw_i2c/docs.rst
+++ b/pw_i2c/docs.rst
@@ -3,10 +3,9 @@
 ------
 pw_i2c
 ------
-
 .. warning::
-  This module is under construction, not ready for use, and the documentation
-  is incomplete.
+   This module is under construction, not ready for use, and the documentation
+   is incomplete.
 
 pw_i2c contains interfaces and utility functions for using I2C.
 
@@ -15,13 +14,9 @@
 
 pw::i2c::Initiator
 ------------------
-.. inclusive-language: disable
-
 .. doxygenclass:: pw::i2c::Initiator
    :members:
 
-.. inclusive-language: enable
-
 pw::i2c::Device
 ---------------
 The common interface for interfacing with generic I2C devices. This object
@@ -30,7 +25,6 @@
 with devices with a single device address.
 
 .. note::
-
    ``Device`` is intended to represent ownership of a specific responder.
    Individual transactions are atomic (as described under ``Initiator``), but
    there is no synchronization for sequences of transactions. Therefore, shared
@@ -103,7 +97,7 @@
 -------------------
 Using the pigweed console, you can invoke the service to perform an I2C read:
 
-.. code:: python
+.. code-block:: python
 
   device.rpcs.pw.i2c.I2c.I2cRead(bus_index=0, target_address=0x22, register_address=b'\x0e', read_size=1)
 
@@ -112,14 +106,14 @@
 
 For responders that support 4 byte register width, you can specify as:
 
-.. code:: python
+.. code-block:: python
 
   device.rpcs.pw.i2c.I2c.I2cRead(bus_index=0, target_address=<address>, register_address=b'\x00\x00\x00\x00', read_size=4)
 
 
 And similarly, for performing I2C write:
 
-.. code:: python
+.. code-block:: python
 
   device.rpcs.pw.i2c.I2c.I2cWrite(bus_index=0, target_address=0x22,register_address=b'\x0e', value=b'\xbc')
 
diff --git a/pw_i2c/i2c_service.cc b/pw_i2c/i2c_service.cc
index 5d6da05..0dad619 100644
--- a/pw_i2c/i2c_service.cc
+++ b/pw_i2c/i2c_service.cc
@@ -40,12 +40,9 @@
     return;
   }
 
-  // Get the underlying buffer size of the register_address and value fields.
-  pwpb::I2cWriteRequest::Message size_message;
-  // NOLINTNEXTLINE(readability-static-accessed-through-instance)
   constexpr auto kMaxWriteSize =
-      size_message.register_address.max_size() + size_message.value.max_size();
-
+      pwpb::I2cWriteRequest::kRegisterAddressMaxSize +
+      pwpb::I2cWriteRequest::kValueMaxSize;
   Vector<std::byte, kMaxWriteSize> write_buffer{};
   write_buffer.assign(std::begin(request.register_address),
                       std::end(request.register_address));
@@ -62,10 +59,7 @@
 void I2cService::I2cRead(
     const pwpb::I2cReadRequest::Message& request,
     rpc::PwpbUnaryResponder<pwpb::I2cReadResponse::Message>& responder) {
-  // Get the underlying buffer size of the ReadResponse message.
-  pwpb::I2cReadResponse::Message size_message;
-  // NOLINTNEXTLINE(readability-static-accessed-through-instance)
-  constexpr auto kMaxReadSize = size_message.value.max_size();
+  constexpr auto kMaxReadSize = pwpb::I2cReadResponse::kValueMaxSize;
 
   Initiator* initiator = initiator_selector_(request.bus_index);
   if (initiator == nullptr || request.read_size > kMaxReadSize) {
diff --git a/pw_i2c/i2c_service_test.cc b/pw_i2c/i2c_service_test.cc
index 7f70dda..7f069da 100644
--- a/pw_i2c/i2c_service_test.cc
+++ b/pw_i2c/i2c_service_test.cc
@@ -170,8 +170,7 @@
 TEST(I2cServiceTest, I2cReadMaxByteOk) {
   constexpr auto kExpectWrite = bytes::Array<0x02, 0x04, 0x06, 0x08>();
   constexpr auto kExpectRead = bytes::Array<0x03, 0x05, 0x07, 0x09>();
-  pwpb::I2cReadResponse::Message size_message;
-  static_assert(sizeof(kExpectRead) <= size_message.value.max_size());
+  static_assert(sizeof(kExpectRead) <= pwpb::I2cReadResponse::kValueMaxSize);
 
   Vector<std::byte, 4> register_addr{};
   std::copy(kExpectWrite.begin(),
diff --git a/pw_i2c_linux/docs.rst b/pw_i2c_linux/docs.rst
index 34cbf79..9659d9b 100644
--- a/pw_i2c_linux/docs.rst
+++ b/pw_i2c_linux/docs.rst
@@ -18,7 +18,7 @@
 ========
 A simple example illustrating the usage:
 
-.. code:: C++
+.. code-block:: C++
 
    #include "pw_i2c/address.h"
    #include "pw_i2c/device.h"
@@ -41,7 +41,7 @@
 In real-world use cases, you may want to create an initiator singleton. This
 can be done by initializing a function-local static variable with a lambda:
 
-.. code:: C++
+.. code-block:: C++
 
    #include <functional>
 
@@ -81,4 +81,4 @@
 =======
 Only 7-bit addresses are supported right now, but it should be possible to add
 support for 10-bit addresses with minimal changes - as long as the Linux driver
-supports 10-bit addresses.
\ No newline at end of file
+supports 10-bit addresses.
diff --git a/pw_i2c_mcuxpresso/docs.rst b/pw_i2c_mcuxpresso/docs.rst
index 1247cd3..2211258 100644
--- a/pw_i2c_mcuxpresso/docs.rst
+++ b/pw_i2c_mcuxpresso/docs.rst
@@ -14,15 +14,15 @@
 =====
 This module requires following setup:
 
- 1. Use ``pw_build_mcuxpresso`` to create a ``pw_source_set`` for an
-    MCUXpresso SDK.
- 2. Include the i2c driver component in this SDK definition.
- 3. Specify the ``pw_third_party_mcuxpresso_SDK`` GN global variable to specify
-    the name of this source set.
- 4. Use ``pw::i2c::McuxpressoInitiator`` implementation of
-    ``pw::i2c::Initiator`` while creating ``pw::i2c::Device`` or
-    ``pw::i2c::RegisterDevice`` interface to access the I2C devices connected to
-    target.
+1. Use ``pw_build_mcuxpresso`` to create a ``pw_source_set`` for an
+   MCUXpresso SDK.
+2. Include the i2c driver component in this SDK definition.
+3. Specify the ``pw_third_party_mcuxpresso_SDK`` GN global variable to specify
+   the name of this source set.
+4. Use ``pw::i2c::McuxpressoInitiator`` implementation of
+   ``pw::i2c::Initiator`` while creating ``pw::i2c::Device`` or
+   ``pw::i2c::RegisterDevice`` interface to access the I2C devices connected to
+   target.
 
 Usage
 =====
diff --git a/pw_ide/py/pw_ide/cli.py b/pw_ide/py/pw_ide/cli.py
index ebd5ce6..999de30 100644
--- a/pw_ide/py/pw_ide/cli.py
+++ b/pw_ide/py/pw_ide/cli.py
@@ -334,6 +334,11 @@
         action='store_true',
         help='print the path to the Pigweed Python virtual environment',
     )
+    parser_python.add_argument(
+        '--install-editable',
+        metavar='MODULE',
+        help='install a Pigweed Python module in editable mode',
+    )
 
     parser_vscode = add_parser(cmd_vscode, 'vscode')
     parser_vscode.add_argument(
diff --git a/pw_ide/py/pw_ide/commands.py b/pw_ide/py/pw_ide/commands.py
index 0c0e427..0877286 100644
--- a/pw_ide/py/pw_ide/commands.py
+++ b/pw_ide/py/pw_ide/commands.py
@@ -16,6 +16,7 @@
 import logging
 from pathlib import Path
 import shlex
+import shutil
 import subprocess
 import sys
 from typing import cast, Dict, List, Optional, Set, Tuple
@@ -714,8 +715,51 @@
         )
 
 
+def install_py_module_as_editable(
+    module_name: str,
+    reporter: StatusReporter,
+) -> None:
+    """Install a Pigweed Python module in editable mode."""
+    reporter.info(f'Installing {module_name} as an editable module')
+    try:
+        site_packages_path = [
+            path for path in sys.path if 'site-packages' in path
+        ][0]
+    except IndexError:
+        reporter.err(f'Could not find {module_name} in the Python path!')
+        sys.exit(1)
+
+    reporter.info(f'Found {module_name} at: {site_packages_path}')
+    shutil.rmtree(Path(site_packages_path) / module_name)
+
+    try:
+        subprocess.run(
+            [
+                'pip',
+                'install',
+                '--no-deps',
+                '-e',
+                f'{module_name}/py',
+            ],
+            check=True,
+            stdout=subprocess.PIPE,
+        )
+    except subprocess.CalledProcessError:
+        reporter.err(
+            [
+                f'Failed to install {module_name}!',
+                'You may need to re-bootstrap',
+            ]
+        )
+
+    reporter.new('Success!')
+    reporter.wrn('Note that running bootstrap or building will reverse this.')
+
+
 def cmd_python(
-    should_print_venv: bool, reporter: StatusReporter = StatusReporter()
+    should_print_venv: bool,
+    install_editable: Optional[str] = None,
+    reporter: StatusReporter = StatusReporter(),
 ) -> None:
     """Configure Python code intelligence support.
 
@@ -725,11 +769,25 @@
     .. code-block:: bash
 
        pw ide python --venv
+
+    When working on Pigweed's Python modules, it can be convenient to install
+    them in editable mode to instantly realize code changes. You can do this by
+    running:
+
+    .. code-block:: bash
+
+       pw ide python --install-editable pw_{module name}
+
+    Just note that running bootstrap or building will override this.
     """
     # If true, no arguments were provided and we should do the default
     # behavior.
     default = True
 
+    if install_editable is not None:
+        default = False
+        install_py_module_as_editable(install_editable, reporter)
+
     if should_print_venv or default:
         reporter.info(
             [
diff --git a/pw_ide/vscode/package.json b/pw_ide/vscode/package.json
index 3528cae..b72cf78 100644
--- a/pw_ide/vscode/package.json
+++ b/pw_ide/vscode/package.json
@@ -4,7 +4,7 @@
   "description": "IDE features for Pigweed projects",
   "version": "0.0.1",
   "engines": {
-    "vscode": "^1.79.0"
+    "vscode": "^1.64.0"
   },
   "categories": [
     "Other"
@@ -28,7 +28,7 @@
     "test": "node ./out/test/runTest.js"
   },
   "devDependencies": {
-    "@types/vscode": "^1.79.0",
+    "@types/vscode": "^1.64.0",
     "@types/glob": "^8.1.0",
     "@types/hjson": "2.4.3",
     "@types/mocha": "^10.0.1",
diff --git a/pw_ide/vscode/pigweed-ide-0.0.1.vsix b/pw_ide/vscode/pigweed-ide-0.0.1.vsix
index 084e59a..3d8ab20 100644
--- a/pw_ide/vscode/pigweed-ide-0.0.1.vsix
+++ b/pw_ide/vscode/pigweed-ide-0.0.1.vsix
Binary files differ
diff --git a/pw_kvs/key_value_store.cc b/pw_kvs/key_value_store.cc
index 6dc570c..ae063da 100644
--- a/pw_kvs/key_value_store.cc
+++ b/pw_kvs/key_value_store.cc
@@ -24,7 +24,7 @@
 
 #include "pw_assert/check.h"
 #include "pw_kvs_private/config.h"
-#include "pw_log/shorter.h"
+#include "pw_log/log.h"
 #include "pw_status/try.h"
 
 namespace pw::kvs {
@@ -61,9 +61,10 @@
   error_detected_ = false;
   last_transaction_id_ = 0;
 
-  INF("Initializing key value store");
+  PW_LOG_INFO("Initializing key value store");
   if (partition_.sector_count() > sectors_.max_size()) {
-    ERR("KVS init failed: kMaxUsableSectors (=%u) must be at least as "
+    PW_LOG_ERROR(
+        "KVS init failed: kMaxUsableSectors (=%u) must be at least as "
         "large as the number of sectors in the flash partition (=%u)",
         unsigned(sectors_.max_size()),
         unsigned(partition_.sector_count()));
@@ -71,7 +72,8 @@
   }
 
   if (partition_.sector_count() < 2) {
-    ERR("KVS init failed: FlashParition sector count (=%u) must be at 2. KVS "
+    PW_LOG_ERROR(
+        "KVS init failed: FlashParition sector count (=%u) must be at 2. KVS "
         "requires at least 1 working sector + 1 free/reserved sector",
         unsigned(partition_.sector_count()));
     return Status::FailedPrecondition();
@@ -82,7 +84,8 @@
   // TODO(davidrogers): investigate doing this as a static assert/compile-time
   // check.
   if (sector_size_bytes > SectorDescriptor::max_sector_size()) {
-    ERR("KVS init failed: sector_size_bytes (=%u) is greater than maximum "
+    PW_LOG_ERROR(
+        "KVS init failed: sector_size_bytes (=%u) is greater than maximum "
         "allowed sector size (=%u)",
         unsigned(sector_size_bytes),
         unsigned(SectorDescriptor::max_sector_size()));
@@ -105,22 +108,24 @@
         if (metadata_result.IsOutOfRange()) {
           internal_stats_.missing_redundant_entries_recovered =
               pre_fix_redundancy_errors;
-          INF("KVS init: Redundancy level successfully updated");
+          PW_LOG_INFO("KVS init: Redundancy level successfully updated");
         } else {
-          WRN("KVS init: Corruption detected and fully repaired");
+          PW_LOG_WARN("KVS init: Corruption detected and fully repaired");
         }
         initialized_ = InitializationState::kReady;
       } else if (recovery_status.IsResourceExhausted()) {
-        WRN("KVS init: Unable to maintain required free sector");
+        PW_LOG_WARN("KVS init: Unable to maintain required free sector");
       } else {
-        WRN("KVS init: Corruption detected and unable repair");
+        PW_LOG_WARN("KVS init: Corruption detected and unable repair");
       }
     } else {
-      WRN("KVS init: Corruption detected, no repair attempted due to options");
+      PW_LOG_WARN(
+          "KVS init: Corruption detected, no repair attempted due to options");
     }
   }
 
-  INF("KeyValueStore init complete: active keys %u, deleted keys %u, sectors "
+  PW_LOG_INFO(
+      "KeyValueStore init complete: active keys %u, deleted keys %u, sectors "
       "%u, logical sector size %u bytes",
       unsigned(size()),
       unsigned(entry_cache_.total_entries() - size()),
@@ -129,7 +134,8 @@
 
   // Report any corruption was not repaired.
   if (error_detected_) {
-    WRN("KVS init: Corruption found but not repaired, KVS unavailable until "
+    PW_LOG_WARN(
+        "KVS init: Corruption found but not repaired, KVS unavailable until "
         "successful maintenance.");
     return Status::DataLoss();
   }
@@ -143,7 +149,7 @@
   sectors_.Reset();
   entry_cache_.Reset();
 
-  DBG("First pass: Read all entries from all sectors");
+  PW_LOG_DEBUG("First pass: Read all entries from all sectors");
   Address sector_address = 0;
 
   size_t total_corrupt_bytes = 0;
@@ -157,20 +163,21 @@
     size_t sector_corrupt_bytes = 0;
 
     for (int num_entries_in_sector = 0; true; num_entries_in_sector++) {
-      DBG("Load entry: sector=%u, entry#=%d, address=%u",
-          unsigned(sector_address),
-          num_entries_in_sector,
-          unsigned(entry_address));
+      PW_LOG_DEBUG("Load entry: sector=%u, entry#=%d, address=%u",
+                   unsigned(sector_address),
+                   num_entries_in_sector,
+                   unsigned(entry_address));
 
       if (!sectors_.AddressInSector(sector, entry_address)) {
-        DBG("Fell off end of sector; moving to the next sector");
+        PW_LOG_DEBUG("Fell off end of sector; moving to the next sector");
         break;
       }
 
       Address next_entry_address;
       Status status = LoadEntry(entry_address, &next_entry_address);
       if (status.IsNotFound()) {
-        DBG("Hit un-written data in sector; moving to the next sector");
+        PW_LOG_DEBUG(
+            "Hit un-written data in sector; moving to the next sector");
         break;
       } else if (!status.ok()) {
         // The entry could not be read, indicating likely data corruption within
@@ -211,9 +218,9 @@
       sector.mark_corrupt();
       error_detected_ = true;
 
-      WRN("Sector %u contains %uB of corrupt data",
-          sectors_.Index(sector),
-          unsigned(sector_corrupt_bytes));
+      PW_LOG_WARN("Sector %u contains %uB of corrupt data",
+                  sectors_.Index(sector),
+                  unsigned(sector_corrupt_bytes));
     }
 
     if (sector.Empty(sector_size_bytes)) {
@@ -223,7 +230,7 @@
     total_corrupt_bytes += sector_corrupt_bytes;
   }
 
-  DBG("Second pass: Count valid bytes in each sector");
+  PW_LOG_DEBUG("Second pass: Count valid bytes in each sector");
   Address newest_key = 0;
 
   // For every valid entry, for each address, count the valid bytes in that
@@ -232,10 +239,10 @@
   // initializing last_new_sector_.
   for (EntryMetadata& metadata : entry_cache_) {
     if (metadata.addresses().size() < redundancy()) {
-      DBG("Key 0x%08x missing copies, has %u, needs %u",
-          unsigned(metadata.hash()),
-          unsigned(metadata.addresses().size()),
-          unsigned(redundancy()));
+      PW_LOG_DEBUG("Key 0x%08x missing copies, has %u, needs %u",
+                   unsigned(metadata.hash()),
+                   unsigned(metadata.addresses().size()),
+                   unsigned(redundancy()));
       entry_copies_missing++;
     }
     size_t index = 0;
@@ -272,7 +279,7 @@
   sectors_.set_last_new_sector(newest_key);
 
   if (!empty_sector_found) {
-    DBG("No empty sector found");
+    PW_LOG_DEBUG("No empty sector found");
     error_detected_ = true;
   }
 
@@ -281,14 +288,16 @@
     error_detected_ = true;
 
     if (!other_errors && entry_copies_missing == entry_cache_.total_entries()) {
-      INF("KVS configuration changed to redundancy of %u total copies per key",
+      PW_LOG_INFO(
+          "KVS configuration changed to redundancy of %u total copies per key",
           unsigned(redundancy()));
       return Status::OutOfRange();
     }
   }
 
   if (error_detected_) {
-    WRN("Corruption detected. Found %u corrupt bytes, %u corrupt entries, "
+    PW_LOG_WARN(
+        "Corruption detected. Found %u corrupt bytes, %u corrupt entries, "
         "and %u keys missing redundant copies.",
         unsigned(total_corrupt_bytes),
         unsigned(corrupt_entries),
@@ -374,9 +383,9 @@
 Status KeyValueStore::ScanForEntry(const SectorDescriptor& sector,
                                    Address start_address,
                                    Address* next_entry_address) {
-  DBG("Scanning sector %u for entries starting from address %u",
-      sectors_.Index(sector),
-      unsigned(start_address));
+  PW_LOG_DEBUG("Scanning sector %u for entries starting from address %u",
+               sectors_.Index(sector),
+               unsigned(start_address));
 
   // Entries must start at addresses which are aligned on a multiple of
   // Entry::kMinAlignmentBytes. However, that multiple can vary between entries.
@@ -392,7 +401,7 @@
       continue;
     }
     if (formats_.KnownMagic(magic)) {
-      DBG("Found entry magic at address %u", unsigned(address));
+      PW_LOG_DEBUG("Found entry magic at address %u", unsigned(address));
       *next_entry_address = address;
       return OkStatus();
     }
@@ -448,14 +457,14 @@
 
 Status KeyValueStore::PutBytes(Key key, span<const byte> value) {
   PW_TRY(CheckWriteOperation(key));
-  DBG("Writing key/value; key length=%u, value length=%u",
-      unsigned(key.size()),
-      unsigned(value.size()));
+  PW_LOG_DEBUG("Writing key/value; key length=%u, value length=%u",
+               unsigned(key.size()),
+               unsigned(value.size()));
 
   if (Entry::size(partition_, key, value) > partition_.sector_size_bytes()) {
-    DBG("%u B value with %u B key cannot fit in one sector",
-        unsigned(value.size()),
-        unsigned(key.size()));
+    PW_LOG_DEBUG("%u B value with %u B key cannot fit in one sector",
+                 unsigned(value.size()),
+                 unsigned(key.size()));
     return Status::InvalidArgument();
   }
 
@@ -464,10 +473,10 @@
 
   if (status.ok()) {
     // TODO(davidrogers): figure out logging how to support multiple addresses.
-    DBG("Overwriting entry for key 0x%08x in %u sectors including %u",
-        unsigned(metadata.hash()),
-        unsigned(metadata.addresses().size()),
-        sectors_.Index(metadata.first_address()));
+    PW_LOG_DEBUG("Overwriting entry for key 0x%08x in %u sectors including %u",
+                 unsigned(metadata.hash()),
+                 unsigned(metadata.addresses().size()),
+                 sectors_.Index(metadata.first_address()));
     return WriteEntryForExistingKey(metadata, EntryState::kValid, key, value);
   }
 
@@ -485,10 +494,10 @@
   PW_TRY(FindExisting(key, &metadata));
 
   // TODO(davidrogers): figure out logging how to support multiple addresses.
-  DBG("Writing tombstone for key 0x%08x in %u sectors including %u",
-      unsigned(metadata.hash()),
-      unsigned(metadata.addresses().size()),
-      sectors_.Index(metadata.first_address()));
+  PW_LOG_DEBUG("Writing tombstone for key 0x%08x in %u sectors including %u",
+               unsigned(metadata.hash()),
+               unsigned(metadata.addresses().size()),
+               sectors_.Index(metadata.first_address()));
   return WriteEntryForExistingKey(metadata, EntryState::kDeleted, key, {});
 }
 
@@ -544,7 +553,7 @@
     sectors_.FromAddress(address).mark_corrupt();
   }
 
-  ERR("No valid entries for key. Data has been lost!");
+  PW_LOG_ERROR("No valid entries for key. Data has been lost!");
   return read_result;
 }
 
@@ -612,9 +621,9 @@
   PW_TRY_ASSIGN(const size_t actual_size, ValueSize(metadata));
 
   if (actual_size != size_bytes) {
-    DBG("Requested %u B read, but value is %u B",
-        unsigned(size_bytes),
-        unsigned(actual_size));
+    PW_LOG_DEBUG("Requested %u B read, but value is %u B",
+                 unsigned(size_bytes),
+                 unsigned(actual_size));
     return Status::InvalidArgument();
   }
 
@@ -677,14 +686,16 @@
       entry_cache_.full()) {
     Status maintenance_status = HeavyMaintenance();
     if (!maintenance_status.ok()) {
-      WRN("KVS Maintenance failed for write: %s", maintenance_status.str());
+      PW_LOG_WARN("KVS Maintenance failed for write: %s",
+                  maintenance_status.str());
       return maintenance_status;
     }
   }
 #endif  // PW_KVS_REMOVE_DELETED_KEYS_IN_HEAVY_MAINTENANCE
 
   if (entry_cache_.full()) {
-    WRN("KVS full: trying to store a new entry, but can't. Have %u entries",
+    PW_LOG_WARN(
+        "KVS full: trying to store a new entry, but can't. Have %u entries",
         unsigned(entry_cache_.total_entries()));
     return Status::ResourceExhausted();
   }
@@ -706,8 +717,8 @@
       prior_entry->ValueMatches(value).ok()) {
     // The new value matches the prior value, don't need to write anything. Just
     // keep the existing entry.
-    DBG("Write for key 0x%08x with matching value skipped",
-        unsigned(prior_metadata->hash()));
+    PW_LOG_DEBUG("Write for key 0x%08x with matching value skipped",
+                 unsigned(prior_metadata->hash()));
     return OkStatus();
   }
 
@@ -773,9 +784,9 @@
     PW_TRY(GetSectorForWrite(&sector, write_size, span(write_addresses, i)));
     write_addresses[i] = sectors_.NextWritableAddress(*sector);
 
-    DBG("Found space for entry in sector %u at address %u",
-        sectors_.Index(sector),
-        unsigned(write_addresses[i]));
+    PW_LOG_DEBUG("Found space for entry in sector %u at address %u",
+                 sectors_.Index(sector),
+                 unsigned(write_addresses[i]));
   }
 
   return OkStatus();
@@ -820,13 +831,13 @@
     // moving entries for keys other than the one being worked on in to sectors
     // that have copies of the key trying to be written.
     if (gc_sector_count > (partition_.sector_count() + 2)) {
-      ERR("Did more GC sectors than total sectors!!!!");
+      PW_LOG_ERROR("Did more GC sectors than total sectors!!!!");
       return Status::ResourceExhausted();
     }
   }
 
   if (!result.ok()) {
-    WRN("Unable to find sector to write %u B", unsigned(entry_size));
+    PW_LOG_WARN("Unable to find sector to write %u B", unsigned(entry_size));
   }
   return result;
 }
@@ -834,7 +845,7 @@
 Status KeyValueStore::MarkSectorCorruptIfNotOk(Status status,
                                                SectorDescriptor* sector) {
   if (!status.ok()) {
-    DBG("  Sector %u corrupt", sectors_.Index(sector));
+    PW_LOG_DEBUG("  Sector %u corrupt", sectors_.Index(sector));
     sector->mark_corrupt();
     error_detected_ = true;
   }
@@ -849,10 +860,10 @@
   SectorDescriptor& sector = sectors_.FromAddress(entry.address());
 
   if (!result.ok()) {
-    ERR("Failed to write %u bytes at %#x. %u actually written",
-        unsigned(entry.size()),
-        unsigned(entry.address()),
-        unsigned(result.size()));
+    PW_LOG_ERROR("Failed to write %u bytes at %#x. %u actually written",
+                 unsigned(entry.size()),
+                 unsigned(entry.address()),
+                 unsigned(result.size()));
     PW_TRY(MarkSectorCorruptIfNotOk(result.status(), &sector));
   }
 
@@ -923,7 +934,7 @@
 
   // Full maintenance can be a potentially heavy operation, and should be
   // relatively infrequent, so log start/end at INFO level.
-  INF("Beginning full maintenance");
+  PW_LOG_INFO("Beginning full maintenance");
   CheckForErrors();
 
   // Step 1: Repair errors
@@ -936,7 +947,7 @@
   Status overall_status = update_status.status();
 
   if (!overall_status.ok()) {
-    ERR("Failed to update all entries to the primary format");
+    PW_LOG_ERROR("Failed to update all entries to the primary format");
   }
 
   SectorDescriptor* sector = sectors_.last_new();
@@ -965,7 +976,7 @@
           (force_gc || sector->valid_bytes() == 0)) {
         gc_status = GarbageCollectSector(*sector, {});
         if (!gc_status.ok()) {
-          ERR("Failed to garbage collect all sectors");
+          PW_LOG_ERROR("Failed to garbage collect all sectors");
           break;
         }
       }
@@ -999,9 +1010,9 @@
 #endif  // PW_KVS_REMOVE_DELETED_KEYS_IN_HEAVY_MAINTENANCE
 
   if (overall_status.ok()) {
-    INF("Full maintenance complete");
+    PW_LOG_INFO("Full maintenance complete");
   } else {
-    ERR("Full maintenance finished with some errors");
+    PW_LOG_ERROR("Full maintenance finished with some errors");
   }
   return overall_status;
 }
@@ -1020,9 +1031,9 @@
 }
 
 Status KeyValueStore::GarbageCollect(span<const Address> reserved_addresses) {
-  DBG("Garbage Collect a single sector");
+  PW_LOG_DEBUG("Garbage Collect a single sector");
   for ([[maybe_unused]] Address address : reserved_addresses) {
-    DBG("   Avoid address %u", unsigned(address));
+    PW_LOG_DEBUG("   Avoid address %u", unsigned(address));
   }
 
   // Step 1: Find the sector to garbage collect
@@ -1044,9 +1055,9 @@
     span<const Address> reserved_addresses) {
   for (FlashPartition::Address& address : metadata.addresses()) {
     if (sectors_.AddressInSector(sector_to_gc, address)) {
-      DBG("  Relocate entry for Key 0x%08" PRIx32 ", sector %u",
-          metadata.hash(),
-          sectors_.Index(sectors_.FromAddress(address)));
+      PW_LOG_DEBUG("  Relocate entry for Key 0x%08" PRIx32 ", sector %u",
+                   metadata.hash(),
+                   sectors_.Index(sectors_.FromAddress(address)));
       PW_TRY(RelocateEntry(metadata, address, reserved_addresses));
     }
   }
@@ -1056,7 +1067,7 @@
 
 Status KeyValueStore::GarbageCollectSector(
     SectorDescriptor& sector_to_gc, span<const Address> reserved_addresses) {
-  DBG("  Garbage Collect sector %u", sectors_.Index(sector_to_gc));
+  PW_LOG_DEBUG("  Garbage Collect sector %u", sectors_.Index(sector_to_gc));
 
   // Step 1: Move any valid entries in the GC sector to other sectors
   if (sector_to_gc.valid_bytes() != 0) {
@@ -1067,7 +1078,8 @@
   }
 
   if (sector_to_gc.valid_bytes() != 0) {
-    ERR("  Failed to relocate valid entries from sector being garbage "
+    PW_LOG_ERROR(
+        "  Failed to relocate valid entries from sector being garbage "
         "collected, %u valid bytes remain",
         unsigned(sector_to_gc.valid_bytes()));
     return Status::Internal();
@@ -1081,7 +1093,8 @@
     sector_to_gc.set_writable_bytes(partition_.sector_size_bytes());
   }
 
-  DBG("  Garbage Collect sector %u complete", sectors_.Index(sector_to_gc));
+  PW_LOG_DEBUG("  Garbage Collect sector %u complete",
+               sectors_.Index(sector_to_gc));
   return OkStatus();
 }
 
@@ -1095,7 +1108,8 @@
       continue;
     }
 
-    DBG("Updating entry 0x%08x from old format [0x%08x] to new format "
+    PW_LOG_DEBUG(
+        "Updating entry 0x%08x from old format [0x%08x] to new format "
         "[0x%08x]",
         unsigned(prior_metadata.hash()),
         unsigned(entry.magic()),
@@ -1171,10 +1185,11 @@
       repair_status = OkStatus();
     }
 
-    DBG("   Pass %u", unsigned(loop_count));
+    PW_LOG_DEBUG("   Pass %u", unsigned(loop_count));
     for (SectorDescriptor& sector : sectors_) {
       if (sector.corrupt()) {
-        DBG("   Found sector %u with corruption", sectors_.Index(sector));
+        PW_LOG_DEBUG("   Found sector %u with corruption",
+                     sectors_.Index(sector));
         Status sector_status = GarbageCollectSector(sector, {});
         if (sector_status.ok()) {
           internal_stats_.corrupt_sectors_recovered += 1;
@@ -1183,7 +1198,7 @@
         }
       }
     }
-    DBG("   Pass %u complete", unsigned(loop_count));
+    PW_LOG_DEBUG("   Pass %u complete", unsigned(loop_count));
   } while (!repair_status.ok() && loop_count < 2);
 
   return repair_status;
@@ -1193,19 +1208,19 @@
   Status repair_status = OkStatus();
   bool empty_sector_found = false;
 
-  DBG("   Find empty sector");
+  PW_LOG_DEBUG("   Find empty sector");
   for (SectorDescriptor& sector : sectors_) {
     if (sector.Empty(partition_.sector_size_bytes())) {
       empty_sector_found = true;
-      DBG("   Empty sector found");
+      PW_LOG_DEBUG("   Empty sector found");
       break;
     }
   }
   if (empty_sector_found == false) {
-    DBG("   No empty sector found, attempting to GC a free sector");
+    PW_LOG_DEBUG("   No empty sector found, attempting to GC a free sector");
     Status sector_status = GarbageCollect(span<const Address, 0>());
     if (repair_status.ok() && !sector_status.ok()) {
-      DBG("   Unable to free an empty sector");
+      PW_LOG_DEBUG("   Unable to free an empty sector");
       repair_status = sector_status;
     }
   }
@@ -1217,11 +1232,12 @@
   Status repair_status = OkStatus();
 
   if (redundancy() == 1) {
-    DBG("   Redundancy not in use, nothting to check");
+    PW_LOG_DEBUG("   Redundancy not in use, nothting to check");
     return OkStatus();
   }
 
-  DBG("   Write any needed additional duplicate copies of keys to fulfill %u"
+  PW_LOG_DEBUG(
+      "   Write any needed additional duplicate copies of keys to fulfill %u"
       " redundancy",
       unsigned(redundancy()));
   for (EntryMetadata& metadata : entry_cache_) {
@@ -1229,15 +1245,15 @@
       continue;
     }
 
-    DBG("   Key with %u of %u copies found, adding missing copies",
-        unsigned(metadata.addresses().size()),
-        unsigned(redundancy()));
+    PW_LOG_DEBUG("   Key with %u of %u copies found, adding missing copies",
+                 unsigned(metadata.addresses().size()),
+                 unsigned(redundancy()));
     Status fill_status = AddRedundantEntries(metadata);
     if (fill_status.ok()) {
       internal_stats_.missing_redundant_entries_recovered += 1;
-      DBG("   Key missing copies added");
+      PW_LOG_DEBUG("   Key missing copies added");
     } else {
-      DBG("   Failed to add key missing copies");
+      PW_LOG_DEBUG("   Failed to add key missing copies");
       if (repair_status.ok()) {
         repair_status = fill_status;
       }
@@ -1248,7 +1264,7 @@
 }
 
 Status KeyValueStore::FixErrors() {
-  DBG("Fixing KVS errors");
+  PW_LOG_DEBUG("Fixing KVS errors");
 
   // Step 1: Garbage collect any sectors marked as corrupt.
   Status overall_status = RepairCorruptSectors();
@@ -1279,9 +1295,9 @@
 Status KeyValueStore::Repair() {
   // If errors have been detected, just reinit the KVS metadata. This does a
   // full deep error check and any needed repairs. Then repair any errors.
-  INF("Starting KVS repair");
+  PW_LOG_INFO("Starting KVS repair");
 
-  DBG("Reinitialize KVS metadata");
+  PW_LOG_DEBUG("Reinitialize KVS metadata");
   InitializeMetadata()
       .IgnoreError();  // TODO(b/242598609): Handle Status properly
 
@@ -1320,83 +1336,88 @@
 
 void KeyValueStore::LogDebugInfo() const {
   const size_t sector_size_bytes = partition_.sector_size_bytes();
-  DBG("====================== KEY VALUE STORE DUMP =========================");
-  DBG(" ");
-  DBG("Flash partition:");
-  DBG("  Sector count     = %u", unsigned(partition_.sector_count()));
-  DBG("  Sector max count = %u", unsigned(sectors_.max_size()));
-  DBG("  Sectors in use   = %u", unsigned(sectors_.size()));
-  DBG("  Sector size      = %u", unsigned(sector_size_bytes));
-  DBG("  Total size       = %u", unsigned(partition_.size_bytes()));
-  DBG("  Alignment        = %u", unsigned(partition_.alignment_bytes()));
-  DBG(" ");
-  DBG("Key descriptors:");
-  DBG("  Entry count     = %u", unsigned(entry_cache_.total_entries()));
-  DBG("  Max entry count = %u", unsigned(entry_cache_.max_entries()));
-  DBG(" ");
-  DBG("      #     hash        version    address   address (hex)");
+  PW_LOG_DEBUG(
+      "====================== KEY VALUE STORE DUMP =========================");
+  PW_LOG_DEBUG(" ");
+  PW_LOG_DEBUG("Flash partition:");
+  PW_LOG_DEBUG("  Sector count     = %u", unsigned(partition_.sector_count()));
+  PW_LOG_DEBUG("  Sector max count = %u", unsigned(sectors_.max_size()));
+  PW_LOG_DEBUG("  Sectors in use   = %u", unsigned(sectors_.size()));
+  PW_LOG_DEBUG("  Sector size      = %u", unsigned(sector_size_bytes));
+  PW_LOG_DEBUG("  Total size       = %u", unsigned(partition_.size_bytes()));
+  PW_LOG_DEBUG("  Alignment        = %u",
+               unsigned(partition_.alignment_bytes()));
+  PW_LOG_DEBUG(" ");
+  PW_LOG_DEBUG("Key descriptors:");
+  PW_LOG_DEBUG("  Entry count     = %u",
+               unsigned(entry_cache_.total_entries()));
+  PW_LOG_DEBUG("  Max entry count = %u", unsigned(entry_cache_.max_entries()));
+  PW_LOG_DEBUG(" ");
+  PW_LOG_DEBUG("      #     hash        version    address   address (hex)");
   size_t count = 0;
   for (const EntryMetadata& metadata : entry_cache_) {
-    DBG("   |%3zu: | %8zx  |%8zu  | %8zu | %8zx",
-        count++,
-        size_t(metadata.hash()),
-        size_t(metadata.transaction_id()),
-        size_t(metadata.first_address()),
-        size_t(metadata.first_address()));
+    PW_LOG_DEBUG("   |%3zu: | %8zx  |%8zu  | %8zu | %8zx",
+                 count++,
+                 size_t(metadata.hash()),
+                 size_t(metadata.transaction_id()),
+                 size_t(metadata.first_address()),
+                 size_t(metadata.first_address()));
   }
-  DBG(" ");
+  PW_LOG_DEBUG(" ");
 
-  DBG("Sector descriptors:");
-  DBG("      #     tail free  valid    has_space");
+  PW_LOG_DEBUG("Sector descriptors:");
+  PW_LOG_DEBUG("      #     tail free  valid    has_space");
   for (const SectorDescriptor& sd : sectors_) {
-    DBG("   |%3u: | %8zu  |%8zu  | %s",
-        sectors_.Index(sd),
-        size_t(sd.writable_bytes()),
-        sd.valid_bytes(),
-        sd.writable_bytes() ? "YES" : "");
+    PW_LOG_DEBUG("   |%3u: | %8zu  |%8zu  | %s",
+                 sectors_.Index(sd),
+                 size_t(sd.writable_bytes()),
+                 sd.valid_bytes(),
+                 sd.writable_bytes() ? "YES" : "");
   }
-  DBG(" ");
+  PW_LOG_DEBUG(" ");
 
   // TODO(keir): This should stop logging after some threshold.
   // size_t dumped_bytes = 0;
-  DBG("Sector raw data:");
+  PW_LOG_DEBUG("Sector raw data:");
   for (size_t sector_id = 0; sector_id < sectors_.size(); ++sector_id) {
     // Read sector data. Yes, this will blow the stack on embedded.
     std::array<byte, 500> raw_sector_data;  // TODO!!!
     [[maybe_unused]] StatusWithSize sws =
         partition_.Read(sector_id * sector_size_bytes, raw_sector_data);
-    DBG("Read: %u bytes", unsigned(sws.size()));
+    PW_LOG_DEBUG("Read: %u bytes", unsigned(sws.size()));
 
-    DBG("  base    addr  offs   0  1  2  3  4  5  6  7");
+    PW_LOG_DEBUG("  base    addr  offs   0  1  2  3  4  5  6  7");
     for (size_t i = 0; i < sector_size_bytes; i += 8) {
-      DBG("  %3zu %8zx %5zu | %02x %02x %02x %02x %02x %02x %02x %02x",
-          sector_id,
-          (sector_id * sector_size_bytes) + i,
-          i,
-          static_cast<unsigned int>(raw_sector_data[i + 0]),
-          static_cast<unsigned int>(raw_sector_data[i + 1]),
-          static_cast<unsigned int>(raw_sector_data[i + 2]),
-          static_cast<unsigned int>(raw_sector_data[i + 3]),
-          static_cast<unsigned int>(raw_sector_data[i + 4]),
-          static_cast<unsigned int>(raw_sector_data[i + 5]),
-          static_cast<unsigned int>(raw_sector_data[i + 6]),
-          static_cast<unsigned int>(raw_sector_data[i + 7]));
+      PW_LOG_DEBUG("  %3zu %8zx %5zu | %02x %02x %02x %02x %02x %02x %02x %02x",
+                   sector_id,
+                   (sector_id * sector_size_bytes) + i,
+                   i,
+                   static_cast<unsigned int>(raw_sector_data[i + 0]),
+                   static_cast<unsigned int>(raw_sector_data[i + 1]),
+                   static_cast<unsigned int>(raw_sector_data[i + 2]),
+                   static_cast<unsigned int>(raw_sector_data[i + 3]),
+                   static_cast<unsigned int>(raw_sector_data[i + 4]),
+                   static_cast<unsigned int>(raw_sector_data[i + 5]),
+                   static_cast<unsigned int>(raw_sector_data[i + 6]),
+                   static_cast<unsigned int>(raw_sector_data[i + 7]));
 
       // TODO(keir): Fix exit condition.
       if (i > 128) {
         break;
       }
     }
-    DBG(" ");
+    PW_LOG_DEBUG(" ");
   }
 
-  DBG("////////////////////// KEY VALUE STORE DUMP END /////////////////////");
+  PW_LOG_DEBUG(
+      "////////////////////// KEY VALUE STORE DUMP END /////////////////////");
 }
 
 void KeyValueStore::LogSectors() const {
-  DBG("Sector descriptors: count %u", unsigned(sectors_.size()));
+  PW_LOG_DEBUG("Sector descriptors: count %u", unsigned(sectors_.size()));
   for (auto& sector : sectors_) {
-    DBG("  - Sector %u: valid %u, recoverable %u, free %u",
+    PW_LOG_DEBUG(
+        "  - Sector %u: valid %u, recoverable %u, free %u",
         sectors_.Index(sector),
         unsigned(sector.valid_bytes()),
         unsigned(sector.RecoverableBytes(partition_.sector_size_bytes())),
@@ -1405,13 +1426,14 @@
 }
 
 void KeyValueStore::LogKeyDescriptor() const {
-  DBG("Key descriptors: count %u", unsigned(entry_cache_.total_entries()));
+  PW_LOG_DEBUG("Key descriptors: count %u",
+               unsigned(entry_cache_.total_entries()));
   for (const EntryMetadata& metadata : entry_cache_) {
-    DBG("  - Key: %s, hash %#x, transaction ID %u, first address %#x",
-        metadata.state() == EntryState::kDeleted ? "Deleted" : "Valid",
-        unsigned(metadata.hash()),
-        unsigned(metadata.transaction_id()),
-        unsigned(metadata.first_address()));
+    PW_LOG_DEBUG("  - Key: %s, hash %#x, transaction ID %u, first address %#x",
+                 metadata.state() == EntryState::kDeleted ? "Deleted" : "Valid",
+                 unsigned(metadata.hash()),
+                 unsigned(metadata.transaction_id()),
+                 unsigned(metadata.first_address()));
   }
 }
 
diff --git a/pw_kvs/key_value_store_test.cc b/pw_kvs/key_value_store_test.cc
index 11e4102..d840bd5 100644
--- a/pw_kvs/key_value_store_test.cc
+++ b/pw_kvs/key_value_store_test.cc
@@ -37,7 +37,6 @@
 #include "pw_kvs/internal/entry.h"
 #include "pw_kvs_private/config.h"
 #include "pw_log/log.h"
-#include "pw_log/shorter.h"
 #include "pw_status/status.h"
 #include "pw_string/string_builder.h"
 
@@ -168,11 +167,11 @@
 
   int num_reloads = 2;
   for (int reload = 0; reload < num_reloads; ++reload) {
-    DBG("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx");
-    DBG("xxx                                      xxxx");
-    DBG("xxx               Reload %2d              xxxx", reload);
-    DBG("xxx                                      xxxx");
-    DBG("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx");
+    PW_LOG_DEBUG("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx");
+    PW_LOG_DEBUG("xxx                                      xxxx");
+    PW_LOG_DEBUG("xxx               Reload %2d              xxxx", reload);
+    PW_LOG_DEBUG("xxx                                      xxxx");
+    PW_LOG_DEBUG("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx");
 
     // Create and initialize the KVS. For KVS magic value always use a random 32
     // bit integer rather than a human readable 4 bytes. See pw_kvs/format.h for
@@ -188,7 +187,8 @@
     uint32_t written_value;
     EXPECT_EQ(kvs.size(), (reload == 0) ? 0 : 1u);
     for (uint32_t i = 0; i < num_writes; ++i) {
-      DBG("PUT #%zu for key %s with value %zu", size_t(i), key, size_t(i));
+      PW_LOG_DEBUG(
+          "PUT #%zu for key %s with value %zu", size_t(i), key, size_t(i));
 
       written_value = i + 0xfc;  // Prevent accidental pass with zero.
       EXPECT_OK(kvs.Put(key, written_value));
@@ -196,7 +196,7 @@
     }
 
     // Verify that we can read the value back.
-    DBG("GET final value for key: %s", key);
+    PW_LOG_DEBUG("GET final value for key: %s", key);
     uint32_t actual_value;
     EXPECT_OK(kvs.Get(key, &actual_value));
     EXPECT_EQ(actual_value, written_value);
@@ -229,7 +229,7 @@
   for (size_t i = 0; i < num_writes; ++i) {
     StringBuffer<150> key;
     key << "key_" << i;
-    DBG("PUT #%zu for key %s with value %zu", i, key.c_str(), i);
+    PW_LOG_DEBUG("PUT #%zu for key %s with value %zu", i, key.c_str(), i);
 
     size_t value = i + 77;  // Prevent accidental pass with zero.
     EXPECT_OK(kvs.Put(key.view(), value));
@@ -253,12 +253,12 @@
 
   // Add one entry.
   const char* key = "Key1";
-  DBG("PUT value for key: %s", key);
+  PW_LOG_DEBUG("PUT value for key: %s", key);
   uint8_t written_value = 0xDA;
   ASSERT_OK(kvs.Put(key, written_value));
   EXPECT_EQ(kvs.size(), 1u);
 
-  DBG("GET value for key: %s", key);
+  PW_LOG_DEBUG("GET value for key: %s", key);
   uint8_t actual_value;
   ASSERT_OK(kvs.Get(key, &actual_value));
   EXPECT_EQ(actual_value, written_value);
@@ -280,12 +280,12 @@
   const char* key = "Key1";
   uint8_t written_value = 0xDA;
   for (int i = 0; i < 50; i++) {
-    DBG("PUT [%d] value for key: %s", i, key);
+    PW_LOG_DEBUG("PUT [%d] value for key: %s", i, key);
     ASSERT_OK(kvs.Put(key, written_value));
     EXPECT_EQ(kvs.size(), 1u);
   }
 
-  DBG("GET value for key: %s", key);
+  PW_LOG_DEBUG("GET value for key: %s", key);
   uint8_t actual_value;
   ASSERT_OK(kvs.Get(key, &actual_value));
   EXPECT_EQ(actual_value, written_value);
diff --git a/pw_kvs/sectors.cc b/pw_kvs/sectors.cc
index 25edcf6..50fe996 100644
--- a/pw_kvs/sectors.cc
+++ b/pw_kvs/sectors.cc
@@ -18,7 +18,7 @@
 #include "pw_kvs/internal/sectors.h"
 
 #include "pw_kvs_private/config.h"
-#include "pw_log/shorter.h"
+#include "pw_log/log.h"
 
 namespace pw::kvs::internal {
 namespace {
@@ -64,12 +64,13 @@
     temp_sectors_to_skip_[sectors_to_skip++] = &FromAddress(address);
   }
 
-  DBG("Find sector with %u bytes available, starting with sector %u, %s",
+  PW_LOG_DEBUG(
+      "Find sector with %u bytes available, starting with sector %u, %s",
       unsigned(size),
       Index(last_new_),
       (find_mode == kAppendEntry) ? "Append" : "GC");
   for (size_t i = 0; i < sectors_to_skip; ++i) {
-    DBG("  Skip sector %u", Index(temp_sectors_to_skip_[i]));
+    PW_LOG_DEBUG("  Skip sector %u", Index(temp_sectors_to_skip_[i]));
   }
 
   // last_new_ is the sector that was last selected as the "new empty sector" to
@@ -136,7 +137,8 @@
   // to keep 1 empty sector after the sector found here, but that rule does not
   // apply during GC.
   if (first_empty_sector != nullptr && at_least_two_empty_sectors) {
-    DBG("  Found a usable empty sector; returning the first found (%u)",
+    PW_LOG_DEBUG(
+        "  Found a usable empty sector; returning the first found (%u)",
         Index(first_empty_sector));
     last_new_ = first_empty_sector;
     *found_sector = first_empty_sector;
@@ -147,14 +149,15 @@
   // bytes
   if (non_empty_least_reclaimable_sector != nullptr) {
     *found_sector = non_empty_least_reclaimable_sector;
-    DBG("  Found a usable sector %u, with %u B recoverable, in GC",
+    PW_LOG_DEBUG(
+        "  Found a usable sector %u, with %u B recoverable, in GC",
         Index(*found_sector),
         unsigned((*found_sector)->RecoverableBytes(sector_size_bytes)));
     return OkStatus();
   }
 
   // No sector was found.
-  DBG("  Unable to find a usable sector");
+  PW_LOG_DEBUG("  Unable to find a usable sector");
   *found_sector = nullptr;
   return Status::ResourceExhausted();
 }
@@ -173,7 +176,7 @@
   // Build a vector of sectors to avoid.
   for (size_t i = 0; i < reserved_addresses.size(); ++i) {
     temp_sectors_to_skip_[i] = &FromAddress(reserved_addresses[i]);
-    DBG("    Skip sector %u", Index(reserved_addresses[i]));
+    PW_LOG_DEBUG("    Skip sector %u", Index(reserved_addresses[i]));
   }
   const span sectors_to_skip(temp_sectors_to_skip_, reserved_addresses.size());
 
@@ -216,17 +219,18 @@
           !Contains(sectors_to_skip, &sector)) {
         sector_candidate = &sector;
         candidate_bytes = sector.valid_bytes();
-        DBG("    Doing GC on sector with no reclaimable bytes!");
+        PW_LOG_DEBUG("    Doing GC on sector with no reclaimable bytes!");
       }
     }
   }
 
   if (sector_candidate != nullptr) {
-    DBG("Found sector %u to Garbage Collect, %u recoverable bytes",
+    PW_LOG_DEBUG(
+        "Found sector %u to Garbage Collect, %u recoverable bytes",
         Index(sector_candidate),
         unsigned(sector_candidate->RecoverableBytes(sector_size_bytes)));
   } else {
-    DBG("Unable to find sector to garbage collect!");
+    PW_LOG_DEBUG("Unable to find sector to garbage collect!");
   }
   return sector_candidate;
 }
diff --git a/pw_libc/BUILD.gn b/pw_libc/BUILD.gn
index 909e658..b927682 100644
--- a/pw_libc/BUILD.gn
+++ b/pw_libc/BUILD.gn
@@ -101,11 +101,35 @@
     no_test_functions = functions
   }
 
+  pw_libc_source_set("stdio") {
+    functions = [ "snprintf" ]
+
+    additional_srcs = [
+      "printf_core/string_writer.cpp",
+      "printf_core/printf_main.cpp",
+      "printf_core/writer.cpp",
+      "printf_core/parser.cpp",
+      "printf_core/converter.cpp",
+    ]
+
+    defines = [
+      "LIBC_COPT_PRINTF_DISABLE_FLOAT",
+      "LIBC_COPT_PRINTF_DISABLE_WRITE_INT",
+      "LIBC_COPT_PRINTF_DISABLE_INDEX_MODE",
+    ]
+
+    # This config includes -Wshadow. On gcc, this warns even for constructor
+    # arguments which shadow members. This is too pedantic and shouldn't be
+    # changed upstream.
+    remove_configs = [ "//pw_build:extra_strict_warnings" ]
+  }
+
   pw_static_library("pw_libc") {
     complete_static_lib = true
     deps = [
       ":ctype",
       ":math",
+      ":stdio",
       ":stdlib",
       ":string",
     ]
@@ -115,6 +139,7 @@
     tests = [
       ":ctype_tests",
       ":math_tests",
+      ":stdio_tests",
       ":stdlib_tests",
       ":string_tests",
     ]
diff --git a/pw_log/Android.bp b/pw_log/Android.bp
index 398523b..73ce109 100644
--- a/pw_log/Android.bp
+++ b/pw_log/Android.bp
@@ -32,12 +32,16 @@
     generated_headers: [
         "google_protobuf_descriptor_pwpb_h",
         "pw_log_log_proto_pwpb_h",
+        "pw_log_log_rpc_pwpb_h",
+        "pw_log_log_raw_rpc_h",
         "pw_protobuf_protos_common_pwpb_h",
         "pw_tokenizer_proto_options_pwpb_h",
     ],
     export_generated_headers: [
         "google_protobuf_descriptor_pwpb_h",
         "pw_log_log_proto_pwpb_h",
+        "pw_log_log_rpc_pwpb_h",
+        "pw_log_log_raw_rpc_h",
         "pw_protobuf_protos_common_pwpb_h",
         "pw_tokenizer_proto_options_pwpb_h",
     ],
@@ -70,10 +74,11 @@
          "--proto-path=external/pigweed/pw_protobuf/ " +
          // Requires the generated pw_tokenizer/proto/options.proto filepath.
          "--proto-path=$$(dirname $$(dirname $$(dirname $(location :pw_tokenizer_proto_options_proto)))) " +
+         "--proto-path=$$(dirname $$(dirname $$(dirname $(location :pw_log_log_proto_with_prefix)))) " +
          "--proto-path=external/protobuf/src/ " +
-         "--out-dir=$$(dirname $(location pw_log/proto/log.pwpb.h)) " +
+         "--out-dir=$(genDir) " +
          "--plugin-path=$(location pw_protobuf_plugin_py) " +
-         "--compile-dir=$$(dirname $(in)) " +
+         "--compile-dir=$(genDir) " +
          "--sources $(in) " +
          "--language pwpb " +
          "--no-experimental-proto3-optional " +
@@ -91,6 +96,64 @@
     ],
 }
 
+genrule {
+    name: "pw_log_log_rpc_pwpb_h",
+    srcs: [":pw_log_log_proto_with_prefix",],
+    cmd: "python3 $(location pw_protobuf_compiler_py) " +
+         "--proto-path=external/pigweed/pw_protobuf/ " +
+         // Requires the generated pw_tokenizer/proto/options.proto filepath.
+         "--proto-path=$$(dirname $$(dirname $$(dirname $(location :pw_tokenizer_proto_options_proto)))) " +
+         "--proto-path=$$(dirname $$(dirname $$(dirname $(location :pw_log_log_proto_with_prefix)))) " +
+         "--proto-path=external/protobuf/src/ " +
+         "--out-dir=$(genDir) " +
+         "--plugin-path=$(location pw_rpc_plugin_pwpb_py) " +
+         "--compile-dir=$(genDir) " +
+         "--sources $(in) " +
+         "--language pwpb_rpc " +
+         "--no-experimental-proto3-optional " +
+         "--protoc=$(location aprotoc) ",
+    out: [
+        "pw_log/proto/log.rpc.pwpb.h",
+    ],
+    tool_files: [
+      ":pw_tokenizer_proto_options_proto",
+    ],
+    tools: [
+        "aprotoc",
+        "pw_protobuf_compiler_py",
+        "pw_rpc_plugin_pwpb_py",
+    ],
+}
+
+genrule {
+    name: "pw_log_log_raw_rpc_h",
+    srcs: [":pw_log_log_proto_with_prefix",],
+    cmd: "python3 $(location pw_protobuf_compiler_py) " +
+         "--proto-path=external/pigweed/pw_protobuf/ " +
+         // Requires the generated pw_tokenizer/proto/options.proto filepath.
+         "--proto-path=$$(dirname $$(dirname $$(dirname $(location :pw_tokenizer_proto_options_proto)))) " +
+         "--proto-path=$$(dirname $$(dirname $$(dirname $(location :pw_log_log_proto_with_prefix)))) " +
+         "--proto-path=external/protobuf/src/ " +
+         "--out-dir=$(genDir) " +
+         "--plugin-path=$(location pw_rpc_plugin_rawpb_py) " +
+         "--compile-dir=$(genDir) " +
+         "--sources $(in) " +
+         "--language raw_rpc " +
+         "--no-experimental-proto3-optional " +
+         "--protoc=$(location aprotoc) ",
+    out: [
+        "pw_log/proto/log.raw_rpc.pb.h",
+    ],
+    tool_files: [
+      ":pw_tokenizer_proto_options_proto",
+    ],
+    tools: [
+        "aprotoc",
+        "pw_protobuf_compiler_py",
+        "pw_rpc_plugin_rawpb_py",
+    ],
+}
+
 android_library {
     name: "pw_log_android_java",
     srcs: ["java/android_main/dev/pigweed/pw_log/*.java"],
diff --git a/pw_log/BUILD.bazel b/pw_log/BUILD.bazel
index 525545f..22e6337 100644
--- a/pw_log/BUILD.bazel
+++ b/pw_log/BUILD.bazel
@@ -18,8 +18,8 @@
     "pw_cc_library",
     "pw_cc_test",
 )
-load("//pw_protobuf_compiler:pw_proto_library.bzl", "pw_proto_library")
 load("//pw_build/bazel_internal:py_proto_library.bzl", "py_proto_library")
+load("//pw_protobuf_compiler:pw_proto_library.bzl", "pw_proto_library")
 
 package(default_visibility = ["//visibility:public"])
 
diff --git a/pw_log/docs.rst b/pw_log/docs.rst
index d59f011..2739937 100644
--- a/pw_log/docs.rst
+++ b/pw_log/docs.rst
@@ -17,9 +17,9 @@
 :ref:`module-pw_log-protobuf` for details.
 
 .. toctree::
-  :hidden:
+   :hidden:
 
-  protobuf
+   protobuf
 
 --------------
 Usage examples
@@ -29,39 +29,44 @@
 
 .. code-block:: cpp
 
-  #define PW_LOG_MODULE_NAME "BLE"
+   #define PW_LOG_MODULE_NAME "BLE"
 
-  #include "pw_log/log.h"
+   #include "pw_log/log.h"
 
-  int main() {
-    PW_LOG_INFO("Booting...");
-    PW_LOG_DEBUG("CPU temp: %.2f", cpu_temperature);
-    if (BootFailed()) {
-      PW_LOG_CRITICAL("Had trouble booting due to error %d", GetErrorCode());
-      ReportErrorsAndHalt();
-    }
-    PW_LOG_INFO("Successfully booted");
-  }
+   int main() {
+     PW_LOG_INFO("Booting...");
+     PW_LOG_DEBUG("CPU temp: %.2f", cpu_temperature);
+     if (BootFailed()) {
+       PW_LOG_CRITICAL("Had trouble booting due to error %d", GetErrorCode());
+       ReportErrorsAndHalt();
+     }
+     PW_LOG_INFO("Successfully booted");
+   }
 
 In ``.cc`` files, it is possible to dispense with the ``PW_`` part of the log
 names and go for shorter log macros. Include ``pw_log/short.h`` or
 ``pw_log/shorter.h`` for shorter versions of the macros.
 
+.. warning::
+   The shorter log macros collide with `Abseil's logging API
+   <https://abseil.io/docs/cpp/guides/logging>`_. Do not use them in upstream
+   Pigweed modules, or any code that may depend on Abseil.
+
 .. code-block:: cpp
 
-  #define PW_LOG_MODULE_NAME "BLE"
+   #define PW_LOG_MODULE_NAME "BLE"
 
-  #include "pw_log/shorter.h"
+   #include "pw_log/shorter.h"
 
-  int main() {
-    INF("Booting...");
-    DBG("CPU temp: %.2f", cpu_temperature);
-    if (BootFailed()) {
-      CRT("Had trouble booting due to error %d", GetErrorCode());
-      ReportErrorsAndHalt();
-    }
-    INF("Successfully booted");
-  }
+   int main() {
+     INF("Booting...");
+     DBG("CPU temp: %.2f", cpu_temperature);
+     if (BootFailed()) {
+       CRT("Had trouble booting due to error %d", GetErrorCode());
+       ReportErrorsAndHalt();
+     }
+     INF("Successfully booted");
+   }
 
 Layer diagram example: ``stm32f429i-disc1``
 ===========================================
@@ -73,6 +78,8 @@
 
 .. image:: example_layer_diagram.svg
 
+.. _module-pw_log-macros:
+
 Logging macros
 ==============
 These are the primary macros for logging information about the functioning of a
@@ -80,45 +87,45 @@
 
 .. c:macro:: PW_LOG(level, module, flags, fmt, ...)
 
-  This is the primary mechanism for logging.
+   This is the primary mechanism for logging.
 
-  *level* - An integer level as defined by ``pw_log/levels.h``.
+   *level* - An integer level as defined by ``pw_log/levels.h``.
 
-  *module* - A string literal for the module name. Defaults to
-  :c:macro:`PW_LOG_MODULE_NAME`.
+   *module* - A string literal for the module name. Defaults to
+   :c:macro:`PW_LOG_MODULE_NAME`.
 
-  *flags* - Arbitrary flags the backend can leverage. The semantics of these
-  flags are not defined in the facade, but are instead meant as a general
-  mechanism for communication bits of information to the logging backend.
-  ``pw_log`` reserves 2 flag bits by default, but log backends may provide for
-  more or fewer flag bits.
+   *flags* - Arbitrary flags the backend can leverage. The semantics of these
+   flags are not defined in the facade, but are instead meant as a general
+   mechanism for communication bits of information to the logging backend.
+   ``pw_log`` reserves 2 flag bits by default, but log backends may provide for
+   more or fewer flag bits.
 
-  Here are some ideas for what a backend might use flags for:
+   Here are some ideas for what a backend might use flags for:
 
-  - Example: ``HAS_PII`` - A log has personally-identifying data
-  - Example: ``HAS_DII`` - A log has device-identifying data
-  - Example: ``RELIABLE_DELIVERY`` - Ask the backend to ensure the log is
-    delivered; this may entail blocking other logs.
-  - Example: ``BEST_EFFORT`` - Don't deliver this log if it would mean blocking
-    or dropping important-flagged logs
+   - Example: ``HAS_PII`` - A log has personally-identifying data
+   - Example: ``HAS_DII`` - A log has device-identifying data
+   - Example: ``RELIABLE_DELIVERY`` - Ask the backend to ensure the log is
+     delivered; this may entail blocking other logs.
+   - Example: ``BEST_EFFORT`` - Don't deliver this log if it would mean blocking
+     or dropping important-flagged logs
 
-  *fmt* - The message to log, which may contain format specifiers like ``%d``
-  or ``%0.2f``.
+   *fmt* - The message to log, which may contain format specifiers like ``%d``
+   or ``%0.2f``.
 
-  Example:
+   Example:
 
-  .. code-block:: cpp
+   .. code-block:: cpp
 
-    PW_LOG(PW_LOG_LEVEL_INFO, PW_LOG_MODULE_NAME, PW_LOG_FLAGS, "Temp is %d degrees", temp);
-    PW_LOG(PW_LOG_LEVEL_ERROR, PW_LOG_MODULE_NAME, UNRELIABLE_DELIVERY, "It didn't work!");
+      PW_LOG(PW_LOG_LEVEL_INFO, PW_LOG_MODULE_NAME, PW_LOG_FLAGS, "Temp is %d degrees", temp);
+      PW_LOG(PW_LOG_LEVEL_ERROR, PW_LOG_MODULE_NAME, UNRELIABLE_DELIVERY, "It didn't work!");
 
-  .. note::
+   .. note::
 
-    ``PW_LOG()`` should not be used frequently; typically only when adding
-    flags to a particular message to mark PII or to indicate delivery
-    guarantees.  For most cases, prefer to use the direct ``PW_LOG_INFO`` or
-    ``PW_LOG_DEBUG`` style macros, which are often implemented more efficiently
-    in the backend.
+      ``PW_LOG()`` should not be used frequently; typically only when adding
+      flags to a particular message to mark PII or to indicate delivery
+      guarantees.  For most cases, prefer to use the direct ``PW_LOG_INFO`` or
+      ``PW_LOG_DEBUG`` style macros, which are often implemented more efficiently
+      in the backend.
 
 
 .. c:macro:: PW_LOG_DEBUG(fmt, ...)
@@ -127,7 +134,7 @@
 .. c:macro:: PW_LOG_ERROR(fmt, ...)
 .. c:macro:: PW_LOG_CRITICAL(fmt, ...)
 
-  Shorthand for ``PW_LOG(<level>, PW_LOG_MODULE_NAME, PW_LOG_FLAGS, fmt, ...)``.
+   Shorthand for ``PW_LOG(<level>, PW_LOG_MODULE_NAME, PW_LOG_FLAGS, fmt, ...)``.
 
 --------------------
 Module configuration
@@ -139,22 +146,22 @@
 
 .. c:macro:: PW_LOG_LEVEL_DEFAULT
 
-  Controls the default value of ``PW_LOG_LEVEL``. Setting
-  ``PW_LOG_LEVEL_DEFAULT`` will change the behavior of all source files that
-  have not explicitly set ``PW_LOG_LEVEL``. Defaults to ``PW_LOG_LEVEL_DEBUG``.
+   Controls the default value of ``PW_LOG_LEVEL``. Setting
+   ``PW_LOG_LEVEL_DEFAULT`` will change the behavior of all source files that
+   have not explicitly set ``PW_LOG_LEVEL``. Defaults to ``PW_LOG_LEVEL_DEBUG``.
 
 .. c:macro:: PW_LOG_FLAGS_DEFAULT
 
-  Controls the default value of ``PW_LOG_FLAGS``. Setting
-  ``PW_LOG_FLAGS_DEFAULT`` will change the behavior of all source files that
-  have not explicitly set ``PW_LOG_FLAGS``. Defaults to ``0``.
+   Controls the default value of ``PW_LOG_FLAGS``. Setting
+   ``PW_LOG_FLAGS_DEFAULT`` will change the behavior of all source files that
+   have not explicitly set ``PW_LOG_FLAGS``. Defaults to ``0``.
 
 .. c:macro:: PW_LOG_ENABLE_IF_DEFAULT
 
-  Controls the default value of ``PW_LOG_ENABLE_IF``. Setting
-  ``PW_LOG_ENABLE_IF_DEFAULT`` will change the behavior of all source files that
-  have not explicitly set ``PW_LOG_ENABLE_IF``. Defaults to
-  ``((level) >= PW_LOG_LEVEL)``.
+   Controls the default value of ``PW_LOG_ENABLE_IF``. Setting
+   ``PW_LOG_ENABLE_IF_DEFAULT`` will change the behavior of all source files that
+   have not explicitly set ``PW_LOG_ENABLE_IF``. Defaults to
+   ``((level) >= PW_LOG_LEVEL)``.
 
 
 Per-source file configuration
@@ -168,38 +175,38 @@
 ``#defines`` before all ``#include`` statements. This should only be done in
 source files, not headers. For example:
 
-  .. code-block:: cpp
+.. code-block:: cpp
 
-    // Set the pw_log option macros here, before ALL of the #includes.
-    #define PW_LOG_MODULE_NAME "Calibration"
-    #define PW_LOG_LEVEL PW_LOG_LEVEL_WARN
+   // Set the pw_log option macros here, before ALL of the #includes.
+   #define PW_LOG_MODULE_NAME "Calibration"
+   #define PW_LOG_LEVEL PW_LOG_LEVEL_WARN
 
-    #include <array>
-    #include <random>
+   #include <array>
+   #include <random>
 
-    #include "devices/hal9000.h"
-    #include "pw_log/log.h"
-    #include "pw_rpc/server.h"
+   #include "devices/hal9000.h"
+   #include "pw_log/log.h"
+   #include "pw_rpc/server.h"
 
-    int MyFunction() {
-      PW_LOG_INFO("hello???");
-    }
+   int MyFunction() {
+     PW_LOG_INFO("hello???");
+   }
 
 .. c:macro:: PW_LOG_MODULE_NAME
 
-  A string literal module name to use in logs. Log backends may attach this
-  name to log messages or use it for runtime filtering. Defaults to ``""``. The
-  ``PW_LOG_MODULE_NAME_DEFINED`` macro is set to ``1`` or ``0`` to indicate
-  whether ``PW_LOG_MODULE_NAME`` was overridden.
+   A string literal module name to use in logs. Log backends may attach this
+   name to log messages or use it for runtime filtering. Defaults to ``""``. The
+   ``PW_LOG_MODULE_NAME_DEFINED`` macro is set to ``1`` or ``0`` to indicate
+   whether ``PW_LOG_MODULE_NAME`` was overridden.
 
 .. c:macro:: PW_LOG_FLAGS
 
-  Log flags to use for the ``PW_LOG_<level>`` macros. Different flags may be
-  applied when using the ``PW_LOG`` macro directly.
+   Log flags to use for the ``PW_LOG_<level>`` macros. Different flags may be
+   applied when using the ``PW_LOG`` macro directly.
 
-  Log backends use flags to change how they handle individual log messages.
-  Potential uses include assigning logs priority or marking them as containing
-  personal information. Defaults to ``PW_LOG_FLAGS_DEFAULT``.
+   Log backends use flags to change how they handle individual log messages.
+   Potential uses include assigning logs priority or marking them as containing
+   personal information. Defaults to ``PW_LOG_FLAGS_DEFAULT``.
 
 .. c:macro:: PW_LOG_LEVEL
 
@@ -211,15 +218,15 @@
 
    .. code-block:: cpp
 
-     #define PW_LOG_LEVEL PW_LOG_LEVEL_INFO
+      #define PW_LOG_LEVEL PW_LOG_LEVEL_INFO
 
-     #include "pw_log/log.h"
+      #include "pw_log/log.h"
 
-     void DoSomething() {
-       PW_LOG_DEBUG("This won't be logged at all");
-       PW_LOG_INFO("This is INFO level, and will display");
-       PW_LOG_WARN("This is above INFO level, and will display");
-     }
+      void DoSomething() {
+        PW_LOG_DEBUG("This won't be logged at all");
+        PW_LOG_INFO("This is INFO level, and will display");
+        PW_LOG_WARN("This is above INFO level, and will display");
+      }
 
 .. c:macro:: PW_LOG_ENABLE_IF(level, flags)
 
@@ -232,37 +239,37 @@
 
    .. code-block:: cpp
 
-     // Pigweed's log facade will call this macro to decide to log or not. In
-     // this case, it will drop logs with the PII flag set if display of PII is
-     // not enabled for the application.
-     #define PW_LOG_ENABLE_IF(level, flags) \
-         (level >= PW_LOG_LEVEL_INFO && \
-          !((flags & MY_PRODUCT_PII_MASK) && MY_PRODUCT_LOG_PII_ENABLED)
+      // Pigweed's log facade will call this macro to decide to log or not. In
+      // this case, it will drop logs with the PII flag set if display of PII is
+      // not enabled for the application.
+      #define PW_LOG_ENABLE_IF(level, flags) \
+          (level >= PW_LOG_LEVEL_INFO && \
+           !((flags & MY_PRODUCT_PII_MASK) && MY_PRODUCT_LOG_PII_ENABLED)
 
-     #include "pw_log/log.h"
+      #include "pw_log/log.h"
 
-     // This define might be supplied by the build system.
-     #define MY_PRODUCT_LOG_PII_ENABLED false
+      // This define might be supplied by the build system.
+      #define MY_PRODUCT_LOG_PII_ENABLED false
 
-     // This is the PII mask bit selected by the application.
-     #define MY_PRODUCT_PII_MASK (1 << 5)
+      // This is the PII mask bit selected by the application.
+      #define MY_PRODUCT_PII_MASK (1 << 5)
 
-     void DoSomethingWithSensitiveInfo() {
-       PW_LOG_DEBUG("This won't be logged at all");
-       PW_LOG_INFO("This is INFO level, and will display");
+      void DoSomethingWithSensitiveInfo() {
+        PW_LOG_DEBUG("This won't be logged at all");
+        PW_LOG_INFO("This is INFO level, and will display");
 
-       // In this example, this will not be logged since logging with PII
-       // is disabled by the above macros.
-       PW_LOG(PW_LOG_LEVEL_INFO,
-              MY_PRODUCT_PII_MASK,
-              "Sensitive: %d",
-              sensitive_info);
-     }
+        // In this example, this will not be logged since logging with PII
+        // is disabled by the above macros.
+        PW_LOG(PW_LOG_LEVEL_INFO,
+               MY_PRODUCT_PII_MASK,
+               "Sensitive: %d",
+               sensitive_info);
+      }
 
 .. attention::
 
-  At this time, only compile time filtering is supported. In the future, we
-  plan to add support for runtime filtering.
+   At this time, only compile time filtering is supported. In the future, we
+   plan to add support for runtime filtering.
 
 ------------------
 Logging attributes
@@ -329,15 +336,15 @@
 
 .. c:macro:: PW_LOG_CFG_GLOG_BUFFER_SIZE_BYTES
 
-  The size of the stack-allocated buffer used by the Google Logging (glog)
-  macros. This only affects the glog macros provided through pw_log/glog.h.
+   The size of the stack-allocated buffer used by the Google Logging (glog)
+   macros. This only affects the glog macros provided through pw_log/glog.h.
 
-  Pigweed strongly recommends sticking to printf-style logging instead
-  of C++ stream-style Google Log logging unless absolutely necessary. The glog
-  macros are only provided for compatibility with non-embedded code. See
-  :ref:`module-pw_log-design-discussion` for more details.
+   Pigweed strongly recommends sticking to printf-style logging instead
+   of C++ stream-style Google Log logging unless absolutely necessary. The glog
+   macros are only provided for compatibility with non-embedded code. See
+   :ref:`module-pw_log-design-discussion` for more details.
 
-  Undersizing this buffer will result in truncated log messages.
+   Undersizing this buffer will result in truncated log messages.
 
 -----------------
 Design discussion
@@ -356,14 +363,14 @@
 
 .. code-block:: cpp
 
-  LOG(INFO) << "My temperature is " << temperature << ". State: " << state;
+   LOG(INFO) << "My temperature is " << temperature << ". State: " << state;
 
 This log statement has two string literals. It might seem like one could convert
 move to tokenization:
 
 .. code-block:: cpp
 
-  LOG(INFO) << TOKEN("My temperature is ") << temperature << TOKEN(". State: ") << state;
+   LOG(INFO) << TOKEN("My temperature is ") << temperature << TOKEN(". State: ") << state;
 
 However, this doesn't work. The key problem is that the tokenization system
 needs to allocate the string in a linker section that is excluded from the
@@ -378,8 +385,8 @@
 
 .. code-block:: cpp
 
-  // Note: LOG_INFO can be tokenized behind the macro; transparent to users.
-  PW_LOG_INFO("My temperature is %d. State: %s", temperature, state);
+   // Note: LOG_INFO can be tokenized behind the macro; transparent to users.
+   PW_LOG_INFO("My temperature is %d. State: %s", temperature, state);
 
 Additionally, while Pigweed is mostly C++, it a practical reality that at times
 projects using Pigweed will need to log from third-party libraries written in
diff --git a/pw_log_string/BUILD.gn b/pw_log_string/BUILD.gn
index 68c3718..c8d65f5 100644
--- a/pw_log_string/BUILD.gn
+++ b/pw_log_string/BUILD.gn
@@ -37,7 +37,10 @@
 pw_source_set("config") {
   public = [ "public/pw_log_string/config.h" ]
   public_configs = [ ":public_include_path" ]
-  public_deps = [ pw_log_string_CONFIG ]
+  public_deps = [
+    "$dir_pw_preprocessor",
+    pw_log_string_CONFIG,
+  ]
 }
 
 # This source set only provides pw_log's backend interface by invoking the
diff --git a/pw_log_string/CMakeLists.txt b/pw_log_string/CMakeLists.txt
index cf77703..3cecc20 100644
--- a/pw_log_string/CMakeLists.txt
+++ b/pw_log_string/CMakeLists.txt
@@ -24,6 +24,7 @@
     public
   PUBLIC_DEPS
     ${pw_log_string_CONFIG}
+    pw_preprocessor
 )
 
 pw_add_library(pw_log_string INTERFACE
diff --git a/pw_log_string/docs.rst b/pw_log_string/docs.rst
index 8306ccd..8cd07eb 100644
--- a/pw_log_string/docs.rst
+++ b/pw_log_string/docs.rst
@@ -20,20 +20,153 @@
 useful to mix tokenized and string based logging in case you have a C ABI where
 tokenization can not be used on the other side.
 
----------------
-Getting started
----------------
-This module is extremely minimal to set up:
+----------------
+Get started (GN)
+----------------
+This section outlines how to implement a ``pw_log_string`` backend in a
+GN-based project.
 
-1. Implement ``pw_log_string_HandleMessageVaList()``
-2. Set ``pw_log_BACKEND`` to ``"$dir_pw_log_string"``
-3. Set ``pw_log_string_HANDLER_BACKEND`` to point to the source set that
-   implements ``pw_log_string_HandleMessageVaList()``
-4. Optionally provide your own implementation of
-   ``PW_LOG_STRING_HANDLE_MESSAGE`` which invokes
-   ``pw_log_string_HANDLER_BACKEND`` with your selected arguments.
+.. note::
+   The example code was written for a :ref:`host <target-host>` target running
+   on Linux.
+
+Invoke a logging macro
+======================
+Call one of the :ref:`pw_log macros <module-pw_log-macros>` in your project
+code:
+
+.. code-block:: cpp
+   :emphasize-lines: 9
+
+   /* //src/app.cc */
+
+   #include <unistd.h>
+
+   #include "pw_log/log.h"
+
+   int main() {
+       while (true) {
+           PW_LOG_INFO("Hello, world!");
+           sleep(5);
+       }
+       return 0;
+   }
+
+Implement the logging function
+==============================
+Implement :cpp:func:`pw_log_string_HandleMessageVaList()` in C. Macros like
+:cpp:func:`PW_LOG()` hand off the actual logging implementation to this
+function.
+
+The function signature of your implementation must match the one specified by
+Pigweed.
+
+The example code below just logs most of the available information to
+``stdout``:
+
+.. code-block:: c
+
+   /* //src/pw_log_string_backend.c */
+
+   #include <stdio.h>
+   #include <stdarg.h>
+
+   void pw_log_string_HandleMessageVaList(int level,
+                                          unsigned int flags,
+                                          const char* module_name,
+                                          const char* file_name,
+                                          int line_number,
+                                          const char* message,
+                                          va_list args) {
+       printf("Entering custom pw_log_string backend...\n");
+       printf("%d\n", level);
+       printf("%u\n", flags);
+       printf("%s\n", module_name);
+       printf("%s\n", file_name);
+       printf("%d\n", line_number);
+       printf("%s\n", message);
+       if (args) { /* Do something with your args here... */ }
+       printf("Exiting custom pw_log_string backend...\n\n");
+   }
 
 What exactly ``pw_log_string_HandleMessageVaList()`` should do is entirely up to
-the implementation. ``pw_log_basic``'s log handler is one example, but it's also
-possible to encode as protobuf and send over a TCP port, write to a file, or
-blink an LED to log as morse code.
+the implementation. The log handler in ``pw_log_basic`` is one example, but it's
+also possible to encode as protobuf and send over a TCP port, write to a file,
+or even blink an LED to log as morse code.
+
+Create source sets
+==================
+.. _source set: https://gn.googlesource.com/gn/+/main/docs/reference.md#c_language-source_sets
+
+Use ``pw_source_set`` to create a `source set`_ for your logging
+implementation. Do not use GN's built-in ``source_set`` feature.
+
+.. code-block:: python
+
+   # //src/BUILD.gn
+
+   ...
+
+   pw_source_set("pw_log_string_backend") {
+       sources = [ "pw_log_string_backend.c" ]
+   }
+
+   pw_source_set("pw_log_string_backend.impl") {
+       sources = []
+   }
+
+   ...
+
+.. _//pw_log/BUILD.gn: https://cs.opensource.google/pigweed/pigweed/+/main:pw_log/BUILD.gn
+
+The empty ``pw_log_string_backend.impl`` source set prevents circular
+dependencies. See the comment for ``group("impl")`` in `//pw_log/BUILD.gn`_
+for more context.
+
+Configure backends
+==================
+Update your target toolchain configuration file:
+
+* Set ``pw_log_BACKEND`` to ``dir_pw_log_string``
+* Point ``pw_log_string_HANDLER_BACKEND`` to your source set that implements
+  :cpp:func:`pw_log_string_HandleMessageVaList()`
+* Update :ref:`pw_build_LINK_DEPS <module-pw_build-link-deps>` to include
+  ``"$dir_pw_log:impl"`` and ``"$dir_pw_log_string:handler.impl"``
+
+.. code-block:: python
+   :emphasize-lines: 11,12,14,15
+
+   # //targets/my_target/target_toolchains.gni
+
+   ...
+
+   my_target = {
+     ...
+     my_toolchain = {
+       name = "my_toolchain"
+       defaults = {
+         ...
+         pw_log_BACKEND = dir_pw_log_string
+         pw_log_string_HANDLER_BACKEND = "//src:pw_log_string_backend"
+         pw_build_LINK_DEPS = [
+           "$dir_pw_log:impl",
+           "$dir_pw_log_string:handler.impl",
+           ...
+         ]
+         ...
+       }
+     }
+   }
+
+   ...
+
+
+(Optional) Implement message handler
+====================================
+Optionally provide your own implementation of ``PW_LOG_STRING_HANDLE_MESSAGE``
+which invokes ``pw_log_string_HANDLER_BACKEND`` with your selected arguments.
+
+-------------
+API reference
+-------------
+.. doxygenfunction:: pw_log_string_HandleMessageVaList(int level, unsigned int flags, const char* module_name, const char* file_name, int line_number, const char* message, va_list args)
diff --git a/pw_log_string/public/pw_log_string/config.h b/pw_log_string/public/pw_log_string/config.h
index 807c5ed..78c98d3 100644
--- a/pw_log_string/public/pw_log_string/config.h
+++ b/pw_log_string/public/pw_log_string/config.h
@@ -13,6 +13,8 @@
 // the License.
 #pragma once
 
+#include "pw_preprocessor/arguments.h"
+
 // User-provided header to optionally override options in this file.
 #if defined(PW_LOG_STRING_CONFIG_HEADER)
 #include PW_LOG_STRING_CONFIG_HEADER
@@ -23,7 +25,10 @@
 #ifndef PW_LOG_STRING_CONFIG_HANDLE_MESSAGE
 #define PW_LOG_STRING_CONFIG_HANDLE_MESSAGE(level, module, flags, ...) \
   do {                                                                 \
-    pw_log_string_HandleMessage(                                       \
-        (level), (flags), (module), __FILE__, __LINE__, __VA_ARGS__);  \
+    pw_log_string_HandleMessage((level),                               \
+                                (flags),                               \
+                                (module),                              \
+                                __FILE__,                              \
+                                __LINE__ PW_COMMA_ARGS(__VA_ARGS__));  \
   } while (0)
 #endif  // PW_LOG_STRING_CONFIG_HANDLE_MESSAGE
diff --git a/pw_log_string/public/pw_log_string/handler.h b/pw_log_string/public/pw_log_string/handler.h
index 11e5df5..611735a 100644
--- a/pw_log_string/public/pw_log_string/handler.h
+++ b/pw_log_string/public/pw_log_string/handler.h
@@ -41,8 +41,8 @@
                                  const char* message,
                                  ...) PW_PRINTF_FORMAT(6, 7);
 
-// Log a message with the listed attributes, this must be implemented by the
-// backend.
+/// Logs a message with the listed attributes. This must be implemented by the
+/// backend.
 void pw_log_string_HandleMessageVaList(int level,
                                        unsigned int flags,
                                        const char* module_name,
diff --git a/pw_log_tokenized/BUILD.bazel b/pw_log_tokenized/BUILD.bazel
index 8c65bd8..855a8f4 100644
--- a/pw_log_tokenized/BUILD.bazel
+++ b/pw_log_tokenized/BUILD.bazel
@@ -96,11 +96,20 @@
 )
 
 pw_cc_library(
-    name = "base64_over_hdlc",
-    srcs = ["base64_over_hdlc.cc"],
-    hdrs = ["public/pw_log_tokenized/base64_over_hdlc.h"],
+    name = "base64",
+    hdrs = ["public/pw_log_tokenized/base64.h"],
     includes = ["public"],
     deps = [
+        ":headers",  # Only config.h is needed
+        "//pw_tokenizer:base64",
+    ],
+)
+
+pw_cc_library(
+    name = "base64_over_hdlc",
+    srcs = ["base64_over_hdlc.cc"],
+    deps = [
+        ":base64",
         ":handler_facade",
         "//pw_hdlc",
         "//pw_stream:sys_io_stream",
@@ -116,6 +125,7 @@
         "pw_log_tokenized_private/test_utils.h",
     ],
     deps = [
+        ":base64",
         ":headers",
         "//pw_unit_test",
     ],
diff --git a/pw_log_tokenized/BUILD.gn b/pw_log_tokenized/BUILD.gn
index 17acf3a..1e98a9d 100644
--- a/pw_log_tokenized/BUILD.gn
+++ b/pw_log_tokenized/BUILD.gn
@@ -106,6 +106,7 @@
   public_configs = [ ":public_include_path" ]
   public_deps = [
     "$dir_pw_log:facade",
+    "$dir_pw_tokenizer:config",
     pw_log_tokenized_CONFIG,
   ]
   public = [ "public/pw_log_tokenized/config.h" ]
@@ -122,13 +123,21 @@
   }
 }
 
+pw_source_set("base64") {
+  public_configs = [ ":public_include_path" ]
+  public = [ "public/pw_log_tokenized/base64.h" ]
+  public_deps = [
+    ":config",
+    "$dir_pw_tokenizer:base64",
+  ]
+}
+
 # This target provides a backend for pw_tokenizer that encodes tokenized logs as
 # Base64, encodes them into HDLC frames, and writes them over sys_io.
 pw_source_set("base64_over_hdlc") {
-  public_configs = [ ":public_include_path" ]
-  public = [ "public/pw_log_tokenized/base64_over_hdlc.h" ]
   sources = [ "base64_over_hdlc.cc" ]
   deps = [
+    ":base64",
     ":handler.facade",
     "$dir_pw_hdlc:encoder",
     "$dir_pw_stream:sys_io_stream",
@@ -151,6 +160,7 @@
     "pw_log_tokenized_private/test_utils.h",
   ]
   deps = [
+    ":base64",
     ":headers",
     dir_pw_preprocessor,
   ]
diff --git a/pw_log_tokenized/CMakeLists.txt b/pw_log_tokenized/CMakeLists.txt
index 6397a67..69b9690 100644
--- a/pw_log_tokenized/CMakeLists.txt
+++ b/pw_log_tokenized/CMakeLists.txt
@@ -24,6 +24,7 @@
     public
   PUBLIC_DEPS
     pw_log.facade
+    pw_tokenizer.config
     ${pw_log_tokenized_CONFIG}
 )
 
@@ -86,21 +87,27 @@
     pw_preprocessor
 )
 
+pw_add_library(pw_log_tokenized.base64 INTERFACE
+  HEADERS
+    public/pw_log_tokenized/base64.h
+  PUBLIC_INCLUDES
+    public
+  PUBLIC_DEPS
+    pw_log_tokenized.config
+    pw_tokenizer.base64
+)
+
 # This target provides a backend for pw_tokenizer that encodes tokenized logs as
 # Base64, encodes them into HDLC frames, and writes them over sys_io.
 pw_add_library(pw_log_tokenized.base64_over_hdlc STATIC
-  HEADERS
-    public/pw_log_tokenized/base64_over_hdlc.h
-  PUBLIC_INCLUDES
-    public
   SOURCES
     base64_over_hdlc.cc
   PRIVATE_DEPS
     pw_hdlc.encoder
+    pw_log_tokenized.base64
     pw_log_tokenized.handler
     pw_span
     pw_stream.sys_io_stream
-    pw_tokenizer.base64
 )
 
 pw_add_test(pw_log_tokenized.log_tokenized_test
@@ -109,6 +116,7 @@
     log_tokenized_test_c.c
     pw_log_tokenized_private/test_utils.h
   PRIVATE_DEPS
+    pw_log_tokenized.base64
     pw_log_tokenized._headers
     pw_preprocessor
   GROUPS
diff --git a/pw_log_tokenized/base64_over_hdlc.cc b/pw_log_tokenized/base64_over_hdlc.cc
index 29e925a..997f6cf 100644
--- a/pw_log_tokenized/base64_over_hdlc.cc
+++ b/pw_log_tokenized/base64_over_hdlc.cc
@@ -15,17 +15,19 @@
 // This function serves as a backend for pw_tokenizer / pw_log_tokenized that
 // encodes tokenized logs as Base64 and writes them using HDLC.
 
-#include "pw_log_tokenized/base64_over_hdlc.h"
-
 #include "pw_hdlc/encoder.h"
+#include "pw_log_tokenized/base64.h"
 #include "pw_log_tokenized/handler.h"
 #include "pw_span/span.h"
 #include "pw_stream/sys_io_stream.h"
+#include "pw_string/string.h"
 #include "pw_tokenizer/base64.h"
 
 namespace pw::log_tokenized {
 namespace {
 
+inline constexpr int kBase64LogHdlcAddress = 1;
+
 stream::SysIoWriter writer;
 
 }  // namespace
@@ -36,15 +38,12 @@
     const uint8_t log_buffer[],
     size_t size_bytes) {
   // Encode the tokenized message as Base64.
-  char base64_buffer[tokenizer::kDefaultBase64EncodedBufferSize];
-  const size_t base64_bytes = tokenizer::PrefixedBase64Encode(
-      span(log_buffer, size_bytes), base64_buffer);
-  base64_buffer[base64_bytes] = '\0';
+  const pw::InlineBasicString base64_string =
+      PrefixedBase64Encode(log_buffer, size_bytes);
 
   // HDLC-encode the Base64 string via a SysIoWriter.
-  hdlc::WriteUIFrame(PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS,
-                     as_bytes(span(base64_buffer, base64_bytes)),
-                     writer);
+  hdlc::WriteUIFrame(
+      kBase64LogHdlcAddress, as_bytes(span(base64_string)), writer);
 }
 
 }  // namespace pw::log_tokenized
diff --git a/pw_log_tokenized/docs.rst b/pw_log_tokenized/docs.rst
index 77401dd..4f7cdbc 100644
--- a/pw_log_tokenized/docs.rst
+++ b/pw_log_tokenized/docs.rst
@@ -55,7 +55,7 @@
 
 .. code-block::
 
-  "■key1♦contents1■key2♦contents2■key3♦contents3"
+   "■key1♦contents1■key2♦contents2■key3♦contents3"
 
 This format makes the message easily machine parseable and human readable. It is
 extremely unlikely to conflict with log message contents due to the characters
@@ -67,7 +67,7 @@
 
 .. code-block::
 
-  "■msg♦Hyperdrive %d set to %f■module♦engine■file♦propulsion/hyper.cc"
+   "■msg♦Hyperdrive %d set to %f■module♦engine■file♦propulsion/hyper.cc"
 
 Using key-value pairs allows placing the fields in any order.
 ``pw_log_tokenized`` places the message first. This is prefered when tokenizing
@@ -175,6 +175,11 @@
         token_buffer.size());
   }
 
+The binary tokenized message may be encoded in the :ref:`prefixed Base64 format
+<module-pw_tokenizer-base64-format>` with the following function:
+
+.. doxygenfunction:: PrefixedBase64Encode(span<const std::byte>)
+
 Build targets
 -------------
 The GN build for ``pw_log_tokenized`` has two targets: ``pw_log_tokenized`` and
diff --git a/pw_log_tokenized/log_tokenized.cc b/pw_log_tokenized/log_tokenized.cc
index 25c00d9..6a1ba8c 100644
--- a/pw_log_tokenized/log_tokenized.cc
+++ b/pw_log_tokenized/log_tokenized.cc
@@ -16,6 +16,7 @@
 
 #include <cstdarg>
 
+#include "pw_log_tokenized/config.h"
 #include "pw_log_tokenized/handler.h"
 #include "pw_tokenizer/encode_args.h"
 
@@ -26,7 +27,8 @@
     ...) {
   va_list args;
   va_start(args, types);
-  pw::tokenizer::EncodedMessage<> encoded_message(token, types, args);
+  pw::tokenizer::EncodedMessage<PW_LOG_TOKENIZED_ENCODING_BUFFER_SIZE_BYTES>
+      encoded_message(token, types, args);
   va_end(args);
 
   pw_log_tokenized_HandleLog(
diff --git a/pw_log_tokenized/log_tokenized_test.cc b/pw_log_tokenized/log_tokenized_test.cc
index a8d4759..51c90e3 100644
--- a/pw_log_tokenized/log_tokenized_test.cc
+++ b/pw_log_tokenized/log_tokenized_test.cc
@@ -28,6 +28,7 @@
 #include "pw_log_tokenized/log_tokenized.h"
 
 #include "gtest/gtest.h"
+#include "pw_log_tokenized/base64.h"
 #include "pw_log_tokenized_private/test_utils.h"
 
 namespace pw::log_tokenized {
@@ -44,6 +45,22 @@
     PW_TOKENIZER_STRING_TOKEN(PW_LOG_MODULE_NAME) &
     ((1u << PW_LOG_TOKENIZED_MODULE_BITS) - 1);
 
+TEST(LogTokenized, Base64) {
+  constexpr uint8_t kBinary[6]{1, 2, 3, 4, 5, 6};
+  constexpr const char* kBase64Expected = "$AQIDBAUG";  // calculated in Python
+
+  InlineBasicString result_1 = PrefixedBase64Encode(as_bytes(span(kBinary)));
+  EXPECT_EQ(result_1, kBase64Expected);
+  EXPECT_EQ(result_1.capacity(), kBase64EncodedBufferSizeBytes);
+
+  InlineBasicString result_2 = PrefixedBase64Encode(kBinary, sizeof(kBinary));
+  EXPECT_EQ(result_2, kBase64Expected);
+
+  InlineBasicString result_3 = PrefixedBase64Encode(
+      reinterpret_cast<const std::byte*>(kBinary), sizeof(kBinary));
+  EXPECT_EQ(result_3, kBase64Expected);
+}
+
 TEST(LogTokenized, LogMetadata_LevelTooLarge_Clamps) {
   auto check_metadata = [] {
     Metadata metadata = Metadata(last_log.metadata);
diff --git a/pw_log_tokenized/public/pw_log_tokenized/base64.h b/pw_log_tokenized/public/pw_log_tokenized/base64.h
new file mode 100644
index 0000000..89e7a19
--- /dev/null
+++ b/pw_log_tokenized/public/pw_log_tokenized/base64.h
@@ -0,0 +1,51 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <cstddef>
+#include <type_traits>
+
+#include "pw_log_tokenized/config.h"
+#include "pw_tokenizer/base64.h"
+
+namespace pw::log_tokenized {
+
+// Minimum capacity for a string to hold the Base64-encoded version of a
+// PW_LOG_TOKENIZED_ENCODING_BUFFER_SIZE_BYTES tokenized message. This is the
+// capacity needed to encode to a `pw::InlineString` and does not include a null
+// terminator.
+inline constexpr size_t kBase64EncodedBufferSizeBytes =
+    tokenizer::Base64EncodedBufferSize(kEncodingBufferSizeBytes);
+
+/// Encodes a binary tokenized log in the prefixed Base64 format. Calls
+/// @cpp_func{pw::tokenizer::PrefixedBase64Encode} for a string sized to fit a
+/// `kEncodingBufferSizeBytes` tokenized log.
+inline InlineString<kBase64EncodedBufferSizeBytes> PrefixedBase64Encode(
+    span<const std::byte> binary_message) {
+  return tokenizer::PrefixedBase64Encode<kEncodingBufferSizeBytes>(
+      binary_message);
+}
+
+#ifndef PW_EXCLUDE_FROM_DOXYGEN  // Doxygen fails to parse this, so skip it.
+
+template <typename T,
+          typename = std::enable_if_t<sizeof(T) == sizeof(std::byte)>>
+inline InlineString<kBase64EncodedBufferSizeBytes> PrefixedBase64Encode(
+    const T* log_buffer, size_t size_bytes) {
+  return PrefixedBase64Encode(as_bytes(span(log_buffer, size_bytes)));
+}
+
+#endif  // PW_EXCLUDE_FROM_DOXYGEN
+
+}  // namespace pw::log_tokenized
diff --git a/pw_log_tokenized/public/pw_log_tokenized/config.h b/pw_log_tokenized/public/pw_log_tokenized/config.h
index 6475981..c1060b5 100644
--- a/pw_log_tokenized/public/pw_log_tokenized/config.h
+++ b/pw_log_tokenized/public/pw_log_tokenized/config.h
@@ -17,6 +17,22 @@
 
 #include "pw_log/levels.h"
 #include "pw_log/options.h"
+#include "pw_tokenizer/config.h"
+
+// The size of the stack-allocated argument encoding buffer to use by default.
+// A buffer of this size is allocated and used for the 4-byte token and for
+// encoding all arguments. It must be at least large enough for the token (4
+// bytes).
+//
+// This buffer does not need to be large to accommodate a good number of
+// tokenized string arguments. Integer arguments are usually encoded smaller
+// than their native size (e.g. 1 or 2 bytes for smaller numbers). All floating
+// point types are encoded as four bytes. Null-terminated strings are encoded
+// 1:1 in size, however, and can quickly fill up this buffer.
+#ifndef PW_LOG_TOKENIZED_ENCODING_BUFFER_SIZE_BYTES
+#define PW_LOG_TOKENIZED_ENCODING_BUFFER_SIZE_BYTES \
+  PW_TOKENIZER_CFG_ENCODING_BUFFER_SIZE_BYTES
+#endif  // PW_LOG_TOKENIZED_ENCODING_BUFFER_SIZE_BYTES
 
 // This macro takes the PW_LOG format string and optionally transforms it. By
 // default, pw_log_tokenized specifies three fields as key-value pairs.
@@ -68,3 +84,17 @@
 static_assert((PW_LOG_TOKENIZED_LEVEL_BITS + PW_LOG_TOKENIZED_LINE_BITS +
                PW_LOG_TOKENIZED_FLAG_BITS + PW_LOG_TOKENIZED_MODULE_BITS) == 32,
               "Log metadata fields must use 32 bits");
+
+#ifdef __cplusplus
+
+#include <cstddef>
+
+namespace pw::log_tokenized {
+
+// C++ constant for the encoding buffer size. Use this instead of the macro.
+inline constexpr size_t kEncodingBufferSizeBytes =
+    PW_LOG_TOKENIZED_ENCODING_BUFFER_SIZE_BYTES;
+
+}  // namespace pw::log_tokenized
+
+#endif  // __cplusplus
diff --git a/pw_log_zephyr/CMakeLists.txt b/pw_log_zephyr/CMakeLists.txt
index 408f184..c086318 100644
--- a/pw_log_zephyr/CMakeLists.txt
+++ b/pw_log_zephyr/CMakeLists.txt
@@ -34,19 +34,33 @@
     pw_log_zephyr
 )
 
-pw_add_library(pw_log_zephyr.tokenized_handler STATIC
+pw_add_library(pw_log_zephyr.tokenized_lib INTERFACE
   HEADERS
     zephyr_public_overrides/zephyr_custom_log.h
-  SOURCES
-    pw_log_zephyr_tokenized_handler.cc
   PUBLIC_DEPS
-    pw_log_tokenized.config
-    pw_log_tokenized.handler
+    pw_log_tokenized.base64
     pw_span
     pw_sync.interrupt_spin_lock
     pw_tokenizer
     pw_tokenizer.base64
 )
+
+pw_zephyrize_libraries_ifdef(CONFIG_PIGWEED_LOG_TOKENIZED_LIB
+  pw_log.facade
+  pw_log_tokenized
+)
+if(CONFIG_PIGWEED_LOG_TOKENIZED_LIB)
+  zephyr_include_directories(zephyr_public_overrides)
+endif()
+
+pw_add_library(pw_log_zephyr.tokenized_handler STATIC
+  SOURCES
+    pw_log_zephyr_tokenized_handler.cc
+  PUBLIC_DEPS
+    pw_log_tokenized.handler
+    pw_log_zephyr.tokenized_lib
+)
+
 pw_zephyrize_libraries_ifdef(CONFIG_PIGWEED_LOG_TOKENIZED
     pw_log.facade
     pw_log_tokenized
@@ -80,4 +94,4 @@
 pw_set_config_from_zephyr(CONFIG_PIGWEED_LOG_TOKENIZED_LEVEL_BITS PW_LOG_TOKENIZED_LEVEL_BITS)
 pw_set_config_from_zephyr(CONFIG_PIGWEED_LOG_TOKENIZED_LINE_BITS PW_LOG_TOKENIZED_LINE_BITS)
 pw_set_config_from_zephyr(CONFIG_PIGWEED_LOG_TOKENIZED_FLAG_BITS PW_LOG_TOKENIZED_FLAG_BITS)
-pw_set_config_from_zephyr(CONFIG_PIGWEED_LOG_TOKENIZED_MODULE_BITS PW_LOG_TOKENIZED_MODULE_BITS)
+pw_set_config_from_zephyr(CONFIG_PIGWEED_LOG_TOKENIZED_MODULE_BITS PW_LOG_TOKENIZED_MODULE_BITS)
\ No newline at end of file
diff --git a/pw_log_zephyr/Kconfig b/pw_log_zephyr/Kconfig
index 6ad21e2..75070cf 100644
--- a/pw_log_zephyr/Kconfig
+++ b/pw_log_zephyr/Kconfig
@@ -46,6 +46,18 @@
       automatically tokenize all the logging strings. This means that Pigweed
       will also tokenize all of Zephyr's logging statements.
 
+config PIGWEED_LOG_TOKENIZED_LIB
+    bool "Tokenize logging and implement your own pw_log_tokenized_HandleLog"
+    select PIGWEED_PREPROCESSOR
+    select PIGWEED_SYNC_INTERRUPT_SPIN_LOCK
+    select PIGWEED_SYS_IO
+    select PIGWEED_TOKENIZER
+    select LOG_CUSTOM_HEADER
+    help
+      Same as PIGWEED_LOG_TOKENIZED but you'll need to implement
+      pw_log_tokenized_HandleLog. This gives you flexibility to access handlers
+      outside of pigweed.
+
 config PIGWEED_LOG_NONE
     bool "Do not use pigweed logging"
     help
diff --git a/pw_log_zephyr/docs.rst b/pw_log_zephyr/docs.rst
index 975a53f..62638e0 100644
--- a/pw_log_zephyr/docs.rst
+++ b/pw_log_zephyr/docs.rst
@@ -35,9 +35,9 @@
 
 .. code-block::
 
-  add_library(log_tokenized_config INTERFACE)
-  target_compile_options(log_tokenized_config INTERFACE -include header_file_that_sets_that_macro.h)
-  pw_set_module_config(pw_log_tokenized_CONFIG log_tokenized_config)
+   add_library(log_tokenized_config INTERFACE)
+   target_compile_options(log_tokenized_config INTERFACE -include header_file_that_sets_that_macro.h)
+   pw_set_module_config(pw_log_tokenized_CONFIG log_tokenized_config)
 
 Setting the log level
 ---------------------
diff --git a/pw_log_zephyr/pw_log_zephyr_tokenized_handler.cc b/pw_log_zephyr/pw_log_zephyr_tokenized_handler.cc
index eeef915..8e4fd3c 100644
--- a/pw_log_zephyr/pw_log_zephyr_tokenized_handler.cc
+++ b/pw_log_zephyr/pw_log_zephyr_tokenized_handler.cc
@@ -39,8 +39,7 @@
 
   // Encode the tokenized message as Base64.
   const InlineBasicString base64_string =
-      tokenizer::PrefixedBase64Encode<log_tokenized::kEncodingBufferSizeBytes>(
-          span(log_buffer, size_bytes));
+      log_tokenized::PrefixedBase64Encode(log_buffer, size_bytes);
 
   if (base64_string.empty()) {
     return;
diff --git a/pw_malloc/docs.rst b/pw_malloc/docs.rst
index 2c0357d..53249f5 100644
--- a/pw_malloc/docs.rst
+++ b/pw_malloc/docs.rst
@@ -3,7 +3,6 @@
 ---------
 pw_malloc
 ---------
-
 This module defines an interface for replacing the standard libc dynamic memory
 operations.
 
@@ -19,9 +18,9 @@
 =====
 This module requires the following setup:
 
-  1. Chose a ``pw_malloc`` backend, or write one yourself.
-  2. If using GN build, Specify the ``pw_malloc_BACKEND`` GN build arg to point
-     the library that provides a ``pw_malloc`` backend.
+1. Choose a ``pw_malloc`` backend, or write one yourself.
+2. If using GN build, specify the ``pw_malloc_BACKEND`` GN build arg to point to
+   the library that provides a ``pw_malloc`` backend.
 
 Module usage
 ============
diff --git a/pw_metric/BUILD.bazel b/pw_metric/BUILD.bazel
index aa6d3e5..03fcd3a 100644
--- a/pw_metric/BUILD.bazel
+++ b/pw_metric/BUILD.bazel
@@ -17,8 +17,8 @@
     "pw_cc_library",
     "pw_cc_test",
 )
-load("//pw_protobuf_compiler:pw_proto_library.bzl", "pw_proto_library")
 load("//pw_build/bazel_internal:py_proto_library.bzl", "py_proto_library")
+load("//pw_protobuf_compiler:pw_proto_library.bzl", "pw_proto_library")
 
 package(default_visibility = ["//visibility:public"])
 
diff --git a/pw_metric/docs.rst b/pw_metric/docs.rst
index 9386365..21360dd 100644
--- a/pw_metric/docs.rst
+++ b/pw_metric/docs.rst
@@ -5,9 +5,8 @@
 =========
 
 .. attention::
-
-  This module is **not yet production ready**; ask us if you are interested in
-  using it out or have ideas about how to improve it.
+   This module is **not yet production ready**; ask us if you are interested in
+   trying it out or have ideas about how to improve it.
 
 --------
 Overview
@@ -45,30 +44,30 @@
 ``MySubsystem::metrics()`` member is not globally registered; the user is on
 their own for combining this subsystem's metrics with others.
 
-.. code::
+.. code-block::
 
-  #include "pw_metric/metric.h"
+   #include "pw_metric/metric.h"
 
-  class MySubsystem {
-   public:
-    void DoSomething() {
-      attempts_.Increment();
-      if (ActionSucceeds()) {
-        successes_.Increment();
-      }
-    }
-    Group& metrics() { return metrics_; }
+   class MySubsystem {
+    public:
+     void DoSomething() {
+       attempts_.Increment();
+       if (ActionSucceeds()) {
+         successes_.Increment();
+       }
+     }
+     Group& metrics() { return metrics_; }
 
-   private:
-    PW_METRIC_GROUP(metrics_, "my_subsystem");
-    PW_METRIC(metrics_, attempts_, "attempts", 0u);
-    PW_METRIC(metrics_, successes_, "successes", 0u);
-  };
+    private:
+     PW_METRIC_GROUP(metrics_, "my_subsystem");
+     PW_METRIC(metrics_, attempts_, "attempts", 0u);
+     PW_METRIC(metrics_, successes_, "successes", 0u);
+   };
 
 The metrics subsystem has no canonical output format at this time, but a JSON
 dump might look something like this:
 
-.. code:: none
+.. code-block:: none
 
   {
     "my_subsystem" : {
@@ -89,39 +88,39 @@
 
 **Before instrumenting:**
 
-.. code::
+.. code-block::
 
-  // This code was passed down from generations of developers before; no one
-  // knows what it does or how it works. But it needs to be fixed!
-  void OldCodeThatDoesntWorkButWeDontKnowWhy() {
-    if (some_variable) {
-      DoSomething();
-    } else {
-      DoSomethingElse();
-    }
-  }
+   // This code was passed down from generations of developers before; no one
+   // knows what it does or how it works. But it needs to be fixed!
+   void OldCodeThatDoesntWorkButWeDontKnowWhy() {
+     if (some_variable) {
+       DoSomething();
+     } else {
+       DoSomethingElse();
+     }
+   }
 
 **After instrumenting:**
 
-.. code::
+.. code-block::
 
-  #include "pw_metric/global.h"
-  #include "pw_metric/metric.h"
+   #include "pw_metric/global.h"
+   #include "pw_metric/metric.h"
 
-  PW_METRIC_GLOBAL(legacy_do_something, "legacy_do_something");
-  PW_METRIC_GLOBAL(legacy_do_something_else, "legacy_do_something_else");
+   PW_METRIC_GLOBAL(legacy_do_something, "legacy_do_something");
+   PW_METRIC_GLOBAL(legacy_do_something_else, "legacy_do_something_else");
 
-  // This code was passed down from generations of developers before; no one
-  // knows what it does or how it works. But it needs to be fixed!
-  void OldCodeThatDoesntWorkButWeDontKnowWhy() {
-    if (some_variable) {
-      legacy_do_something.Increment();
-      DoSomething();
-    } else {
-      legacy_do_something_else.Increment();
-      DoSomethingElse();
-    }
-  }
+   // This code was passed down from generations of developers before; no one
+   // knows what it does or how it works. But it needs to be fixed!
+   void OldCodeThatDoesntWorkButWeDontKnowWhy() {
+     if (some_variable) {
+       legacy_do_something.Increment();
+       DoSomething();
+     } else {
+       legacy_do_something_else.Increment();
+       DoSomethingElse();
+     }
+   }
 
 In this case, the developer merely had to add the metrics header, define some
 metrics, and then start incrementing them. These metrics will be available
@@ -180,20 +179,20 @@
 
 .. cpp:class:: pw::metric::Metric
 
-  .. cpp:function:: Increment(uint32_t amount = 0)
+   .. cpp:function:: Increment(uint32_t amount = 0)
 
-    Increment the metric by the given amount. Results in undefined behaviour if
-    the metric is not of type int.
+      Increment the metric by the given amount. Results in undefined behaviour if
+      the metric is not of type int.
 
-  .. cpp:function:: Set(uint32_t value)
+   .. cpp:function:: Set(uint32_t value)
 
-    Set the metric to the given value. Results in undefined behaviour if the
-    metric is not of type int.
+      Set the metric to the given value. Results in undefined behaviour if the
+      metric is not of type int.
 
-  .. cpp:function:: Set(float value)
+   .. cpp:function:: Set(float value)
 
-    Set the metric to the given value. Results in undefined behaviour if the
-    metric is not of type float.
+      Set the metric to the given value. Results in undefined behaviour if the
+      metric is not of type float.
 
 Group
 -----
@@ -208,39 +207,39 @@
 
 .. cpp:class:: pw::metric::Group
 
-  .. cpp:function:: Dump(int indent_level = 0)
+   .. cpp:function:: Dump(int indent_level = 0)
 
-    Recursively dump a metrics group to ``pw_log``. Produces output like:
+      Recursively dump a metrics group to ``pw_log``. Produces output like:
 
-    .. code:: none
+      .. code-block:: none
 
-      "$6doqFw==": {
-        "$05OCZw==": {
-          "$VpPfzg==": 1,
-          "$LGPMBQ==": 1.000000,
-          "$+iJvUg==": 5,
-        }
-        "$9hPNxw==": 65,
-        "$oK7HmA==": 13,
-        "$FCM4qQ==": 0,
-      }
+         "$6doqFw==": {
+           "$05OCZw==": {
+             "$VpPfzg==": 1,
+             "$LGPMBQ==": 1.000000,
+             "$+iJvUg==": 5,
+           }
+           "$9hPNxw==": 65,
+           "$oK7HmA==": 13,
+           "$FCM4qQ==": 0,
+         }
 
-    Note the metric names are tokenized with base64. Decoding requires using
-    the Pigweed detokenizer. With a detokenizing-enabled logger, you could get
-    something like:
+      Note the metric names are tokenized with base64. Decoding requires using
+      the Pigweed detokenizer. With a detokenizing-enabled logger, you could get
+      something like:
 
-    .. code:: none
+      .. code-block:: none
 
-      "i2c_1": {
-        "gyro": {
-          "num_sampleses": 1,
-          "init_time_us": 1.000000,
-          "initialized": 5,
-        }
-        "bus_errors": 65,
-        "transactions": 13,
-        "bytes_sent": 0,
-      }
+         "i2c_1": {
+           "gyro": {
+             "num_sampleses": 1,
+             "init_time_us": 1.000000,
+             "initialized": 5,
+           }
+           "bus_errors": 65,
+           "transactions": 13,
+           "bytes_sent": 0,
+         }
 
 Macros
 ------
@@ -253,150 +252,147 @@
 .. cpp:function:: PW_METRIC_STATIC(identifier, name, value)
 .. cpp:function:: PW_METRIC_STATIC(group, identifier, name, value)
 
-  Declare a metric, optionally adding it to a group.
+   Declare a metric, optionally adding it to a group.
 
-  - **identifier** - An identifier name for the created variable or member.
-    For example: ``i2c_transactions`` might be used as a local or global
-    metric; inside a class, could be named according to members
-    (``i2c_transactions_`` for Google's C++ style).
-  - **name** - The string name for the metric. This will be tokenized. There
-    are no restrictions on the contents of the name; however, consider
-    restricting these to be valid C++ identifiers to ease integration with
-    other systems.
-  - **value** - The initial value for the metric. Must be either a floating
-    point value (e.g. ``3.2f``) or unsigned int (e.g. ``21u``).
-  - **group** - A ``pw::metric::Group`` instance. If provided, the metric is
-    added to the given group.
+   - **identifier** - An identifier name for the created variable or member.
+     For example: ``i2c_transactions`` might be used as a local or global
+     metric; inside a class, could be named according to members
+     (``i2c_transactions_`` for Google's C++ style).
+   - **name** - The string name for the metric. This will be tokenized. There
+     are no restrictions on the contents of the name; however, consider
+     restricting these to be valid C++ identifiers to ease integration with
+     other systems.
+   - **value** - The initial value for the metric. Must be either a floating
+     point value (e.g. ``3.2f``) or unsigned int (e.g. ``21u``).
+   - **group** - A ``pw::metric::Group`` instance. If provided, the metric is
+     added to the given group.
 
-  The macro declares a variable or member named "name" with type
-  ``pw::metric::Metric``, and works in three contexts: global, local, and
-  member.
+   The macro declares a variable or member named "name" with type
+   ``pw::metric::Metric``, and works in three contexts: global, local, and
+   member.
 
-  If the `_STATIC` variant is used, the macro declares a variable with static
-  storage. These can be used in function scopes, but not in classes.
+   If the `_STATIC` variant is used, the macro declares a variable with static
+   storage. These can be used in function scopes, but not in classes.
 
-  1. At global scope:
+   1. At global scope:
 
-    .. code::
+      .. code-block::
 
-      PW_METRIC(foo, "foo", 15.5f);
+         PW_METRIC(foo, "foo", 15.5f);
 
-      void MyFunc() {
-        foo.Increment();
-      }
+         void MyFunc() {
+           foo.Increment();
+         }
 
-  2. At local function or member function scope:
+   2. At local function or member function scope:
 
-    .. code::
+      .. code-block::
 
-      void MyFunc() {
-        PW_METRIC(foo, "foo", 15.5f);
-        foo.Increment();
-        // foo goes out of scope here; be careful!
-      }
+         void MyFunc() {
+           PW_METRIC(foo, "foo", 15.5f);
+           foo.Increment();
+           // foo goes out of scope here; be careful!
+         }
 
-  3. At member level inside a class or struct:
+   3. At member level inside a class or struct:
 
-    .. code::
+      .. code-block::
 
-      struct MyStructy {
-        void DoSomething() {
-          somethings.Increment();
-        }
-        // Every instance of MyStructy will have a separate somethings counter.
-        PW_METRIC(somethings, "somethings", 0u);
-      }
+         struct MyStructy {
+           void DoSomething() {
+             somethings.Increment();
+           }
+           // Every instance of MyStructy will have a separate somethings counter.
+           PW_METRIC(somethings, "somethings", 0u);
+         }
 
-  You can also put a metric into a group with the macro. Metrics can belong to
-  strictly one group, otherwise an assertion will fail. Example:
+   You can also put a metric into a group with the macro. Metrics can belong to
+   strictly one group, otherwise an assertion will fail. Example:
 
-  .. code::
+   .. code-block::
 
-    PW_METRIC_GROUP(my_group, "my_group");
-    PW_METRIC(my_group, foo, "foo", 0.2f);
-    PW_METRIC(my_group, bar, "bar", 44000u);
-    PW_METRIC(my_group, zap, "zap", 3.14f);
+      PW_METRIC_GROUP(my_group, "my_group");
+      PW_METRIC(my_group, foo, "foo", 0.2f);
+      PW_METRIC(my_group, bar, "bar", 44000u);
+      PW_METRIC(my_group, zap, "zap", 3.14f);
 
-  .. tip::
-
-    If you want a globally registered metric, see ``pw_metric/global.h``; in
-    that contexts, metrics are globally registered without the need to
-    centrally register in a single place.
+   .. tip::
+      If you want a globally registered metric, see ``pw_metric/global.h``; in
+      that contexts, metrics are globally registered without the need to
+      centrally register in a single place.
 
 .. cpp:function:: PW_METRIC_GROUP(identifier, name)
 .. cpp:function:: PW_METRIC_GROUP(parent_group, identifier, name)
 .. cpp:function:: PW_METRIC_GROUP_STATIC(identifier, name)
 .. cpp:function:: PW_METRIC_GROUP_STATIC(parent_group, identifier, name)
 
-  Declares a ``pw::metric::Group`` with name name; the name is tokenized.
-  Works similar to ``PW_METRIC`` and can be used in the same contexts (global,
-  local, and member). Optionally, the group can be added to a parent group.
+   Declares a ``pw::metric::Group`` with name name; the name is tokenized.
+   Works similar to ``PW_METRIC`` and can be used in the same contexts (global,
+   local, and member). Optionally, the group can be added to a parent group.
 
-  If the `_STATIC` variant is used, the macro declares a variable with static
-  storage. These can be used in function scopes, but not in classes.
+   If the `_STATIC` variant is used, the macro declares a variable with static
+   storage. These can be used in function scopes, but not in classes.
 
-  Example:
+   Example:
 
-  .. code::
+   .. code-block::
 
-    PW_METRIC_GROUP(my_group, "my_group");
-    PW_METRIC(my_group, foo, "foo", 0.2f);
-    PW_METRIC(my_group, bar, "bar", 44000u);
-    PW_METRIC(my_group, zap, "zap", 3.14f);
+      PW_METRIC_GROUP(my_group, "my_group");
+      PW_METRIC(my_group, foo, "foo", 0.2f);
+      PW_METRIC(my_group, bar, "bar", 44000u);
+      PW_METRIC(my_group, zap, "zap", 3.14f);
 
 .. cpp:function:: PW_METRIC_GLOBAL(identifier, name, value)
 
-  Declare a ``pw::metric::Metric`` with name name, and register it in the
-  global metrics list ``pw::metric::global_metrics``.
+   Declare a ``pw::metric::Metric`` with name name, and register it in the
+   global metrics list ``pw::metric::global_metrics``.
 
-  Example:
+   Example:
 
-  .. code::
+   .. code-block::
 
-    #include "pw_metric/metric.h"
-    #include "pw_metric/global.h"
+      #include "pw_metric/metric.h"
+      #include "pw_metric/global.h"
 
-    // No need to coordinate collection of foo and bar; they're autoregistered.
-    PW_METRIC_GLOBAL(foo, "foo", 0.2f);
-    PW_METRIC_GLOBAL(bar, "bar", 44000u);
+      // No need to coordinate collection of foo and bar; they're autoregistered.
+      PW_METRIC_GLOBAL(foo, "foo", 0.2f);
+      PW_METRIC_GLOBAL(bar, "bar", 44000u);
 
-  Note that metrics defined with ``PW_METRIC_GLOBAL`` should never be added to
-  groups defined with ``PW_METRIC_GROUP_GLOBAL``. Each metric can only belong
-  to one group, and metrics defined with ``PW_METRIC_GLOBAL`` are
-  pre-registered with the global metrics list.
+   Note that metrics defined with ``PW_METRIC_GLOBAL`` should never be added to
+   groups defined with ``PW_METRIC_GROUP_GLOBAL``. Each metric can only belong
+   to one group, and metrics defined with ``PW_METRIC_GLOBAL`` are
+   pre-registered with the global metrics list.
 
-  .. attention::
-
-    Do not create ``PW_METRIC_GLOBAL`` instances anywhere other than global
-    scope. Putting these on an instance (member context) would lead to dangling
-    pointers and misery. Metrics are never deleted or unregistered!
+   .. attention::
+      Do not create ``PW_METRIC_GLOBAL`` instances anywhere other than global
+      scope. Putting these on an instance (member context) would lead to dangling
+      pointers and misery. Metrics are never deleted or unregistered!
 
 .. cpp:function:: PW_METRIC_GROUP_GLOBAL(identifier, name, value)
 
-  Declare a ``pw::metric::Group`` with name name, and register it in the
-  global metric groups list ``pw::metric::global_groups``.
+   Declare a ``pw::metric::Group`` with name name, and register it in the
+   global metric groups list ``pw::metric::global_groups``.
 
-  Note that metrics created with ``PW_METRIC_GLOBAL`` should never be added to
-  groups! Instead, just create a freestanding metric and register it into the
-  global group (like in the example below).
+   Note that metrics created with ``PW_METRIC_GLOBAL`` should never be added to
+   groups! Instead, just create a freestanding metric and register it into the
+   global group (like in the example below).
 
-  Example:
+   Example:
 
-  .. code::
+   .. code-block::
 
-    #include "pw_metric/metric.h"
-    #include "pw_metric/global.h"
+      #include "pw_metric/metric.h"
+      #include "pw_metric/global.h"
 
-    // No need to coordinate collection of this group; it's globally registered.
-    PW_METRIC_GROUP_GLOBAL(leagcy_system, "legacy_system");
-    PW_METRIC(leagcy_system, foo, "foo",0.2f);
-    PW_METRIC(leagcy_system, bar, "bar",44000u);
+      // No need to coordinate collection of this group; it's globally registered.
+      PW_METRIC_GROUP_GLOBAL(leagcy_system, "legacy_system");
+      PW_METRIC(leagcy_system, foo, "foo",0.2f);
+      PW_METRIC(leagcy_system, bar, "bar",44000u);
 
-  .. attention::
-
-    Do not create ``PW_METRIC_GROUP_GLOBAL`` instances anywhere other than
-    global scope. Putting these on an instance (member context) would lead to
-    dangling pointers and misery. Metrics are never deleted or unregistered!
+   .. attention::
+      Do not create ``PW_METRIC_GROUP_GLOBAL`` instances anywhere other than
+      global scope. Putting these on an instance (member context) would lead to
+      dangling pointers and misery. Metrics are never deleted or unregistered!
 
 ----------------------
 Usage & Best Practices
@@ -413,62 +409,62 @@
 scope, then interacted with via tasks or threads. For example, consider a
 hypothetical global ``Uart`` object:
 
-.. code::
+.. code-block::
 
-  class Uart {
-   public:
-    Uart(span<std::byte> rx_buffer, span<std::byte> tx_buffer)
-      : rx_buffer_(rx_buffer), tx_buffer_(tx_buffer) {}
+   class Uart {
+    public:
+     Uart(span<std::byte> rx_buffer, span<std::byte> tx_buffer)
+       : rx_buffer_(rx_buffer), tx_buffer_(tx_buffer) {}
 
-    // Send/receive here...
+     // Send/receive here...
 
-   private:
-    pw::span<std::byte> rx_buffer;
-    pw::span<std::byte> tx_buffer;
-  };
+    private:
+     pw::span<std::byte> rx_buffer;
+     pw::span<std::byte> tx_buffer;
+   };
 
-  std::array<std::byte, 512> uart_rx_buffer;
-  std::array<std::byte, 512> uart_tx_buffer;
-  Uart uart1(uart_rx_buffer, uart_tx_buffer);
+   std::array<std::byte, 512> uart_rx_buffer;
+   std::array<std::byte, 512> uart_tx_buffer;
+   Uart uart1(uart_rx_buffer, uart_tx_buffer);
 
 Through the course of building a product, the team may want to add metrics to
 the UART to for example gain insight into which operations are triggering lots
 of data transfer. When adding metrics to the above imaginary UART object, one
 might consider the following approach:
 
-.. code::
+.. code-block::
 
-  class Uart {
-   public:
-    Uart(span<std::byte> rx_buffer,
-         span<std::byte> tx_buffer,
-         Group& parent_metrics)
-      : rx_buffer_(rx_buffer),
-        tx_buffer_(tx_buffer) {
-        // PROBLEM! parent_metrics may not be constructed if it's a reference
-        // to a static global.
-        parent_metrics.Add(tx_bytes_);
-        parent_metrics.Add(rx_bytes_);
-     }
+   class Uart {
+    public:
+     Uart(span<std::byte> rx_buffer,
+          span<std::byte> tx_buffer,
+          Group& parent_metrics)
+       : rx_buffer_(rx_buffer),
+         tx_buffer_(tx_buffer) {
+         // PROBLEM! parent_metrics may not be constructed if it's a reference
+         // to a static global.
+         parent_metrics.Add(tx_bytes_);
+         parent_metrics.Add(rx_bytes_);
+      }
 
-    // Send/receive here which increment tx/rx_bytes.
+     // Send/receive here which increment tx/rx_bytes.
 
-   private:
-    pw::span<std::byte> rx_buffer;
-    pw::span<std::byte> tx_buffer;
+    private:
+     pw::span<std::byte> rx_buffer;
+     pw::span<std::byte> tx_buffer;
 
-    PW_METRIC(tx_bytes_, "tx_bytes", 0);
-    PW_METRIC(rx_bytes_, "rx_bytes", 0);
-  };
+     PW_METRIC(tx_bytes_, "tx_bytes", 0);
+     PW_METRIC(rx_bytes_, "rx_bytes", 0);
+   };
 
-  PW_METRIC_GROUP(global_metrics, "/");
-  PW_METRIC_GROUP(global_metrics, uart1_metrics, "uart1");
+   PW_METRIC_GROUP(global_metrics, "/");
+   PW_METRIC_GROUP(global_metrics, uart1_metrics, "uart1");
 
-  std::array<std::byte, 512> uart_rx_buffer;
-  std::array<std::byte, 512> uart_tx_buffer;
-  Uart uart1(uart_rx_buffer,
-             uart_tx_buffer,
-             uart1_metrics);
+   std::array<std::byte, 512> uart_rx_buffer;
+   std::array<std::byte, 512> uart_tx_buffer;
+   Uart uart1(uart_rx_buffer,
+              uart_tx_buffer,
+              uart1_metrics);
 
 However, this **is incorrect**, since the ``parent_metrics`` (pointing to
 ``uart1_metrics`` in this case) may not be constructed at the point of
@@ -484,50 +480,49 @@
 which is called after all static constructors have run. This approach works
 correctly, even when the objects are allocated globally:
 
-.. code::
+.. code-block::
 
-  class Uart {
-   public:
-    // Note that metrics is not passed in here at all.
-    Uart(span<std::byte> rx_buffer,
-         span<std::byte> tx_buffer)
-      : rx_buffer_(rx_buffer),
-        tx_buffer_(tx_buffer) {}
+   class Uart {
+    public:
+     // Note that metrics is not passed in here at all.
+     Uart(span<std::byte> rx_buffer,
+          span<std::byte> tx_buffer)
+       : rx_buffer_(rx_buffer),
+         tx_buffer_(tx_buffer) {}
 
-     // Precondition: parent_metrics is already constructed.
-     void Init(Group& parent_metrics) {
-        parent_metrics.Add(tx_bytes_);
-        parent_metrics.Add(rx_bytes_);
-     }
+      // Precondition: parent_metrics is already constructed.
+      void Init(Group& parent_metrics) {
+         parent_metrics.Add(tx_bytes_);
+         parent_metrics.Add(rx_bytes_);
+      }
 
-    // Send/receive here which increment tx/rx_bytes.
+     // Send/receive here which increment tx/rx_bytes.
 
-   private:
-    pw::span<std::byte> rx_buffer;
-    pw::span<std::byte> tx_buffer;
+    private:
+     pw::span<std::byte> rx_buffer;
+     pw::span<std::byte> tx_buffer;
 
-    PW_METRIC(tx_bytes_, "tx_bytes", 0);
-    PW_METRIC(rx_bytes_, "rx_bytes", 0);
-  };
+     PW_METRIC(tx_bytes_, "tx_bytes", 0);
+     PW_METRIC(rx_bytes_, "rx_bytes", 0);
+   };
 
-  PW_METRIC_GROUP(root_metrics, "/");
-  PW_METRIC_GROUP(root_metrics, uart1_metrics, "uart1");
+   PW_METRIC_GROUP(root_metrics, "/");
+   PW_METRIC_GROUP(root_metrics, uart1_metrics, "uart1");
 
-  std::array<std::byte, 512> uart_rx_buffer;
-  std::array<std::byte, 512> uart_tx_buffer;
-  Uart uart1(uart_rx_buffer,
-             uart_tx_buffer);
+   std::array<std::byte, 512> uart_rx_buffer;
+   std::array<std::byte, 512> uart_tx_buffer;
+   Uart uart1(uart_rx_buffer,
+              uart_tx_buffer);
 
-  void main() {
-    // uart1_metrics is guaranteed to be initialized by this point, so it is
-    safe to pass it to Init().
-    uart1.Init(uart1_metrics);
-  }
+   void main() {
+     // uart1_metrics is guaranteed to be initialized by this point, so it is
+     // safe to pass it to Init().
+     uart1.Init(uart1_metrics);
+   }
 
 .. attention::
-
-  Be extra careful about **static global metric registration**. Consider using
-  the ``Init()`` pattern.
+   Be extra careful about **static global metric registration**. Consider using
+   the ``Init()`` pattern.
 
 Metric member order matters in objects
 --------------------------------------
@@ -535,42 +530,42 @@
 within a group declared inside the class. For example, the following class will
 work fine:
 
-.. code::
+.. code-block::
 
-  #include "pw_metric/metric.h"
+   #include "pw_metric/metric.h"
 
-  class PowerSubsystem {
-   public:
-     Group& metrics() { return metrics_; }
-     const Group& metrics() const { return metrics_; }
+   class PowerSubsystem {
+    public:
+      Group& metrics() { return metrics_; }
+      const Group& metrics() const { return metrics_; }
 
-   private:
-    PW_METRIC_GROUP(metrics_, "power");  // Note metrics_ declared first.
-    PW_METRIC(metrics_, foo, "foo", 0.2f);
-    PW_METRIC(metrics_, bar, "bar", 44000u);
-  };
+    private:
+     PW_METRIC_GROUP(metrics_, "power");  // Note metrics_ declared first.
+     PW_METRIC(metrics_, foo, "foo", 0.2f);
+     PW_METRIC(metrics_, bar, "bar", 44000u);
+   };
 
 but the following one will not since the group is constructed after the metrics
 (and will result in a compile error):
 
-.. code::
+.. code-block::
 
-  #include "pw_metric/metric.h"
+   #include "pw_metric/metric.h"
 
-  class PowerSubsystem {
-   public:
-     Group& metrics() { return metrics_; }
-     const Group& metrics() const { return metrics_; }
+   class PowerSubsystem {
+    public:
+      Group& metrics() { return metrics_; }
+      const Group& metrics() const { return metrics_; }
 
-   private:
-    PW_METRIC(metrics_, foo, "foo", 0.2f);
-    PW_METRIC(metrics_, bar, "bar", 44000u);
-    PW_METRIC_GROUP(metrics_, "power");  // Error: metrics_ must be first.
-  };
+    private:
+     PW_METRIC(metrics_, foo, "foo", 0.2f);
+     PW_METRIC(metrics_, bar, "bar", 44000u);
+     PW_METRIC_GROUP(metrics_, "power");  // Error: metrics_ must be first.
+   };
 
 .. attention::
 
-  Put **groups before metrics** when declaring metrics members inside classes.
+   Put **groups before metrics** when declaring metrics members inside classes.
 
 Thread safety
 -------------
@@ -586,9 +581,9 @@
 
 .. attention::
 
-  **You must synchronize access to metrics**. ``pw_metrics`` does not
-  internally synchronize access during construction. Metric Set/Increment are
-  safe.
+   **You must synchronize access to metrics**. ``pw_metrics`` does not
+   internally synchronize access during construction. Metric Set/Increment are
+   safe.
 
 Lifecycle
 ---------
@@ -607,26 +602,25 @@
 
 Below is an example that **is incorrect**. Don't do what follows!
 
-.. code::
+.. code-block::
 
-  #include "pw_metric/metric.h"
+   #include "pw_metric/metric.h"
 
-  void main() {
-    PW_METRIC_GROUP(root, "/");
-    {
-      // BAD! The metrics have a different lifetime than the group.
-      PW_METRIC(root, temperature, "temperature_f", 72.3f);
-      PW_METRIC(root, humidity, "humidity_relative_percent", 33.2f);
-    }
-    // OOPS! root now has a linked list that points to the destructed
-    // "humidity" object.
-  }
+   void main() {
+     PW_METRIC_GROUP(root, "/");
+     {
+       // BAD! The metrics have a different lifetime than the group.
+       PW_METRIC(root, temperature, "temperature_f", 72.3f);
+       PW_METRIC(root, humidity, "humidity_relative_percent", 33.2f);
+     }
+     // OOPS! root now has a linked list that points to the destructed
+     // "humidity" object.
+   }
 
 .. attention::
-
-  **Don't destruct metrics**. Metrics are designed to be registered /
-  structured upfront, then manipulated during a device's active phase. They do
-  not support destruction.
+   **Don't destruct metrics**. Metrics are designed to be registered /
+   structured upfront, then manipulated during a device's active phase. They do
+   not support destruction.
 
 -----------------
 Exporting metrics
@@ -646,16 +640,16 @@
 The returned metric objects have flattened paths to the root. For example, the
 returned metrics (post detokenization and jsonified) might look something like:
 
-.. code:: none
+.. code-block:: none
 
-  {
-    "/i2c1/failed_txns": 17,
-    "/i2c1/total_txns": 2013,
-    "/i2c1/gyro/resets": 24,
-    "/i2c1/gyro/hangs": 1,
-    "/spi1/thermocouple/reads": 242,
-    "/spi1/thermocouple/temp_celsius": 34.52,
-  }
+   {
+     "/i2c1/failed_txns": 17,
+     "/i2c1/total_txns": 2013,
+     "/i2c1/gyro/resets": 24,
+     "/i2c1/gyro/hangs": 1,
+     "/spi1/thermocouple/reads": 242,
+     "/spi1/thermocouple/temp_celsius": 34.52,
+   }
 
 Note that there is no nesting of the groups; the nesting is implied from the
 path.
@@ -672,7 +666,7 @@
 
 For example:
 
-.. code::
+.. code-block::
 
    #include "pw_rpc/server.h"
    #include "pw_metric/metric.h"
@@ -705,25 +699,23 @@
    }
 
 .. attention::
-
-  Take care when exporting metrics. Ensure **appropriate access control** is in
-  place. In some cases it may make sense to entirely disable metrics export for
-  production builds. Although reading metrics via RPC won't influence the
-  device, in some cases the metrics could expose sensitive information if
-  product owners are not careful.
+   Take care when exporting metrics. Ensure **appropriate access control** is in
+   place. In some cases it may make sense to entirely disable metrics export for
+   production builds. Although reading metrics via RPC won't influence the
+   device, in some cases the metrics could expose sensitive information if
+   product owners are not careful.
 
 .. attention::
+   **MetricService::Get is a synchronous RPC method**
 
-  **MetricService::Get is a synchronous RPC method**
+   Calls to ``MetricService::Get`` are blocking and will send all metrics
+   immediately, even though it is a server-streaming RPC. This will work fine if
+   the device doesn't have too many metrics, or doesn't have concurrent RPCs
+   like logging, but could be a problem in some cases.
 
-  Calls to is ``MetricService::Get`` are blocking and will send all metrics
-  immediately, even though it is a server-streaming RPC. This will work fine if
-  the device doesn't have too many metrics, or doesn't have concurrent RPCs
-  like logging, but could be a problem in some cases.
-
-  We plan to offer an async version where the application is responsible for
-  pumping the metrics into the streaming response. This gives flow control to
-  the application.
+   We plan to offer an async version where the application is responsible for
+   pumping the metrics into the streaming response. This gives flow control to
+   the application.
 
 -----------
 Size report
@@ -734,10 +726,9 @@
 .. include:: metric_size_report
 
 .. attention::
-
-  At time of writing, **the above sizes show an unexpectedly large flash
-  impact**. We are investigating why GCC is inserting large global static
-  constructors per group, when all the logic should be reused across objects.
+   At time of writing, **the above sizes show an unexpectedly large flash
+   impact**. We are investigating why GCC is inserting large global static
+   constructors per group, when all the logic should be reused across objects.
 
 -------------
 Metric Parser
diff --git a/pw_package/docs.rst b/pw_package/docs.rst
index a7cb2c9..8d2e579 100644
--- a/pw_package/docs.rst
+++ b/pw_package/docs.rst
@@ -142,3 +142,4 @@
        }
      }
    }
+
diff --git a/pw_package/py/BUILD.gn b/pw_package/py/BUILD.gn
index a00e460..6d61a00 100644
--- a/pw_package/py/BUILD.gn
+++ b/pw_package/py/BUILD.gn
@@ -29,6 +29,7 @@
     "pw_package/packages/abseil_cpp.py",
     "pw_package/packages/arduino_core.py",
     "pw_package/packages/boringssl.py",
+    "pw_package/packages/chre.py",
     "pw_package/packages/chromium_verifier.py",
     "pw_package/packages/crlset.py",
     "pw_package/packages/emboss.py",
diff --git a/pw_package/py/pw_package/packages/arduino_core.py b/pw_package/py/pw_package/packages/arduino_core.py
index ecbbfe4..f3fcdf7 100644
--- a/pw_package/py/pw_package/packages/arduino_core.py
+++ b/pw_package/py/pw_package/packages/arduino_core.py
@@ -41,53 +41,61 @@
         """Check for arduino core availability in pigweed_internal cipd."""
         package_path = path.parent.resolve()
         core_name = self.name
-        core_cache_path = package_path / ".cache" / core_name
+        core_cache_path = package_path / '.cache' / core_name
         core_cache_path.mkdir(parents=True, exist_ok=True)
 
-        cipd_package_subpath = "pigweed_internal/third_party/"
+        cipd_package_subpath = 'pigweed_internal/third_party/'
         cipd_package_subpath += core_name
-        cipd_package_subpath += "/${platform}"
+        cipd_package_subpath += '/${platform}'
 
         # Check if teensy cipd package is readable
-
         with tempfile.NamedTemporaryFile(
-            prefix='cipd', delete=True
+            prefix='cipd', delete=True, dir=core_cache_path
         ) as temp_json:
+            temp_json_path = Path(temp_json.name)
             cipd_acl_check_command = [
-                "cipd",
-                "acl-check",
+                'cipd',
+                'acl-check',
                 cipd_package_subpath,
-                "-reader",
-                "-json-output",
-                temp_json.name,
+                '-reader',
+                '-json-output',
+                str(temp_json_path),
             ]
             subprocess.run(cipd_acl_check_command, capture_output=True)
-            # Return if no packages are readable.
-            if not json.load(temp_json)['result']:
+
+            # Return if cipd_package_subpath does not exist or is not readable
+            # by the current user.
+            if not temp_json_path.is_file():
+                # Return and proceed with normal installation.
+                return
+            result_text = temp_json_path.read_text()
+            result_dict = json.loads(result_text)
+            if 'result' not in result_dict:
+                # Return and proceed with normal installation.
                 return
 
         def _run_command(command):
-            _LOG.debug("Running: `%s`", " ".join(command))
+            _LOG.debug('Running: `%s`', ' '.join(command))
             result = subprocess.run(command, capture_output=True)
             _LOG.debug(
-                "Output:\n%s", result.stdout.decode() + result.stderr.decode()
+                'Output:\n%s', result.stdout.decode() + result.stderr.decode()
             )
 
-        _run_command(["cipd", "init", "-force", core_cache_path.as_posix()])
+        _run_command(['cipd', 'init', '-force', core_cache_path.as_posix()])
         _run_command(
             [
-                "cipd",
-                "install",
+                'cipd',
+                'install',
                 cipd_package_subpath,
-                "-root",
+                '-root',
                 core_cache_path.as_posix(),
-                "-force",
+                '-force',
             ]
         )
 
         _LOG.debug(
-            "Available Cache Files:\n%s",
-            "\n".join([p.as_posix() for p in core_cache_path.glob("*")]),
+            'Available Cache Files:\n%s',
+            '\n'.join([p.as_posix() for p in core_cache_path.glob('*')]),
         )
 
     def install(self, path: Path) -> None:
@@ -118,7 +126,7 @@
         for hardware_dir in [
             path for path in (path / 'hardware').iterdir() if path.is_dir()
         ]:
-            if path.name in ["arduino", "tools"]:
+            if path.name in ['arduino', 'tools']:
                 continue
             for subdir in [
                 path for path in hardware_dir.iterdir() if path.is_dir()
@@ -132,7 +140,7 @@
                 f'  pw_arduino_build_PACKAGE_NAME = "{arduino_package_name}"',
                 '  pw_arduino_build_BOARD = "BOARD_NAME"',
             ]
-            message += ["\n".join(message_gn_args)]
+            message += ['\n'.join(message_gn_args)]
             message += [
                 'Where BOARD_NAME is any supported board.',
                 # Have arduino_builder command appear on it's own line.
diff --git a/pw_package/py/pw_package/packages/chre.py b/pw_package/py/pw_package/packages/chre.py
new file mode 100644
index 0000000..111e0c0
--- /dev/null
+++ b/pw_package/py/pw_package/packages/chre.py
@@ -0,0 +1,43 @@
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Install and check status of CHRE."""
+
+import pathlib
+from typing import Sequence
+
+import pw_package.git_repo
+import pw_package.package_manager
+
+
+class Chre(pw_package.git_repo.GitRepo):
+    """Install and check status of CHRE."""
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(
+            *args,
+            name='chre',
+            url='https://android.googlesource.com/platform/system/chre',
+            commit='d768690052557f0d486eea2f9fb2b26a91f59372',
+            **kwargs,
+        )
+
+    def info(self, path: pathlib.Path) -> Sequence[str]:
+        return (
+            f'{self.name} installed in: {path}',
+            "Enable by running 'gn args out' and adding this line:",
+            f'  dir_pw_third_party_chre = "{path}"',
+        )
+
+
+pw_package.package_manager.register(Chre)
diff --git a/pw_package/py/pw_package/packages/picotool.py b/pw_package/py/pw_package/packages/picotool.py
index 7da51ba..94acc55 100644
--- a/pw_package/py/pw_package/packages/picotool.py
+++ b/pw_package/py/pw_package/packages/picotool.py
@@ -46,7 +46,10 @@
 
         self._pico_tool_repo = pw_package.git_repo.GitRepo(
             name='picotool',
-            url='https://github.com/raspberrypi/picotool.git',
+            url=(
+                'https://pigweed.googlesource.com/third_party/'
+                'github/raspberrypi/picotool.git'
+            ),
             commit='f6fe6b7c321a2def8950d2a440335dfba19e2eab',
         )
 
diff --git a/pw_package/py/pw_package/packages/zephyr.py b/pw_package/py/pw_package/packages/zephyr.py
index 02102c2..fcd0e40 100644
--- a/pw_package/py/pw_package/packages/zephyr.py
+++ b/pw_package/py/pw_package/packages/zephyr.py
@@ -34,16 +34,16 @@
     def __init__(self, *args, **kwargs):
         super().__init__(
             *args,
-            name="zephyr",
-            url="https://github.com/zephyrproject-rtos/zephyr",
-            commit="356c8cbe63ae01b3ab438382639d25bb418a0213",  # v3.4 release
+            name='zephyr',
+            url='https://github.com/zephyrproject-rtos/zephyr',
+            commit='356c8cbe63ae01b3ab438382639d25bb418a0213',  # v3.4 release
             **kwargs,
         )
 
     def info(self, path: pathlib.Path) -> Sequence[str]:
         return (
-            f"{self.name} installed in: {path}",
-            "Enable by running 'gn args out' and adding this line:",
+            f'{self.name} installed in: {path}',
+            'Enable by running "gn args out" and adding this line:',
             f'  dir_pw_third_party_zephyr = "{path}"',
         )
 
@@ -51,58 +51,67 @@
     def __populate_download_cache_from_cipd(path: pathlib.Path) -> None:
         """Check for Zephyr SDK in cipd"""
         package_path = path.parent.resolve()
-        core_cache_path = package_path / "zephyr_sdk"
+        core_cache_path = package_path / 'zephyr_sdk'
         core_cache_path.mkdir(parents=True, exist_ok=True)
 
-        cipd_package_subpath = "infra/3pp/tools/zephyr_sdk/${platform}"
+        cipd_package_subpath = 'infra/3pp/tools/zephyr_sdk/${platform}'
 
-        # Check if a teensy cipd package is readable
+        # Check if the zephyr_sdk cipd package is readable
         with tempfile.NamedTemporaryFile(
-            prefix="cipd", delete=True
+            prefix='cipd', delete=True
         ) as temp_json:
+            temp_json_path = pathlib.Path(temp_json.name)
             cipd_acl_check_command = [
-                "cipd",
-                "acl-check",
+                'cipd',
+                'acl-check',
                 cipd_package_subpath,
-                "-reader",
-                "-json-output",
-                temp_json.name,
+                '-reader',
+                '-json-output',
+                str(temp_json_path),
             ]
             subprocess.run(cipd_acl_check_command, capture_output=True)
 
             # Return if no packages are readable.
-            if not json.load(temp_json)["result"]:
-                raise RuntimeError("Failed to verify cipd is readable")
+            if not temp_json_path.is_file():
+                raise RuntimeError(
+                    'Failed to verify zephyr_sdk cipd package is readable.'
+                )
+            result_text = temp_json_path.read_text()
+            result_dict = json.loads(result_text)
+            if 'result' not in result_dict:
+                raise RuntimeError(
+                    'Failed to verify zephyr_sdk cipd package is readable.'
+                )
 
         # Initialize cipd
         subprocess.check_call(
             [
-                "cipd",
-                "init",
-                "-force",
+                'cipd',
+                'init',
+                '-force',
                 str(core_cache_path),
             ]
         )
         # Install the Zephyr SDK
         subprocess.check_call(
             [
-                "cipd",
-                "install",
+                'cipd',
+                'install',
                 cipd_package_subpath,
-                "-root",
+                '-root',
                 str(core_cache_path),
-                "-force",
+                '-force',
             ]
         )
         # Setup Zephyr SDK
-        setup_file = "setup.cmd" if os.name == "nt" else "setup.sh"
+        setup_file = 'setup.cmd' if os.name == 'nt' else 'setup.sh'
         subprocess.check_call(
             [
                 str(core_cache_path / setup_file),
-                "-t",
-                "all",
-                "-c",
-                "-h",
+                '-t',
+                'all',
+                '-c',
+                '-h',
             ]
         )
 
@@ -111,17 +120,17 @@
 
         self.__populate_download_cache_from_cipd(path)
         with importlib.resources.path(
-            pw_env_setup.virtualenv_setup, "constraint.list"
+            pw_env_setup.virtualenv_setup, 'constraint.list'
         ) as constraint:
             subprocess.check_call(
                 [
                     sys.executable,
-                    "-m",
-                    "pip",
-                    "install",
-                    "-r",
-                    f"{path}/scripts/requirements.txt",
-                    "-c",
+                    '-m',
+                    'pip',
+                    'install',
+                    '-r',
+                    f'{path}/scripts/requirements.txt',
+                    '-c',
                     str(constraint),
                 ]
             )
diff --git a/pw_package/py/pw_package/pigweed_packages.py b/pw_package/py/pw_package/pigweed_packages.py
index 814a4ad..50b48eb 100644
--- a/pw_package/py/pw_package/pigweed_packages.py
+++ b/pw_package/py/pw_package/pigweed_packages.py
@@ -38,6 +38,7 @@
 from pw_package.packages import smartfusion_mss
 from pw_package.packages import stm32cube
 from pw_package.packages import zephyr
+from pw_package.packages import chre
 
 # pylint: enable=unused-import
 
diff --git a/pw_perf_test/docs.rst b/pw_perf_test/docs.rst
index 63d348b..5d3fb6e 100644
--- a/pw_perf_test/docs.rst
+++ b/pw_perf_test/docs.rst
@@ -219,17 +219,19 @@
 
 .. code-block::
 
- import("$dir_pw_perf_test/perf_test.gni")
+   import("$dir_pw_perf_test/perf_test.gni")
 
- pw_perf_test("foo_perf_test") {
-   sources = [ "foo_perf_test.cc" ]
- }
+   pw_perf_test("foo_perf_test") {
+     sources = [ "foo_perf_test.cc" ]
+   }
 
 .. note::
    If you use ``pw_watch``, the template is configured to build automatically
    with ``pw_watch``. However you will still need to add your test group to the
    pw_perf_tests group in the top level BUILD.gn.
 
+.. _module-pw_perf_test-pw_perf_test:
+
 pw_perf_test template
 ---------------------
 ``pw_perf_test`` defines a single perf test suite. It creates two sub-targets.
@@ -247,14 +249,14 @@
 
 **Example**
 
-.. code::
+.. code-block::
 
- import("$dir_pw_perf_test/perf_test.gni")
+   import("$dir_pw_perf_test/perf_test.gni")
 
- pw_perf_test("large_test") {
-   sources = [ "large_test.cc" ]
-   enable_if = device_has_1m_flash
- }
+   pw_perf_test("large_test") {
+     sources = [ "large_test.cc" ]
+     enable_if = device_has_1m_flash
+   }
 
 Grouping
 --------
@@ -263,24 +265,24 @@
 
 **Example**
 
-.. code::
+.. code-block::
 
-  import("$dir_pw_perf_test/perf_test.gni")
+   import("$dir_pw_perf_test/perf_test.gni")
 
-  pw_perf_test("foo_test") {
-    sources = [ "foo.cc" ]
-  }
+   pw_perf_test("foo_test") {
+     sources = [ "foo.cc" ]
+   }
 
-  pw_perf_test("bar_test") {
-    sources = [ "bar.cc" ]
-  }
+   pw_perf_test("bar_test") {
+     sources = [ "bar.cc" ]
+   }
 
-  group("my_perf_tests_collection") {
-    deps = [
-      ":foo_test",
-      ":bar_test",
-    ]
-  }
+   group("my_perf_tests_collection") {
+     deps = [
+       ":foo_test",
+       ":bar_test",
+     ]
+   }
 
 Running
 -------
@@ -306,17 +308,17 @@
 
 **Example**
 
-.. code::
+.. code-block::
 
-  load(
-    "//pw_build:pigweed.bzl",
-    "pw_cc_test",
-  )
+   load(
+     "//pw_build:pigweed.bzl",
+     "pw_cc_test",
+   )
 
-  pw_cc_perf_test(
-    name = "foo_test",
-    srcs = ["foo_perf_test.cc"],
-  )
+   pw_cc_perf_test(
+     name = "foo_test",
+     srcs = ["foo_perf_test.cc"],
+   )
 
 Running
 -------
diff --git a/pw_persistent_ram/BUILD.bazel b/pw_persistent_ram/BUILD.bazel
index e714450..76d986f 100644
--- a/pw_persistent_ram/BUILD.bazel
+++ b/pw_persistent_ram/BUILD.bazel
@@ -38,6 +38,18 @@
     ],
 )
 
+pw_cc_library(
+    name = "flat_file_system_entry",
+    hdrs = [
+        "public/pw_persistent_ram/flat_file_system_entry.h",
+    ],
+    includes = ["public"],
+    deps = [
+        "//pw_file:flat_file_system",
+        "//pw_persistent_ram",
+    ],
+)
+
 pw_cc_test(
     name = "persistent_test",
     srcs = [
@@ -65,3 +77,13 @@
         "//pw_unit_test",
     ],
 )
+
+pw_cc_test(
+    name = "flat_file_system_entry_test",
+    srcs = [
+        "flat_file_system_entry_test.cc",
+    ],
+    deps = [
+        ":flat_file_system_entry",
+    ],
+)
diff --git a/pw_persistent_ram/BUILD.gn b/pw_persistent_ram/BUILD.gn
index 9930035..4ad32ba 100644
--- a/pw_persistent_ram/BUILD.gn
+++ b/pw_persistent_ram/BUILD.gn
@@ -41,10 +41,23 @@
   ]
 }
 
+pw_source_set("flat_file_system_entry") {
+  public_configs = [ ":public_include_path" ]
+  public = [ "public/pw_persistent_ram/flat_file_system_entry.h" ]
+  sources = []
+  public_deps = [
+    ":pw_persistent_ram",
+    "$dir_pw_file:flat_file_system",
+    dir_pw_status,
+  ]
+  deps = []
+}
+
 pw_test_group("tests") {
   tests = [
     ":persistent_test",
     ":persistent_buffer_test",
+    ":flat_file_system_entry_test",
   ]
 }
 
@@ -64,6 +77,11 @@
   sources = [ "persistent_buffer_test.cc" ]
 }
 
+pw_test("flat_file_system_entry_test") {
+  deps = [ ":flat_file_system_entry" ]
+  sources = [ "flat_file_system_entry_test.cc" ]
+}
+
 pw_doc_group("docs") {
   sources = [ "docs.rst" ]
   report_deps = [ ":persistent_size" ]
diff --git a/pw_persistent_ram/CMakeLists.txt b/pw_persistent_ram/CMakeLists.txt
index 59d6994..cc1e008 100644
--- a/pw_persistent_ram/CMakeLists.txt
+++ b/pw_persistent_ram/CMakeLists.txt
@@ -31,6 +31,16 @@
     persistent_buffer.cc
 )
 
+pw_add_library(pw_persistent_ram.flat_file_system_entry INTERFACE
+  HEADERS
+    public/pw_persistent_ram/flat_file_system_entry.h
+  PUBLIC_INCLUDES
+    public
+  PUBLIC_DEPS
+    pw_file.flat_file_system
+    pw_persistent_ram
+)
+
 pw_add_test(pw_persistent_ram.persistent_test
   SOURCES
     persistent_test.cc
@@ -52,3 +62,13 @@
     modules
     pw_persistent_ram
 )
+
+pw_add_test(pw_persistent_ram.flat_file_system_entry_test
+  SOURCES
+    flat_file_system_entry_test.cc
+  PRIVATE_DEPS
+    pw_persistent_ram.flat_file_system_entry
+  GROUPS
+    modules
+    pw_persistent_ram
+)
diff --git a/pw_persistent_ram/flat_file_system_entry_test.cc b/pw_persistent_ram/flat_file_system_entry_test.cc
new file mode 100644
index 0000000..5af6aff
--- /dev/null
+++ b/pw_persistent_ram/flat_file_system_entry_test.cc
@@ -0,0 +1,127 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_persistent_ram/flat_file_system_entry.h"
+
+#include "gtest/gtest.h"
+
+namespace pw::persistent_ram {
+namespace {
+
+class FlatFileSystemPersistentBufferEntryTest : public ::testing::Test {
+ protected:
+  static constexpr uint32_t kBufferSize = 16;
+  static constexpr size_t kMaxFileNameLength = 32;
+
+  FlatFileSystemPersistentBufferEntryTest() {}
+
+  // Emulate invalidation of persistent section(s).
+  void ZeroPersistentMemory() { memset(buffer_, 0, sizeof(buffer_)); }
+
+  PersistentBuffer<kBufferSize>& GetPersistentBuffer() {
+    return *(new (buffer_) PersistentBuffer<kBufferSize>());
+  }
+
+  // Allocate a chunk of aligned storage that can be independently controlled.
+  alignas(PersistentBuffer<kBufferSize>)
+      std::byte buffer_[sizeof(PersistentBuffer<kBufferSize>)];
+};
+
+TEST_F(FlatFileSystemPersistentBufferEntryTest, BasicProperties) {
+  constexpr std::string_view kExpectedFileName("file_1.bin");
+  constexpr file::FlatFileSystemService::Entry::Id kExpectedFileId = 7;
+  constexpr file::FlatFileSystemService::Entry::FilePermissions
+      kExpectedPermissions =
+          file::FlatFileSystemService::Entry::FilePermissions::READ;
+
+  ZeroPersistentMemory();
+  auto& persistent = GetPersistentBuffer();
+
+  // write some data to create the file
+  constexpr uint32_t kExpectedNumber = 0x6C2C6582;
+  auto writer = persistent.GetWriter();
+  ASSERT_EQ(OkStatus(), writer.Write(as_bytes(span(&kExpectedNumber, 1))));
+
+  FlatFileSystemPersistentBufferEntry persistent_file(
+      kExpectedFileName, kExpectedFileId, kExpectedPermissions, persistent);
+
+  std::array<char, kMaxFileNameLength> tmp_buffer = {};
+  static_assert(kExpectedFileName.size() <= tmp_buffer.size());
+  StatusWithSize sws = persistent_file.Name(tmp_buffer);
+  ASSERT_EQ(OkStatus(), sws.status());
+
+  EXPECT_EQ(
+      0, std::memcmp(tmp_buffer.data(), kExpectedFileName.data(), sws.size()));
+  EXPECT_EQ(sizeof(kExpectedNumber), persistent_file.SizeBytes());
+  EXPECT_EQ(kExpectedPermissions, persistent_file.Permissions());
+  EXPECT_EQ(kExpectedFileId, persistent_file.FileId());
+}
+
+TEST_F(FlatFileSystemPersistentBufferEntryTest, Delete) {
+  constexpr std::string_view kExpectedFileName("file_2.bin");
+  constexpr file::FlatFileSystemService::Entry::Id kExpectedFileId = 8;
+  constexpr file::FlatFileSystemService::Entry::FilePermissions
+      kExpectedPermissions =
+          file::FlatFileSystemService::Entry::FilePermissions::WRITE;
+
+  ZeroPersistentMemory();
+  auto& persistent = GetPersistentBuffer();
+
+  // write some data to create the file
+  constexpr uint32_t kExpectedNumber = 0x6C2C6582;
+  auto writer = persistent.GetWriter();
+  ASSERT_EQ(OkStatus(), writer.Write(as_bytes(span(&kExpectedNumber, 1))));
+
+  FlatFileSystemPersistentBufferEntry persistent_file(
+      kExpectedFileName, kExpectedFileId, kExpectedPermissions, persistent);
+
+  std::array<char, kMaxFileNameLength> tmp_buffer = {};
+  static_assert(kExpectedFileName.size() <= tmp_buffer.size());
+  StatusWithSize sws = persistent_file.Name(tmp_buffer);
+  ASSERT_EQ(OkStatus(), sws.status());
+
+  EXPECT_EQ(
+      0, std::memcmp(tmp_buffer.data(), kExpectedFileName.data(), sws.size()));
+  EXPECT_EQ(sizeof(kExpectedNumber), persistent_file.SizeBytes());
+
+  ASSERT_EQ(OkStatus(), persistent_file.Delete());
+
+  sws = persistent_file.Name(tmp_buffer);
+  ASSERT_EQ(Status::NotFound(), sws.status());
+  EXPECT_EQ(0u, persistent_file.SizeBytes());
+}
+
+TEST_F(FlatFileSystemPersistentBufferEntryTest, NoData) {
+  constexpr std::string_view kExpectedFileName("file_2.bin");
+  constexpr file::FlatFileSystemService::Entry::Id kExpectedFileId = 9;
+  constexpr file::FlatFileSystemService::Entry::FilePermissions
+      kExpectedPermissions =
+          file::FlatFileSystemService::Entry::FilePermissions::READ_AND_WRITE;
+
+  ZeroPersistentMemory();
+  auto& persistent = GetPersistentBuffer();
+
+  FlatFileSystemPersistentBufferEntry persistent_file(
+      kExpectedFileName, kExpectedFileId, kExpectedPermissions, persistent);
+
+  std::array<char, kMaxFileNameLength> tmp_buffer = {};
+  static_assert(kExpectedFileName.size() <= tmp_buffer.size());
+
+  StatusWithSize sws = persistent_file.Name(tmp_buffer);
+  ASSERT_EQ(Status::NotFound(), sws.status());
+  EXPECT_EQ(0u, persistent_file.SizeBytes());
+}
+
+}  // namespace
+}  // namespace pw::persistent_ram
diff --git a/pw_persistent_ram/public/pw_persistent_ram/flat_file_system_entry.h b/pw_persistent_ram/public/pw_persistent_ram/flat_file_system_entry.h
new file mode 100644
index 0000000..4d0e040
--- /dev/null
+++ b/pw_persistent_ram/public/pw_persistent_ram/flat_file_system_entry.h
@@ -0,0 +1,73 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#pragma once
+
+#include "pw_file/flat_file_system.h"
+#include "pw_persistent_ram/persistent_buffer.h"
+
+namespace pw::persistent_ram {
+
+template <size_t kMaxSizeBytes>
+class FlatFileSystemPersistentBufferEntry final
+    : public file::FlatFileSystemService::Entry {
+ public:
+  FlatFileSystemPersistentBufferEntry(
+      std::string_view file_name,
+      file::FlatFileSystemService::Entry::Id file_id,
+      file::FlatFileSystemService::Entry::FilePermissions permissions,
+      PersistentBuffer<kMaxSizeBytes>& persistent_buffer)
+      : file_name_(file_name),
+        file_id_(file_id),
+        permissions_(permissions),
+        persistent_buffer_(persistent_buffer) {}
+
+  StatusWithSize Name(span<char> dest) final {
+    if (file_name_.empty() || !persistent_buffer_.has_value()) {
+      return StatusWithSize(Status::NotFound(), 0);
+    }
+
+    size_t bytes_to_copy = std::min(dest.size_bytes(), file_name_.size());
+    std::memcpy(dest.data(), file_name_.data(), bytes_to_copy);
+    if (bytes_to_copy != file_name_.size()) {
+      return StatusWithSize(Status::ResourceExhausted(), bytes_to_copy);
+    }
+
+    return StatusWithSize(OkStatus(), bytes_to_copy);
+  }
+
+  size_t SizeBytes() final { return persistent_buffer_.size(); }
+
+  Status Delete() final {
+    persistent_buffer_.clear();
+    return pw::OkStatus();
+  }
+
+  file::FlatFileSystemService::Entry::FilePermissions Permissions()
+      const final {
+    return permissions_;
+  }
+
+  file::FlatFileSystemService::Entry::Id FileId() const final {
+    return file_id_;
+  }
+
+ private:
+  const std::string_view file_name_;
+  const file::FlatFileSystemService::Entry::Id file_id_;
+  const file::FlatFileSystemService::Entry::FilePermissions permissions_;
+  PersistentBuffer<kMaxSizeBytes>& persistent_buffer_;
+};
+
+}  // namespace pw::persistent_ram
diff --git a/pw_presubmit/py/BUILD.gn b/pw_presubmit/py/BUILD.gn
index 2b452c3..c00935d 100644
--- a/pw_presubmit/py/BUILD.gn
+++ b/pw_presubmit/py/BUILD.gn
@@ -43,6 +43,7 @@
     "pw_presubmit/presubmit.py",
     "pw_presubmit/presubmit_context.py",
     "pw_presubmit/python_checks.py",
+    "pw_presubmit/rst_format.py",
     "pw_presubmit/shell_checks.py",
     "pw_presubmit/source_in_build.py",
     "pw_presubmit/todo_check.py",
diff --git a/pw_presubmit/py/pw_presubmit/format_code.py b/pw_presubmit/py/pw_presubmit/format_code.py
index d773b4e..03bfbc5 100755
--- a/pw_presubmit/py/pw_presubmit/format_code.py
+++ b/pw_presubmit/py/pw_presubmit/format_code.py
@@ -64,7 +64,14 @@
     owners_checks,
     presubmit_context,
 )
-from pw_presubmit.tools import exclude_paths, file_summary, log_run, plural
+from pw_presubmit.tools import (
+    exclude_paths,
+    file_summary,
+    log_run,
+    plural,
+    colorize_diff,
+)
+from pw_presubmit.rst_format import reformat_rst
 
 _LOG: logging.Logger = logging.getLogger(__name__)
 _COLOR = pw_cli.color.colors()
@@ -73,26 +80,6 @@
 _Context = Union[PresubmitContext, FormatContext]
 
 
-def colorize_diff_line(line: str) -> str:
-    if line.startswith('--- ') or line.startswith('+++ '):
-        return _COLOR.bold_white(line)
-    if line.startswith('-'):
-        return _COLOR.red(line)
-    if line.startswith('+'):
-        return _COLOR.green(line)
-    if line.startswith('@@ '):
-        return _COLOR.cyan(line)
-    return line
-
-
-def colorize_diff(lines: Iterable[str]) -> str:
-    """Takes a diff str or list of str lines and returns a colorized version."""
-    if isinstance(lines, str):
-        lines = lines.splitlines(True)
-
-    return ''.join(colorize_diff_line(line) for line in lines)
-
-
 def _diff(path, original: bytes, formatted: bytes) -> str:
     return ''.join(
         difflib.unified_diff(
@@ -461,6 +448,24 @@
     return {}
 
 
+def rst_format_check(ctx: _Context) -> Dict[Path, str]:
+    errors = {}
+    for path in ctx.paths:
+        result = reformat_rst(path, diff=True, in_place=False)
+        if result:
+            errors[path] = ''.join(result)
+    return errors
+
+
+def rst_format_fix(ctx: _Context) -> Dict[Path, str]:
+    errors = {}
+    for path in ctx.paths:
+        result = reformat_rst(path, diff=True, in_place=True)
+        if result:
+            errors[path] = ''.join(result)
+    return errors
+
+
 def print_format_check(
     errors: Dict[Path, str],
     show_fix_commands: bool,
@@ -595,8 +600,8 @@
 RST_FORMAT: CodeFormat = CodeFormat(
     'reStructuredText',
     FileFilter(endswith=['.rst']),
-    check_trailing_space,
-    fix_trailing_space,
+    rst_format_check,
+    rst_format_fix,
 )
 
 MARKDOWN_FORMAT: CodeFormat = CodeFormat(
diff --git a/pw_presubmit/py/pw_presubmit/keep_sorted.py b/pw_presubmit/py/pw_presubmit/keep_sorted.py
index 667d707..33ed26f 100644
--- a/pw_presubmit/py/pw_presubmit/keep_sorted.py
+++ b/pw_presubmit/py/pw_presubmit/keep_sorted.py
@@ -34,11 +34,10 @@
 )
 
 import pw_cli
-from . import cli, format_code, git_repo, presubmit, presubmit_context, tools
+from . import cli, git_repo, presubmit, presubmit_context, tools
 
 DEFAULT_PATH = Path('out', 'presubmit', 'keep_sorted')
 
-_COLOR = pw_cli.color.colors()
 _LOG: logging.Logger = logging.getLogger(__name__)
 
 # Ignore a whole section. Please do not change the order of these lines.
@@ -390,7 +389,7 @@
             )
 
             outs.write(diff)
-            print(format_code.colorize_diff(diff))
+            print(tools.colorize_diff(diff))
 
     return errors
 
diff --git a/pw_presubmit/py/pw_presubmit/pigweed_presubmit.py b/pw_presubmit/py/pw_presubmit/pigweed_presubmit.py
index e04c7f7..dde7a59 100755
--- a/pw_presubmit/py/pw_presubmit/pigweed_presubmit.py
+++ b/pw_presubmit/py/pw_presubmit/pigweed_presubmit.py
@@ -153,7 +153,6 @@
         'python.tests',
         'python.lint',
         'docs',
-        'fuzzers',
         'pigweed_pypi_distribution',
     ]
 
@@ -250,6 +249,19 @@
     ),
 )
 
+gn_chre_build = build.GnGenNinja(
+    name='gn_chre_build',
+    path_filter=_BUILD_FILE_FILTER,
+    packages=('chre',),
+    gn_args=dict(
+        dir_pw_third_party_chre=lambda ctx: '"{}"'.format(
+            ctx.package_root / 'chre'
+        ),
+        pw_C_OPTIMIZATION_LEVELS=_OPTIMIZATION_LEVELS,
+    ),
+    ninja_targets=(*_at_all_optimization_levels('host_clang'),),
+)
+
 gn_emboss_nanopb_build = build.GnGenNinja(
     name='gn_emboss_nanopb_build',
     path_filter=_BUILD_FILE_FILTER,
@@ -469,7 +481,7 @@
             ctx.root / 'third_party/googletest'
         ),
     },
-    ninja_targets=('host_clang_fuzz',),
+    ninja_targets=('fuzzers',),
     ninja_contexts=(
         lambda ctx: build.modified_env(
             FUZZTEST_PRNG_SEED=build.fuzztest_prng_seed(ctx),
@@ -477,6 +489,15 @@
     ),
 )
 
+oss_fuzz_build = build.GnGenNinja(
+    name='oss_fuzz_build',
+    path_filter=_BUILD_FILE_FILTER,
+    gn_args={
+        'pw_toolchain_OSS_FUZZ_ENABLED': True,
+    },
+    ninja_targets=('oss_fuzz',),
+)
+
 
 def _env_with_zephyr_vars(ctx: PresubmitContext) -> dict:
     """Returns the environment variables with ... set for Zephyr."""
@@ -676,6 +697,8 @@
         '//pw_sync/...',
         '//pw_thread/...',
         '//pw_thread_freertos/...',
+        '//pw_interrupt/...',
+        '//pw_cpu_exception/...',
     )
 
 
@@ -1140,6 +1163,8 @@
     cmake_clang,
     cmake_gcc,
     coverage,
+    # TODO(b/234876100): Remove once msan is added to all_sanitizers().
+    cpp_checks.msan,
     docs_build,
     gitmodules.create(gitmodules.Config(allow_submodules=False)),
     gn_clang_build,
@@ -1171,6 +1196,7 @@
 # program block CQ on Linux.
 MISC = (
     # keep-sorted: start
+    gn_chre_build,
     gn_emboss_nanopb_build,
     gn_googletest_build,
     # keep-sorted: end
@@ -1184,6 +1210,7 @@
     gn_crypto_micro_ecc_build,
     gn_fuzz_build,
     gn_software_update_build,
+    oss_fuzz_build,
     # keep-sorted: end
 )
 
@@ -1236,8 +1263,8 @@
     _LINTFORMAT,
     gn_combined_build_check,
     gn_host_tools,
-    bazel_test if sys.platform == 'linux' else (),
-    bazel_build if sys.platform == 'linux' else (),
+    bazel_test,
+    bazel_build,
     python_checks.gn_python_check,
     python_checks.gn_python_test_coverage,
     python_checks.check_upstream_python_constraints,
diff --git a/pw_presubmit/py/pw_presubmit/presubmit_context.py b/pw_presubmit/py/pw_presubmit/presubmit_context.py
index 9e7b1da..fcc67cf 100644
--- a/pw_presubmit/py/pw_presubmit/presubmit_context.py
+++ b/pw_presubmit/py/pw_presubmit/presubmit_context.py
@@ -92,7 +92,7 @@
 @dataclasses.dataclass
 class LuciPipeline:
     round: int
-    builds_from_previous_iteration: Sequence[str]
+    builds_from_previous_iteration: Sequence[int]
 
     @staticmethod
     def create(
@@ -115,7 +115,7 @@
         return LuciPipeline(
             round=int(pipeline_props['round']),
             builds_from_previous_iteration=list(
-                pipeline_props['builds_from_previous_iteration']
+                int(x) for x in pipeline_props['builds_from_previous_iteration']
             ),
         )
 
diff --git a/pw_presubmit/py/pw_presubmit/python_checks.py b/pw_presubmit/py/pw_presubmit/python_checks.py
index 79e631b..a03578d 100644
--- a/pw_presubmit/py/pw_presubmit/python_checks.py
+++ b/pw_presubmit/py/pw_presubmit/python_checks.py
@@ -28,7 +28,6 @@
 
 from pw_env_setup import python_packages
 
-from pw_presubmit.format_code import colorize_diff_line
 from pw_presubmit.presubmit import (
     call,
     Check,
@@ -39,7 +38,7 @@
     PresubmitFailure,
 )
 from pw_presubmit import build
-from pw_presubmit.tools import log_run
+from pw_presubmit.tools import log_run, colorize_diff_line
 
 _LOG = logging.getLogger(__name__)
 
@@ -218,7 +217,18 @@
     output_text = output_text.replace(str(ctx.output_dir), '')
     output_text = output_text.replace(str(ctx.root), '')
     output_text = output_text.replace(str(output_file.parent), '')
-    output_file.write_text(output_text)
+
+    final_output_text = ''
+    for line in output_text.splitlines(keepends=True):
+        # Remove --find-links lines
+        if line.startswith('--find-links'):
+            continue
+        # Remove blank lines
+        if line == '\n':
+            continue
+        final_output_text += line
+
+    output_file.write_text(final_output_text)
 
 
 def _update_upstream_python_constraints(
diff --git a/pw_presubmit/py/pw_presubmit/rst_format.py b/pw_presubmit/py/pw_presubmit/rst_format.py
new file mode 100644
index 0000000..ea5d309
--- /dev/null
+++ b/pw_presubmit/py/pw_presubmit/rst_format.py
@@ -0,0 +1,246 @@
+#!/usr/bin/env python3
+
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Restructured Text Formatting."""
+
+import argparse
+from dataclasses import dataclass, field
+import difflib
+from functools import cached_property
+from pathlib import Path
+import textwrap
+from typing import List, Optional
+
+from pw_presubmit.tools import colorize_diff
+
+DEFAULT_TAB_WIDTH = 8
+CODE_BLOCK_INDENTATION = 3
+
+
+def _parse_args() -> argparse.Namespace:
+    parser = argparse.ArgumentParser(description=__doc__)
+
+    parser.add_argument(
+        '--tab-width',
+        type=int, default=DEFAULT_TAB_WIDTH,
+        help='Number of spaces to use when converting tab characters.',
+    )
+    parser.add_argument(
+        '--diff',
+        action='store_true',
+        help='Print a diff of formatting changes.',
+    )
+    parser.add_argument(
+        '-i',
+        '--in-place',
+        action='store_true',
+        help='Replace existing file with the reformatted copy.',
+    )
+    parser.add_argument(
+        'rst_files',
+        nargs='+',
+        default=[],
+        type=Path,
+        help='Paths to rst files.',
+    )
+
+    return parser.parse_args()
+
+
+def _indent_amount(line: str) -> int:
+    return len(line) - len(line.lstrip())
+
+
+def _reindent(input_text: str, amount: int) -> str:
+    text = ''
+    for line in textwrap.dedent(input_text).splitlines():
+        if len(line.strip()) == 0:
+            text += '\n'
+            continue
+        text += ' ' * amount
+        text += line
+        text += '\n'
+    return text
+
+
+def _strip_trailing_whitespace(line: str) -> str:
+    return line.rstrip() + '\n'
+
+
+@dataclass
+class CodeBlock:
+    """Store a single code block."""
+
+    directive_lineno: int
+    directive_line: str
+    first_line_indent: Optional[int] = None
+    end_lineno: Optional[int] = None
+    option_lines: List[str] = field(default_factory=list)
+    code_lines: List[str] = field(default_factory=list)
+
+    def __post_init__(self) -> None:
+        self._blank_line_after_options_found = False
+
+    def finished(self) -> bool:
+        return self.end_lineno is not None
+
+    def append_line(self, index: int, line: str) -> None:
+        """Process a line for this code block."""
+        # Check if outside the code block (indentation is less):
+        if (
+            self.first_line_indent is not None
+            and len(line.strip()) > 0
+            and _indent_amount(line) < self.first_line_indent
+        ):
+            # Code block ended
+            self.end_lineno = index
+            return
+
+        # If first line indent hasn't been found
+        if self.first_line_indent is None:
+            # Check if the first word is a directive option.
+            # E.g. :caption:
+            line_words = line.split()
+            if (
+                line_words
+                and line_words[0].startswith(':')
+                and line_words[0].endswith(':')
+            ):
+                self.option_lines.append(line.rstrip())
+                return
+            # Check for a blank line
+            if len(line.strip()) == 0:
+                if (
+                    self.option_lines
+                    and not self._blank_line_after_options_found
+                ):
+                    self._blank_line_after_options_found = True
+                return
+            # Check for a line that is a continuation of a previous option.
+            if self.option_lines and not self._blank_line_after_options_found:
+                self.option_lines.append(line.rstrip())
+                return
+
+            self.first_line_indent = _indent_amount(line)
+
+        # Save this line as code.
+        self.code_lines.append(line.rstrip())
+
+    @cached_property
+    def directive_indent_amount(self) -> int:
+        return _indent_amount(self.directive_line)
+
+    def options_block_text(self) -> str:
+        return _reindent(
+            input_text='\n'.join(self.option_lines),
+            amount=self.directive_indent_amount + CODE_BLOCK_INDENTATION,
+        )
+
+    def code_block_text(self) -> str:
+        return _reindent(
+            input_text='\n'.join(self.code_lines),
+            amount=self.directive_indent_amount + CODE_BLOCK_INDENTATION,
+        )
+
+    def to_text(self) -> str:
+        text = ''
+        text += self.directive_line
+        if self.option_lines:
+            text += self.options_block_text()
+        text += '\n'
+        text += self.code_block_text()
+        text += '\n'
+        return text
+
+    def __repr__(self) -> str:
+        return self.to_text()
+
+
+def reindent_code_blocks(in_text: str) -> str:
+    """Reindent code blocks to 3 spaces."""
+    out_text = ''
+    current_block: Optional[CodeBlock] = None
+    for index, line in enumerate(in_text.splitlines(keepends=True)):
+        # If a code block is active, process this line.
+        if current_block:
+            current_block.append_line(index, line)
+            if current_block.finished():
+                out_text += current_block.to_text()
+                # This line wasn't part of the code block, process as normal.
+                out_text += _strip_trailing_whitespace(line)
+                # Erase this code_block variable
+                current_block = None
+        # Check for new code block start
+        elif line.lstrip().startswith('.. code') and line.rstrip().endswith(
+            '::'
+        ):
+            current_block = CodeBlock(
+                directive_lineno=index, directive_line=line
+            )
+            continue
+        else:
+            out_text += _strip_trailing_whitespace(line)
+    # If the document ends with a code block it may still need to be written.
+    if current_block is not None:
+        out_text += current_block.to_text()
+    return out_text
+
+
+def reformat_rst(
+    file_name: Path,
+    diff: bool = False,
+    in_place: bool = False,
+    tab_width: int = DEFAULT_TAB_WIDTH,
+) -> List[str]:
+    """Reformat an rst file.
+
+    Returns a list of diff lines."""
+    in_text = file_name.read_text()
+
+    # Replace tabs with spaces
+    out_text = in_text.replace('\t', ' ' * tab_width)
+
+    # Indent .. code-block:: directives.
+    out_text = reindent_code_blocks(out_text)
+
+    result_diff = list(
+        difflib.unified_diff(
+            in_text.splitlines(True),
+            out_text.splitlines(True),
+            f'{file_name}  (original)',
+            f'{file_name}  (reformatted)',
+        )
+    )
+    if diff and result_diff:
+        print(''.join(colorize_diff(result_diff)))
+
+    if in_place:
+        file_name.write_text(out_text)
+
+    return result_diff
+
+
+def rst_format_main(
+    rst_files: List[Path],
+    diff: bool = False,
+    in_place: bool = False,
+    tab_width: int = DEFAULT_TAB_WIDTH,
+) -> None:
+    for rst_file in rst_files:
+        reformat_rst(rst_file, diff, in_place, tab_width)
+
+
+if __name__ == '__main__':
+    rst_format_main(**vars(_parse_args()))
diff --git a/pw_presubmit/py/pw_presubmit/tools.py b/pw_presubmit/py/pw_presubmit/tools.py
index c32780d..9e1319e 100644
--- a/pw_presubmit/py/pw_presubmit/tools.py
+++ b/pw_presubmit/py/pw_presubmit/tools.py
@@ -32,9 +32,31 @@
     Tuple,
 )
 
+import pw_cli.color
 from pw_presubmit.presubmit_context import PRESUBMIT_CONTEXT
 
 _LOG: logging.Logger = logging.getLogger(__name__)
+_COLOR = pw_cli.color.colors()
+
+
+def colorize_diff_line(line: str) -> str:
+    if line.startswith('--- ') or line.startswith('+++ '):
+        return _COLOR.bold_white(line)
+    if line.startswith('-'):
+        return _COLOR.red(line)
+    if line.startswith('+'):
+        return _COLOR.green(line)
+    if line.startswith('@@ '):
+        return _COLOR.cyan(line)
+    return line
+
+
+def colorize_diff(lines: Iterable[str]) -> str:
+    """Takes a diff str or list of str lines and returns a colorized version."""
+    if isinstance(lines, str):
+        lines = lines.splitlines(True)
+
+    return ''.join(colorize_diff_line(line) for line in lines)
 
 
 def plural(
diff --git a/pw_protobuf/BUILD.bazel b/pw_protobuf/BUILD.bazel
index b308ae6..030d960 100644
--- a/pw_protobuf/BUILD.bazel
+++ b/pw_protobuf/BUILD.bazel
@@ -12,6 +12,7 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+load("@com_google_protobuf//:protobuf.bzl", "py_proto_library")
 load("@rules_proto//proto:defs.bzl", "proto_library")
 load(
     "//pw_build:pigweed.bzl",
@@ -20,7 +21,6 @@
     "pw_cc_test",
 )
 load("//pw_fuzzer:fuzzer.bzl", "pw_cc_fuzz_test")
-load("@com_google_protobuf//:protobuf.bzl", "py_proto_library")
 load("//pw_protobuf_compiler:pw_proto_library.bzl", "pw_proto_filegroup", "pw_proto_library")
 
 package(default_visibility = ["//visibility:public"])
diff --git a/pw_protobuf/BUILD.gn b/pw_protobuf/BUILD.gn
index a6ae1f8..d01dfdc 100644
--- a/pw_protobuf/BUILD.gn
+++ b/pw_protobuf/BUILD.gn
@@ -120,8 +120,6 @@
     ":codegen_message_test",
     ":decoder_test",
     ":encoder_test",
-    ":encoder_fuzzer_test",
-    ":decoder_fuzzer_test",
     ":find_test",
     ":map_utils_test",
     ":message_test",
@@ -129,10 +127,11 @@
     ":stream_decoder_test",
     ":varint_size_test",
   ]
+  group_deps = [ ":fuzzers" ]
 }
 
-group("fuzzers") {
-  deps = [
+pw_fuzzer_group("fuzzers") {
+  fuzzers = [
     ":decoder_fuzzer",
     ":encoder_fuzzer",
   ]
diff --git a/pw_protobuf/codegen_message_test.cc b/pw_protobuf/codegen_message_test.cc
index 38d3378..cf987c6 100644
--- a/pw_protobuf/codegen_message_test.cc
+++ b/pw_protobuf/codegen_message_test.cc
@@ -2052,5 +2052,30 @@
   }
 }
 
+TEST(CodegenMessage, MaxSize) {
+  // Verify constants generated from max_size options in full_test.options
+  static_assert(Pigweed::kErrorMessageMaxSize == 64);
+  static_assert(Pigweed::kDataMaxSize == 8);
+
+  Pigweed::Message size_message;
+  EXPECT_EQ(size_message.error_message.max_size(),
+            Pigweed::kErrorMessageMaxSize);
+  EXPECT_EQ(size_message.data.max_size(), Pigweed::kDataMaxSize);
+
+  // Verify constants generated from max_count options in repeated.options
+  static_assert(RepeatedTest::kUint32sMaxSize == 8);
+  static_assert(RepeatedTest::kFixed32sMaxSize == 8);
+  static_assert(RepeatedTest::kDoublesMaxSize == 2);
+  static_assert(RepeatedTest::kUint64sMaxSize == 4);
+  static_assert(RepeatedTest::kEnumsMaxSize == 4);
+
+  RepeatedTest::Message count_message;
+  EXPECT_EQ(count_message.uint32s.max_size(), RepeatedTest::kUint32sMaxSize);
+  EXPECT_EQ(count_message.fixed32s.max_size(), RepeatedTest::kFixed32sMaxSize);
+  EXPECT_EQ(count_message.doubles.max_size(), RepeatedTest::kDoublesMaxSize);
+  EXPECT_EQ(count_message.uint64s.max_size(), RepeatedTest::kUint64sMaxSize);
+  EXPECT_EQ(count_message.enums.max_size(), RepeatedTest::kEnumsMaxSize);
+}
+
 }  // namespace
 }  // namespace pw::protobuf
diff --git a/pw_protobuf/docs.rst b/pw_protobuf/docs.rst
index 774cc54..52ad7f2 100644
--- a/pw_protobuf/docs.rst
+++ b/pw_protobuf/docs.rst
@@ -8,9 +8,9 @@
 
 .. note::
 
-  The protobuf module is a work in progress. Wire format encoding and decoding
-  is supported, though the APIs are not final. C++ code generation exists for
-  encoding and decoding, but not yet optimized for in-memory decoding.
+   The protobuf module is a work in progress. Wire format encoding and decoding
+   is supported, though the APIs are not final. C++ code generation exists for
+   encoding and decoding, but not yet optimized for in-memory decoding.
 
 --------
 Overview
@@ -23,9 +23,9 @@
 The API is designed in three layers, which can be freely intermixed with each
 other in your code, depending on point of use requirements:
 
- 1. Message Structures,
- 2. Per-Field Writers and Readers,
- 3. Direct Writers and Readers.
+1. Message Structures,
+2. Per-Field Writers and Readers,
+3. Direct Writers and Readers.
 
 This has a few benefits. The primary one is that it allows the core proto
 serialization and deserialization libraries to be relatively small.
@@ -35,29 +35,29 @@
 To demonstrate these layers, we use the following protobuf message definition
 in the examples:
 
-.. code::
+.. code-block:: protobuf
 
-  message Customer {
-    enum Status {
-      NEW = 1;
-      ACTIVE = 2;
-      INACTIVE = 3;
-    }
-    int32 age = 1;
-    string name = 2;
-    Status status = 3;
-  }
+   message Customer {
+     enum Status {
+       NEW = 1;
+       ACTIVE = 2;
+       INACTIVE = 3;
+     }
+     int32 age = 1;
+     string name = 2;
+     Status status = 3;
+   }
 
 And the following accompanying options file:
 
-.. code::
+.. code-block:: text
 
-  Customer.name max_size:32
+   Customer.name max_size:32
 
 .. toctree::
-  :maxdepth: 1
+   :maxdepth: 1
 
-  size_report
+   size_report
 
 Message Structures
 ==================
@@ -66,50 +66,50 @@
 
 This results in the following generated structure:
 
-.. code:: c++
+.. code-block:: c++
 
-  enum class Customer::Status : uint32_t {
-    NEW = 1,
-    ACTIVE = 2,
-    INACTIVE = 3,
+   enum class Customer::Status : uint32_t {
+     NEW = 1,
+     ACTIVE = 2,
+     INACTIVE = 3,
 
-    kNew = NEW,
-    kActive = ACTIVE,
-    kInactive = INACTIVE,
-  };
+     kNew = NEW,
+     kActive = ACTIVE,
+     kInactive = INACTIVE,
+   };
 
-  struct Customer::Message {
-    int32_t age;
-    pw::InlineString<32> name;
-    Customer::Status status;
-  };
+   struct Customer::Message {
+     int32_t age;
+     pw::InlineString<32> name;
+     Customer::Status status;
+   };
 
 Which can be encoded with the code:
 
-.. code:: c++
+.. code-block:: c++
 
-  #include "example_protos/customer.pwpb.h"
+   #include "example_protos/customer.pwpb.h"
 
-  pw::Status EncodeCustomer(Customer::StreamEncoder& encoder) {
-    return encoder.Write({
-      age = 33,
-      name = "Joe Bloggs",
-      status = Customer::Status::INACTIVE
-    });
-  }
+   pw::Status EncodeCustomer(Customer::StreamEncoder& encoder) {
+     return encoder.Write({
+       age = 33,
+       name = "Joe Bloggs",
+       status = Customer::Status::INACTIVE
+     });
+   }
 
 And decoded into a struct with the code:
 
-.. code:: c++
+.. code-block:: c++
 
-  #include "example_protos/customer.pwpb.h"
+   #include "example_protos/customer.pwpb.h"
 
-  pw::Status DecodeCustomer(Customer::StreamDecoder& decoder) {
-    Customer::Message customer{};
-    PW_TRY(decoder.Read(customer));
-    // Read fields from customer
-    return pw::OkStatus();
-  }
+   pw::Status DecodeCustomer(Customer::StreamDecoder& decoder) {
+     Customer::Message customer{};
+     PW_TRY(decoder.Read(customer));
+     // Read fields from customer
+     return pw::OkStatus();
+   }
 
 These structures can be moved, copied, and compared with each other for
 equality.
@@ -130,28 +130,28 @@
 
 .. code-block:: c++
 
-  template <typename Message>
-  constexpr bool IsTriviallyComparable<Message>();
+   template <typename Message>
+   constexpr bool IsTriviallyComparable<Message>();
 
 For example, given the following protobuf definitions:
 
-.. code-block::
+.. code-block:: protobuf
 
-  message Point {
-    int32 x = 1;
-    int32 y = 2;
-  }
+   message Point {
+     int32 x = 1;
+     int32 y = 2;
+   }
 
-  message Label {
-    Point point = 1;
-    string label = 2;
-  }
+   message Label {
+     Point point = 1;
+     string label = 2;
+   }
 
 And the accompanying options file:
 
-.. code-block::
+.. code-block:: text
 
-  Label.label use_callback:true
+   Label.label use_callback:true
 
 The ``Point`` message can be fully compared for equality, but ``Label`` cannot.
 ``Label`` still defines an ``operator==``, but it ignores the ``label`` string.
@@ -172,22 +172,22 @@
 constant that represents the maximum encoded size of the protobuf message,
 excluding the contents of any field values which require a callback.
 
-.. code:: c++
+.. code-block:: c++
 
-  #include "example_protos/customer.pwpb.h"
+   #include "example_protos/customer.pwpb.h"
 
-  std::byte buffer[Customer::kMaxEncodedSizeBytes];
-  Customer::MemoryEncoder encoder(buffer);
-  const auto status = encoder.Write({
-    age = 22,
-    name = "Wolfgang Bjornson",
-    status = Customer::Status::ACTIVE
-  });
+   std::byte buffer[Customer::kMaxEncodedSizeBytes];
+   Customer::MemoryEncoder encoder(buffer);
+   const auto status = encoder.Write({
+     .age = 22,
+     .name = "Wolfgang Bjornson",
+     .status = Customer::Status::ACTIVE
+   });
 
-  // Always check the encoder status or return values from Write calls.
-  if (!status.ok()) {
-    PW_LOG_INFO("Failed to encode proto; %s", encoder.status().str());
-  }
+   // Always check the encoder status or return values from Write calls.
+   if (!status.ok()) {
+     PW_LOG_INFO("Failed to encode proto; %s", encoder.status().str());
+   }
 
 In the above example, because the ``name`` field has a ``max_size`` specified
 in the accompanying options file, ``kMaxEncodedSizeBytes`` includes the maximum
@@ -202,54 +202,54 @@
 but is known to your code (``kMaxImageDataSize`` in this example being a
 constant in your own code), you can simply add it to the generated constant:
 
-.. code:: c++
+.. code-block:: c++
 
-  #include "example_protos/store.pwpb.h"
+   #include "example_protos/store.pwpb.h"
 
-  const std::byte image_data[kMaxImageDataSize] = { ... };
+   const std::byte image_data[kMaxImageDataSize] = { ... };
 
-  Store::Message store{};
-  // Calling SetEncoder means we must always extend the buffer size.
-  store.image_data.SetEncoder([](Store::StreamEncoder& encoder) {
-    return encoder.WriteImageData(image_data);
-  });
+   Store::Message store{};
+   // Calling SetEncoder means we must always extend the buffer size.
+   store.image_data.SetEncoder([](Store::StreamEncoder& encoder) {
+     return encoder.WriteImageData(image_data);
+   });
 
-  std::byte buffer[Store::kMaxEncodedSizeBytes + kMaxImageDataSize];
-  Store::MemoryEncoder encoder(buffer);
-  const auto status = encoder.Write(store);
+   std::byte buffer[Store::kMaxEncodedSizeBytes + kMaxImageDataSize];
+   Store::MemoryEncoder encoder(buffer);
+   const auto status = encoder.Write(store);
 
-  // Always check the encoder status or return values from Write calls.
-  if (!status.ok()) {
-    PW_LOG_INFO("Failed to encode proto; %s", encoder.status().str());
-  }
+   // Always check the encoder status or return values from Write calls.
+   if (!status.ok()) {
+     PW_LOG_INFO("Failed to encode proto; %s", encoder.status().str());
+   }
 
 Or when using a variable number of repeated submessages, where the maximum
 number is known to your code but not to the proto, you can add the constants
 from one message type to another:
 
-.. code:: c++
+.. code-block:: c++
 
-  #include "example_protos/person.pwpb.h"
+   #include "example_protos/person.pwpb.h"
 
-  Person::Message grandchild{};
-  // Calling SetEncoder means we must always extend the buffer size.
-  grandchild.grandparent.SetEncoder([](Person::StreamEncoder& encoder) {
-    PW_TRY(encoder.GetGrandparentEncoder().Write(maternal_grandma));
-    PW_TRY(encoder.GetGrandparentEncoder().Write(maternal_grandpa));
-    PW_TRY(encoder.GetGrandparentEncoder().Write(paternal_grandma));
-    PW_TRY(encoder.GetGrandparentEncoder().Write(paternal_grandpa));
-    return pw::OkStatus();
-  });
+   Person::Message grandchild{};
+   // Calling SetEncoder means we must always extend the buffer size.
+   grandchild.grandparent.SetEncoder([](Person::StreamEncoder& encoder) {
+     PW_TRY(encoder.GetGrandparentEncoder().Write(maternal_grandma));
+     PW_TRY(encoder.GetGrandparentEncoder().Write(maternal_grandpa));
+     PW_TRY(encoder.GetGrandparentEncoder().Write(paternal_grandma));
+     PW_TRY(encoder.GetGrandparentEncoder().Write(paternal_grandpa));
+     return pw::OkStatus();
+   });
 
-  std::byte buffer[Person::kMaxEncodedSizeBytes +
-                   Grandparent::kMaxEncodedSizeBytes * 4];
-  Person::MemoryEncoder encoder(buffer);
-  const auto status = encoder.Write(grandchild);
+   std::byte buffer[Person::kMaxEncodedSizeBytes +
+                    Grandparent::kMaxEncodedSizeBytes * 4];
+   Person::MemoryEncoder encoder(buffer);
+   const auto status = encoder.Write(grandchild);
 
-  // Always check the encoder status or return values from Write calls.
-  if (!status.ok()) {
-    PW_LOG_INFO("Failed to encode proto; %s", encoder.status().str());
-  }
+   // Always check the encoder status or return values from Write calls.
+   if (!status.ok()) {
+     PW_LOG_INFO("Failed to encode proto; %s", encoder.status().str());
+   }
 
 .. warning::
   Encoding to a buffer that is insufficiently large will return
@@ -273,33 +273,33 @@
 underlying methods with the correct field numbers and value types, and result
 in no additional binary code over correctly using the core implementation.
 
-.. code:: c++
+.. code-block:: c++
 
-  class Customer::StreamEncoder : pw::protobuf::StreamEncoder {
-   public:
-    // Message Structure Writer.
-    pw::Status Write(const Customer::Message&);
+   class Customer::StreamEncoder : pw::protobuf::StreamEncoder {
+    public:
+     // Message Structure Writer.
+     pw::Status Write(const Customer::Message&);
 
-    // Per-Field Typed Writers.
-    pw::Status WriteAge(int32_t);
+     // Per-Field Typed Writers.
+     pw::Status WriteAge(int32_t);
 
-    pw::Status WriteName(std::string_view);
-    pw::Status WriteName(const char*, size_t);
+     pw::Status WriteName(std::string_view);
+     pw::Status WriteName(const char*, size_t);
 
-    pw::Status WriteStatus(Customer::Status);
-  };
+     pw::Status WriteStatus(Customer::Status);
+   };
 
 So the same encoding method could be written as:
 
-.. code:: c++
+.. code-block:: c++
 
-  #include "example_protos/customer.pwpb.h"
+   #include "example_protos/customer.pwpb.h"
 
-  Status EncodeCustomer(Customer::StreamEncoder& encoder) {
-    PW_TRY(encoder.WriteAge(33));
-    PW_TRY(encoder.WriteName("Joe Bloggs"sv));
-    PW_TRY(encoder.WriteStatus(Customer::Status::INACTIVE));
-  }
+   Status EncodeCustomer(Customer::StreamEncoder& encoder) {
+     PW_TRY(encoder.WriteAge(33));
+     PW_TRY(encoder.WriteName("Joe Bloggs"sv));
+     PW_TRY(encoder.WriteStatus(Customer::Status::INACTIVE));
+   }
 
 Pigweed's protobuf encoders encode directly to the wire format of a proto rather
 than staging information to a mutable datastructure. This means any writes of a
@@ -319,69 +319,69 @@
 
 For example:
 
-.. code::
+.. code-block:: protobuf
 
-  // The first half of the overlaid message.
-  message BaseMessage {
-    uint32 length = 1;
-    reserved 2;  // Reserved for Overlay
-  }
+   // The first half of the overlaid message.
+   message BaseMessage {
+     uint32 length = 1;
+     reserved 2;  // Reserved for Overlay
+   }
 
-  // OK: The second half of the overlaid message.
-  message Overlay {
-    reserved 1;  // Reserved for BaseMessage
-    uint32 height = 2;
-  }
+   // OK: The second half of the overlaid message.
+   message Overlay {
+     reserved 1;  // Reserved for BaseMessage
+     uint32 height = 2;
+   }
 
-  // OK: A message that overlays and bundles both types together.
-  message Both {
-    uint32 length = 1;  // Defined independently by BaseMessage
-    uint32 height = 2;  // Defined independently by Overlay
-  }
+   // OK: A message that overlays and bundles both types together.
+   message Both {
+     uint32 length = 1;  // Defined independently by BaseMessage
+     uint32 height = 2;  // Defined independently by Overlay
+   }
 
-  // BAD: Diverges from BaseMessage's definition, and can cause decode
-  // errors/corruption.
-  message InvalidOverlay {
-    fixed32 length = 1;
-  }
+   // BAD: Diverges from BaseMessage's definition, and can cause decode
+   // errors/corruption.
+   message InvalidOverlay {
+     fixed32 length = 1;
+   }
 
 The ``StreamEncoderCast<>()`` helper template reduces very messy casting into
 a much easier to read syntax:
 
-.. code:: c++
+.. code-block:: c++
 
-  #include "pw_protobuf/encoder.h"
-  #include "pw_protobuf_test_protos/full_test.pwpb.h"
+   #include "pw_protobuf/encoder.h"
+   #include "pw_protobuf_test_protos/full_test.pwpb.h"
 
-  Result<ConstByteSpan> EncodeOverlaid(uint32_t height,
-                                       uint32_t length,
-                                       ConstByteSpan encode_buffer) {
-    BaseMessage::MemoryEncoder base(encode_buffer);
+   Result<ConstByteSpan> EncodeOverlaid(uint32_t height,
+                                        uint32_t length,
+                                        ConstByteSpan encode_buffer) {
+     BaseMessage::MemoryEncoder base(encode_buffer);
 
-    // Without StreamEncoderCast<>(), this line would be:
-    //   Overlay::StreamEncoder& overlay =
-    //       *static_cast<Overlay::StreamEncoder*>(
-    //           static_cast<pw::protobuf::StreamEncoder*>(&base)
-    Overlay::StreamEncoder& overlay =
-        StreamEncoderCast<Overlay::StreamEncoder>(base);
-    if (!overlay.WriteHeight(height).ok()) {
-      return overlay.status();
-    }
-    if (!base.WriteLength(length).ok()) {
-      return base.status();
-    }
-    return ConstByteSpan(base);
-  }
+     // Without StreamEncoderCast<>(), this line would be:
+     //   Overlay::StreamEncoder& overlay =
+     //       *static_cast<Overlay::StreamEncoder*>(
+     //           static_cast<pw::protobuf::StreamEncoder*>(&base)
+     Overlay::StreamEncoder& overlay =
+         StreamEncoderCast<Overlay::StreamEncoder>(base);
+     if (!overlay.WriteHeight(height).ok()) {
+       return overlay.status();
+     }
+     if (!base.WriteLength(length).ok()) {
+       return base.status();
+     }
+     return ConstByteSpan(base);
+   }
 
 While this use case is somewhat uncommon, it's a core supported use case of
 pw_protobuf.
 
 .. warning::
 
-  Using this to convert one stream encoder to another when the messages
-  themselves do not safely overlay will result in corrupt protos. Be careful
-  when doing this as there's no compile-time way to detect whether or not two
-  messages are meant to overlay.
+   Using this to convert one stream encoder to another when the messages
+   themselves do not safely overlay will result in corrupt protos. Be careful
+   when doing this as there's no compile-time way to detect whether or not two
+   messages are meant to overlay.
 
 Decoding
 --------
@@ -389,62 +389,62 @@
 structure, the following additional methods are also generated in the typed
 ``StreamDecoder`` class.
 
-.. code:: c++
+.. code-block:: c++
 
-  class Customer::StreamDecoder : pw::protobuf::StreamDecoder {
-   public:
-    // Message Structure Reader.
-    pw::Status Read(Customer::Message&);
+   class Customer::StreamDecoder : pw::protobuf::StreamDecoder {
+    public:
+     // Message Structure Reader.
+     pw::Status Read(Customer::Message&);
 
-    // Returns the identity of the current field.
-    ::pw::Result<Fields> Field();
+     // Returns the identity of the current field.
+     ::pw::Result<Fields> Field();
 
-    // Per-Field Typed Readers.
-    pw::Result<int32_t> ReadAge();
+     // Per-Field Typed Readers.
+     pw::Result<int32_t> ReadAge();
 
-    pw::StatusWithSize ReadName(pw::span<char>);
-    BytesReader GetNameReader(); // Read name as a stream of bytes.
+     pw::StatusWithSize ReadName(pw::span<char>);
+     BytesReader GetNameReader(); // Read name as a stream of bytes.
 
-    pw::Result<Customer::Status> ReadStatus();
-  };
+     pw::Result<Customer::Status> ReadStatus();
+   };
 
 Complete and correct decoding requires looping through the fields, so is more
 complex than encoding or using the message structure.
 
-.. code:: c++
+.. code-block:: c++
 
-  pw::Status DecodeCustomer(Customer::StreamDecoder& decoder) {
-    uint32_t age;
-    char name[32];
-    Customer::Status status;
+   pw::Status DecodeCustomer(Customer::StreamDecoder& decoder) {
+     uint32_t age;
+     char name[32];
+     Customer::Status status;
 
-    while ((status = decoder.Next()).ok()) {
-      switch (decoder.Field().value()) {
-        case Customer::Fields::kAge: {
-          PW_TRY_ASSIGN(age, decoder.ReadAge());
-          break;
-        }
-        case Customer::Fields::kName: {
-          PW_TRY(decoder.ReadName(name));
-          break;
-        }
-        case Customer::Fields::kStatus: {
-          PW_TRY_ASSIGN(status, decoder.ReadStatus());
-          break;
-        }
-      }
-    }
+     while ((status = decoder.Next()).ok()) {
+       switch (decoder.Field().value()) {
+         case Customer::Fields::kAge: {
+           PW_TRY_ASSIGN(age, decoder.ReadAge());
+           break;
+         }
+         case Customer::Fields::kName: {
+           PW_TRY(decoder.ReadName(name));
+           break;
+         }
+         case Customer::Fields::kStatus: {
+           PW_TRY_ASSIGN(status, decoder.ReadStatus());
+           break;
+         }
+       }
+     }
 
-    return status.IsOutOfRange() ? OkStatus() : status;
-  }
+     return status.IsOutOfRange() ? OkStatus() : status;
+   }
 
 .. warning:: ``Fields::SNAKE_CASE`` is deprecated. Use ``Fields::kCamelCase``.
 
-  Transitional support for ``Fields::SNAKE_CASE`` will soon only be available by
-  explicitly setting the following GN variable in your project:
-  ``pw_protobuf_compiler_GENERATE_LEGACY_ENUM_SNAKE_CASE_NAMES=true``
+   Transitional support for ``Fields::SNAKE_CASE`` will soon only be available by
+   explicitly setting the following GN variable in your project:
+   ``pw_protobuf_compiler_GENERATE_LEGACY_ENUM_SNAKE_CASE_NAMES=true``
 
-  This support will be removed after downstream projects have been migrated.
+   This support will be removed after downstream projects have been migrated.
 
 
 Reading a single field
@@ -456,52 +456,52 @@
 
 .. code-block:: c++
 
-  pw::Status ReadCustomerData(pw::ConstByteSpan serialized_customer) {
-    pw::Result<uint32_t> age = Customer::FindAge(serialized_customer);
-    if (!age.ok()) {
-      return age.status();
-    }
+   pw::Status ReadCustomerData(pw::ConstByteSpan serialized_customer) {
+     pw::Result<uint32_t> age = Customer::FindAge(serialized_customer);
+     if (!age.ok()) {
+       return age.status();
+     }
 
-    // This will scan the buffer again from the start, which is less efficient
-    // than writing a custom decoder loop.
-    pw::Result<std::string_view> name = Customer::FindName(serialized_customer);
-    if (!age.ok()) {
-      return age.status();
-    }
+     // This will scan the buffer again from the start, which is less efficient
+     // than writing a custom decoder loop.
+     pw::Result<std::string_view> name = Customer::FindName(serialized_customer);
+     if (!name.ok()) {
+       return name.status();
+     }
 
-    DoStuff(age, name);
-    return pw::OkStatus();
-  }
+     DoStuff(age, name);
+     return pw::OkStatus();
+   }
 
 The ``Find`` APIs also work with streamed data, as shown below.
 
 .. code-block:: c++
 
-  pw::Status ReadCustomerData(pw::stream::Reader& customer_stream) {
-    pw::Result<uint32_t> age = Customer::FindAge(customer_stream);
-    if (!age.ok()) {
-      return age.status();
-    }
+   pw::Status ReadCustomerData(pw::stream::Reader& customer_stream) {
+     pw::Result<uint32_t> age = Customer::FindAge(customer_stream);
+     if (!age.ok()) {
+       return age.status();
+     }
 
-    // This will begin scanning for `name` from the current position of the
-    // stream (following the `age` field). If `name` appeared before `age` in
-    // the serialized data, it will not be found.
-    //
-    // Note that unlike with the buffer APIs, stream Find methods copy `string`
-    // and `bytes` fields into a user-provided buffer.
-    char name[32];
-    pw::StatusWithSize sws = Customer::FindName(serialized_customer, name);
-    if (!sws.ok()) {
-      return sws.status();
-    }
-    if (sws.size() >= sizeof(name)) {
-      return pw::Status::OutOfRange();
-    }
-    name[sws.size()] = '\0';
+     // This will begin scanning for `name` from the current position of the
+     // stream (following the `age` field). If `name` appeared before `age` in
+     // the serialized data, it will not be found.
+     //
+     // Note that unlike with the buffer APIs, stream Find methods copy `string`
+     // and `bytes` fields into a user-provided buffer.
+     char name[32];
+     pw::StatusWithSize sws = Customer::FindName(customer_stream, name);
+     if (!sws.ok()) {
+       return sws.status();
+     }
+     if (sws.size() >= sizeof(name)) {
+       return pw::Status::OutOfRange();
+     }
+     name[sws.size()] = '\0';
 
-    DoStuff(age, name);
-    return pw::OkStatus();
-  }
+     DoStuff(age, name);
+     return pw::OkStatus();
+   }
 
 .. note::
 
@@ -529,37 +529,37 @@
 To encode the same message we've used in the examples thus far, we would use
 the following parts of the core API:
 
-.. code:: c++
+.. code-block:: c++
 
-  class pw::protobuf::StreamEncoder {
-   public:
-    Status WriteInt32(uint32_t field_number, int32_t);
-    Status WriteUint32(uint32_t field_number, uint32_t);
+   class pw::protobuf::StreamEncoder {
+    public:
+     Status WriteInt32(uint32_t field_number, int32_t);
+     Status WriteUint32(uint32_t field_number, uint32_t);
 
-    Status WriteString(uint32_t field_number, std::string_view);
-    Status WriteString(uint32_t field_number, const char*, size_t);
+     Status WriteString(uint32_t field_number, std::string_view);
+     Status WriteString(uint32_t field_number, const char*, size_t);
 
-    // And many other methods, see pw_protobuf/encoder.h
-  };
+     // And many other methods, see pw_protobuf/encoder.h
+   };
 
 Encoding the same message requires that we specify the field numbers, which we
 can hardcode, or supplement using the C++ code generated ``Fields`` enum, and
 cast the enumerated type.
 
-.. code:: c++
+.. code-block:: c++
 
-  #include "pw_protobuf/encoder.h"
-  #include "example_protos/customer.pwpb.h"
+   #include "pw_protobuf/encoder.h"
+   #include "example_protos/customer.pwpb.h"
 
-  Status EncodeCustomer(pw::protobuf::StreamEncoder& encoder) {
-    PW_TRY(encoder.WriteInt32(static_cast<uint32_t>(Customer::Fields::kAge),
-                              33));
-    PW_TRY(encoder.WriteString(static_cast<uint32_t>(Customer::Fields::kName),
-                               "Joe Bloggs"sv));
-    PW_TRY(encoder.WriteUint32(
-        static_cast<uint32_t>(Customer::Fields::kStatus),
-        static_cast<uint32_t>(Customer::Status::INACTIVE)));
-  }
+   Status EncodeCustomer(pw::protobuf::StreamEncoder& encoder) {
+     PW_TRY(encoder.WriteInt32(static_cast<uint32_t>(Customer::Fields::kAge),
+                               33));
+     PW_TRY(encoder.WriteString(static_cast<uint32_t>(Customer::Fields::kName),
+                                "Joe Bloggs"sv));
+     PW_TRY(encoder.WriteUint32(
+         static_cast<uint32_t>(Customer::Fields::kStatus),
+         static_cast<uint32_t>(Customer::Status::INACTIVE)));
+   }
 
 Decoding
 --------
@@ -568,52 +568,52 @@
 
 To decode the same message we would use the following parts of the core API:
 
-.. code:: c++
+.. code-block:: c++
 
-  class pw::protobuf::StreamDecoder {
-   public:
-    // Returns the identity of the current field.
-    ::pw::Result<uint32_t> FieldNumber();
+   class pw::protobuf::StreamDecoder {
+    public:
+     // Returns the identity of the current field.
+     ::pw::Result<uint32_t> FieldNumber();
 
-    Result<int32_t> ReadInt32();
-    Result<uint32_t> ReadUint32();
+     Result<int32_t> ReadInt32();
+     Result<uint32_t> ReadUint32();
 
-    StatusWithSize ReadString(pw::span<char>);
+     StatusWithSize ReadString(pw::span<char>);
 
-    // And many other methods, see pw_protobuf/stream_decoder.h
-  };
+     // And many other methods, see pw_protobuf/stream_decoder.h
+   };
 
 As with the typed per-field API, complete and correct decoding requires looping
 through the fields and checking the field numbers, along with casting types.
 
-.. code:: c++
+.. code-block:: c++
 
-  pw::Status DecodeCustomer(pw::protobuf::StreamDecoder& decoder) {
-    uint32_t age;
-    char name[32];
-    Customer::Status status;
+   pw::Status DecodeCustomer(pw::protobuf::StreamDecoder& decoder) {
+     uint32_t age;
+     char name[32];
+     Customer::Status status;
 
-    while ((status = decoder.Next()).ok()) {
-      switch (decoder.FieldNumber().value()) {
-        case static_cast<uint32_t>(Customer::Fields::kAge): {
-          PW_TRY_ASSIGN(age, decoder.ReadInt32());
-          break;
-        }
-        case static_cast<uint32_t>(Customer::Fields::kName): {
-          PW_TRY(decoder.ReadString(name));
-          break;
-        }
-        case static_cast<uint32_t>(Customer::Fields::kStatus): {
-          uint32_t status_value;
-          PW_TRY_ASSIGN(status_value, decoder.ReadUint32());
-          status = static_cast<Customer::Status>(status_value);
-          break;
-        }
-      }
-    }
+     while ((status = decoder.Next()).ok()) {
+       switch (decoder.FieldNumber().value()) {
+         case static_cast<uint32_t>(Customer::Fields::kAge): {
+           PW_TRY_ASSIGN(age, decoder.ReadInt32());
+           break;
+         }
+         case static_cast<uint32_t>(Customer::Fields::kName): {
+           PW_TRY(decoder.ReadString(name));
+           break;
+         }
+         case static_cast<uint32_t>(Customer::Fields::kStatus): {
+           uint32_t status_value;
+           PW_TRY_ASSIGN(status_value, decoder.ReadUint32());
+           status = static_cast<Customer::Status>(status_value);
+           break;
+         }
+       }
+     }
 
-    return status.IsOutOfRange() ? OkStatus() : status;
-  }
+     return status.IsOutOfRange() ? OkStatus() : status;
+   }
 
 Find APIs
 ---------
@@ -655,29 +655,29 @@
 
 Example ``BUILD.gn``:
 
-.. code::
+.. code-block::
 
-  import("//build_overrides/pigweed.gni")
+   import("//build_overrides/pigweed.gni")
 
-  import("$dir_pw_build/target_types.gni")
-  import("$dir_pw_protobuf_compiler/proto.gni")
+   import("$dir_pw_build/target_types.gni")
+   import("$dir_pw_protobuf_compiler/proto.gni")
 
-  # This target controls where the *.pwpb.h headers end up on the include path.
-  # In this example, it's at "pet_daycare_protos/client.pwpb.h".
-  pw_proto_library("pet_daycare_protos") {
-    sources = [
-      "pet_daycare_protos/client.proto",
-    ]
-  }
+   # This target controls where the *.pwpb.h headers end up on the include path.
+   # In this example, it's at "pet_daycare_protos/client.pwpb.h".
+   pw_proto_library("pet_daycare_protos") {
+     sources = [
+       "pet_daycare_protos/client.proto",
+     ]
+   }
 
-  pw_source_set("example_client") {
-    sources = [ "example_client.cc" ]
-    deps = [
-      ":pet_daycare_protos.pwpb",
-      dir_pw_bytes,
-      dir_pw_stream,
-    ]
-  }
+   pw_source_set("example_client") {
+     sources = [ "example_client.cc" ]
+     deps = [
+       ":pet_daycare_protos.pwpb",
+       dir_pw_bytes,
+       dir_pw_stream,
+     ]
+   }
 
 -------------
 Configuration
@@ -725,13 +725,13 @@
 host software can use the reflection API to query for the options and validate
 messages comply with the specified limitations.
 
-.. code::
+.. code-block:: text
 
-  import "pw_protobuf_protos/field_options.proto";
+   import "pw_protobuf_protos/field_options.proto";
 
-  message Demo {
-    string size_limited_string = 1 [(pw.protobuf.pwpb).max_size = 16];
-  };
+   message Demo {
+     string size_limited_string = 1 [(pw.protobuf.pwpb).max_size = 16];
+   };
 
 Options Files
 =============
@@ -744,30 +744,30 @@
 
 Example:
 
-.. code::
+.. code-block::
 
-  // Set an option for a specific field.
-  fuzzy_friends.Client.visit_dates max_count:16
+   // Set an option for a specific field.
+   fuzzy_friends.Client.visit_dates max_count:16
 
-  // Set options for multiple fields by wildcard matching.
-  fuzzy_friends.Pet.* max_size:32
+   // Set options for multiple fields by wildcard matching.
+   fuzzy_friends.Pet.* max_size:32
 
-  // Set multiple options in one go.
-  fuzzy_friends.Dog.paws max_count:4 fixed_count:true
+   // Set multiple options in one go.
+   fuzzy_friends.Dog.paws max_count:4 fixed_count:true
 
 Options files should be listed as ``inputs`` when defining ``pw_proto_library``,
 e.g.
 
-.. code::
+.. code-block::
 
-  pw_proto_library("pet_daycare_protos") {
-    sources = [
-      "pet_daycare_protos/client.proto",
-    ]
-    inputs = [
-      "pet_daycare_protos/client.options",
-    ]
-  }
+   pw_proto_library("pet_daycare_protos") {
+     sources = [
+       "pet_daycare_protos/client.proto",
+     ]
+     inputs = [
+       "pet_daycare_protos/client.options",
+     ]
+   }
 
 Valid options are:
 
@@ -820,177 +820,197 @@
 
 * Scalar fields are represented by their appropriate C++ type.
 
-  .. code::
+  .. code-block:: protobuf
 
-    message Customer {
-      int32 age = 1;
-      uint32 birth_year = 2;
-      sint64 rating = 3;
-      bool is_active = 4;
-    }
+     message Customer {
+       int32 age = 1;
+       uint32 birth_year = 2;
+       sint64 rating = 3;
+       bool is_active = 4;
+     }
 
-  .. code:: c++
+  .. code-block:: c++
 
-    struct Customer::Message {
-      int32_t age;
-      uint32_t birth_year;
-      int64_t rating;
-      bool is_active;
-    };
+     struct Customer::Message {
+       int32_t age;
+       uint32_t birth_year;
+       int64_t rating;
+       bool is_active;
+     };
 
 * Enumerations are represented by a code generated namespaced proto enum.
 
-  .. code::
+  .. code-block:: protobuf
 
-    message Award {
-      enum Service {
-        BRONZE = 1;
-        SILVER = 2;
-        GOLD = 3;
-      }
-      Service service = 1;
-    }
+     message Award {
+       enum Service {
+         BRONZE = 1;
+         SILVER = 2;
+         GOLD = 3;
+       }
+       Service service = 1;
+     }
 
-  .. code:: c++
+  .. code-block:: c++
 
-    enum class Award::Service : uint32_t {
-      BRONZE = 1,
-      SILVER = 2,
-      GOLD = 3,
+     enum class Award::Service : uint32_t {
+       BRONZE = 1,
+       SILVER = 2,
+       GOLD = 3,
 
-      kBronze = BRONZE,
-      kSilver = SILVER,
-      kGold = GOLD,
-    };
+       kBronze = BRONZE,
+       kSilver = SILVER,
+       kGold = GOLD,
+     };
 
-    struct Award::Message {
-      Award::Service service;
-    };
+     struct Award::Message {
+       Award::Service service;
+     };
 
   Aliases to the enum values are also included in the "constant" style to match
   your preferred coding style. These aliases have any common prefix to the
   enumeration values removed, such that:
 
-  .. code::
+  .. code-block:: protobuf
 
-    enum Activity {
-      ACTIVITY_CYCLING = 1;
-      ACTIVITY_RUNNING = 2;
-      ACTIVITY_SWIMMING = 3;
-    }
+     enum Activity {
+       ACTIVITY_CYCLING = 1;
+       ACTIVITY_RUNNING = 2;
+       ACTIVITY_SWIMMING = 3;
+     }
 
-  .. code:: c++
+  .. code-block:: c++
 
-    enum class Activity : uint32_t {
-      ACTIVITY_CYCLING = 1,
-      ACTIVITY_RUNNING = 2,
-      ACTIVITY_SWIMMING = 3,
+     enum class Activity : uint32_t {
+       ACTIVITY_CYCLING = 1,
+       ACTIVITY_RUNNING = 2,
+       ACTIVITY_SWIMMING = 3,
 
-      kCycling = ACTIVITY_CYCLING,
-      kRunning = ACTIVITY_RUNNING,
-      kSwimming = ACTIVITY_SWIMMING,
-    };
+       kCycling = ACTIVITY_CYCLING,
+       kRunning = ACTIVITY_RUNNING,
+       kSwimming = ACTIVITY_SWIMMING,
+     };
 
 
 * Nested messages are represented by their own ``struct Message`` provided that
   a reference cycle does not exist.
 
-  .. code::
+  .. code-block:: protobuf
 
-    message Sale {
-      Customer customer = 1;
-      Product product = 2;
-    }
+     message Sale {
+       Customer customer = 1;
+       Product product = 2;
+     }
 
-  .. code:: c++
+  .. code-block:: c++
 
-    struct Sale::Message {
-      Customer::Message customer;
-      Product::Message product;
-    };
+     struct Sale::Message {
+       Customer::Message customer;
+       Product::Message product;
+     };
 
 * Optional scalar fields are represented by the appropriate C++ type wrapped in
   ``std::optional``. Optional fields are not encoded when the value is not
   present.
 
-  .. code::
+  .. code-block:: protobuf
 
-    message Loyalty {
-      optional int32 points = 1;
-    }
+     message Loyalty {
+       optional int32 points = 1;
+     }
 
-  .. code:: c++
+  .. code-block:: c++
 
-    struct Loyalty::Message {
-      std::optional<int32_t> points;
-    };
+     struct Loyalty::Message {
+       std::optional<int32_t> points;
+     };
 
 * Repeated scalar fields are represented by ``pw::Vector`` when the
   ``max_count`` option is set for that field, or by ``std::array`` when both
   ``max_count`` and ``fixed_count:true`` are set.
 
-  .. code::
+  The max count is exposed as an UpperCamelCase constant ``k{FieldName}MaxSize``.
+
+  .. code-block:: protobuf
 
     message Register {
       repeated int32 cash_in = 1;
       repeated int32 cash_out = 2;
     }
 
-  .. code::
+  .. code-block:: text
 
-    Register.cash_in max_count:32 fixed_count:true
-    Register.cash_out max_count:64
+     Register.cash_in max_count:32 fixed_count:true
+     Register.cash_out max_count:64
 
-  .. code:: c++
+  .. code-block:: c++
 
-    struct Register::Message {
-      std::array<int32_t, 32> cash_in;
-      pw::Vector<int32_t, 64> cash_out;
-    };
+     namespace Register {
+       static constexpr size_t kCashInMaxSize = 32;
+       static constexpr size_t kCashOutMaxSize = 64;
+     }
+
+     struct Register::Message {
+       std::array<int32_t, kCashInMaxSize> cash_in;
+       pw::Vector<int32_t, kCashOutMaxSize> cash_out;
+     };
 
 * `bytes` fields are represented by ``pw::Vector`` when the ``max_size`` option
   is set for that field, or by ``std::array`` when both ``max_size`` and
   ``fixed_size:true`` are set.
 
-  .. code::
+  The max size is exposed as an UpperCamelCase constant ``k{FieldName}MaxSize``.
 
-    message Product {
-      bytes sku = 1;
-      bytes serial_number = 2;
-    }
+  .. code-block:: protobuf
 
-  .. code::
+     message Product {
+       bytes sku = 1;
+       bytes serial_number = 2;
+     }
 
-    Product.sku max_size:8 fixed_size:true
-    Product.serial_number max_size:64
+  .. code-block:: text
 
-  .. code:: c++
+     Product.sku max_size:8 fixed_size:true
+     Product.serial_number max_size:64
 
-    struct Product::Message {
-      std::array<std::byte, 8> sku;
-      pw::Vector<std::byte, 64> serial_number;
-    };
+  .. code-block:: c++
+
+     namespace Product {
+       static constexpr size_t kSkuMaxSize = 8;
+       static constexpr size_t kSerialNumberMaxSize = 64;
+     }
+
+     struct Product::Message {
+       std::array<std::byte, kSkuMaxSize> sku;
+       pw::Vector<std::byte, kSerialNumberMaxSize> serial_number;
+     };
 
 * `string` fields are represented by a :cpp:type:`pw::InlineString` when the
   ``max_size`` option is set for that field. The string can hold up to
   ``max_size`` characters, and is always null terminated. The null terminator is
   not counted in ``max_size``.
 
-  .. code::
+  The max size is exposed as an UpperCamelCase constant ``k{FieldName}MaxSize``.
 
-    message Employee {
-      string name = 1;
-    }
+  .. code-block:: protobuf
 
-  .. code::
+     message Employee {
+       string name = 1;
+     }
 
-    Employee.name max_size:128
+  .. code-block:: text
 
-  .. code:: c++
+     Employee.name max_size:128
 
-    struct Employee::Message {
-      pw::InlineString<128> name;
-    };
+  .. code-block:: c++
+
+     namespace Employee {
+       static constexpr size_t kNameMaxSize = 128;
+     }
+
+     struct Employee::Message {
+       pw::InlineString<kNameMaxSize> name;
+     };
 
 * Nested messages with a dependency cycle, repeated scalar fields without a
   ``max_count`` option set, `bytes` and `strings` fields without a ``max_size``
@@ -1000,29 +1020,29 @@
   You set the callback to a custom function for encoding or decoding
   before passing the structure to ``Write()`` or ``Read()`` appropriately.
 
-  .. code::
+  .. code-block:: protobuf
 
-    message Store {
-      Store nearest_store = 1;
-      repeated int32 employee_numbers = 2;
-      string driections = 3;
-      repeated string address = 4;
-      repeated Employee employees = 5;
-    }
+     message Store {
+       Store nearest_store = 1;
+       repeated int32 employee_numbers = 2;
+       string directions = 3;
+       repeated string address = 4;
+       repeated Employee employees = 5;
+     }
 
-  .. code::
+  .. code-block::
 
-    // No options set.
+     // No options set.
 
-  .. code:: c++
+  .. code-block:: c++
 
-    struct Store::Message {
-      pw::protobuf::Callback<Store::StreamEncoder, Store::StreamDecoder> nearest_store;
-      pw::protobuf::Callback<Store::StreamEncoder, Store::StreamDecoder> employee_numbers;
-      pw::protobuf::Callback<Store::StreamEncoder, Store::StreamDecoder> directions;
-      pw::protobuf::Callback<Store::StreamEncoder, Store::StreamDecoder> address;
-      pw::protobuf::Callback<Store::StreamEncoder, Store::StreamDecoder> employees;
-    };
+     struct Store::Message {
+       pw::protobuf::Callback<Store::StreamEncoder, Store::StreamDecoder> nearest_store;
+       pw::protobuf::Callback<Store::StreamEncoder, Store::StreamDecoder> employee_numbers;
+       pw::protobuf::Callback<Store::StreamEncoder, Store::StreamDecoder> directions;
+       pw::protobuf::Callback<Store::StreamEncoder, Store::StreamDecoder> address;
+       pw::protobuf::Callback<Store::StreamEncoder, Store::StreamDecoder> employees;
+     };
 
   A Callback object can be converted to a ``bool`` indicating whether a callback
   is set.
@@ -1043,21 +1063,21 @@
 ``Channel::Message::operator_`` to avoid conflicting with the ``operator``
 keyword.
 
-.. code::
+.. code-block:: protobuf
 
-  message Channel {
-    int32 bitrate = 1;
-    float signal_to_noise_ratio = 2;
-    Company operator = 3;
-  }
+   message Channel {
+     int32 bitrate = 1;
+     float signal_to_noise_ratio = 2;
+     Company operator = 3;
+   }
 
-.. code:: c++
+.. code-block:: c++
 
-  struct Channel::Message {
-    int32_t bitrate;
-    float signal_to_noise_ratio;
-    Company::Message operator_;
-  };
+   struct Channel::Message {
+     int32_t bitrate;
+     float signal_to_noise_ratio;
+     Company::Message operator_;
+   };
 
 Similarly, as shown in the example below, some POSIX-signal names conflict with
 macros defined by the standard-library header ``<csignal>`` and therefore
@@ -1068,136 +1088,135 @@
 naming conflicts caused by user-defined macros are the user's responsibility
 (https://google.github.io/styleguide/cppguide.html#Preprocessor_Macros).
 
-.. code::
+.. code-block:: protobuf
 
-  enum PosixSignal {
-    NONE = 0;
-    SIGHUP = 1;
-    SIGINT = 2;
-    SIGQUIT = 3;
-    SIGILL = 4;
-    SIGTRAP = 5;
-    SIGABRT = 6;
-    SIGFPE = 8;
-    SIGKILL = 9;
-    SIGSEGV = 11;
-    SIGPIPE = 13;
-    SIGALRM = 14;
-    SIGTERM = 15;
-  }
+   enum PosixSignal {
+     NONE = 0;
+     SIGHUP = 1;
+     SIGINT = 2;
+     SIGQUIT = 3;
+     SIGILL = 4;
+     SIGTRAP = 5;
+     SIGABRT = 6;
+     SIGFPE = 8;
+     SIGKILL = 9;
+     SIGSEGV = 11;
+     SIGPIPE = 13;
+     SIGALRM = 14;
+     SIGTERM = 15;
+   }
 
-.. code:: c++
+.. code-block:: c++
 
-  enum class PosixSignal : uint32_t {
-    NONE = 0,
-    SIGHUP = 1,
-    SIGINT_ = 2,
-    SIGQUIT = 3,
-    SIGILL_ = 4,
-    SIGTRAP = 5,
-    SIGABRT_ = 6,
-    SIGFPE_ = 8,
-    SIGKILL = 9,
-    SIGSEGV_ = 11,
-    SIGPIPE = 13,
-    SIGALRM = 14,
-    SIGTERM_ = 15,
+   enum class PosixSignal : uint32_t {
+     NONE = 0,
+     SIGHUP = 1,
+     SIGINT_ = 2,
+     SIGQUIT = 3,
+     SIGILL_ = 4,
+     SIGTRAP = 5,
+     SIGABRT_ = 6,
+     SIGFPE_ = 8,
+     SIGKILL = 9,
+     SIGSEGV_ = 11,
+     SIGPIPE = 13,
+     SIGALRM = 14,
+     SIGTERM_ = 15,
 
-    kNone = NONE,
-    kSighup = SIGHUP,
-    kSigint = SIGINT_,
-    kSigquit = SIGQUIT,
-    kSigill = SIGILL_,
-    kSigtrap = SIGTRAP,
-    kSigabrt = SIGABRT_,
-    kSigfpe = SIGFPE_,
-    kSigkill = SIGKILL,
-    kSigsegv = SIGSEGV_,
-    kSigpipe = SIGPIPE,
-    kSigalrm = SIGALRM,
-    kSigterm = SIGTERM_,
-  };
+     kNone = NONE,
+     kSighup = SIGHUP,
+     kSigint = SIGINT_,
+     kSigquit = SIGQUIT,
+     kSigill = SIGILL_,
+     kSigtrap = SIGTRAP,
+     kSigabrt = SIGABRT_,
+     kSigfpe = SIGFPE_,
+     kSigkill = SIGKILL,
+     kSigsegv = SIGSEGV_,
+     kSigpipe = SIGPIPE,
+     kSigalrm = SIGALRM,
+     kSigterm = SIGTERM_,
+   };
 
 Much like reserved words and macros, the names ``Message`` and ``Fields`` are
 suffixed with underscores in generated C++ code. This is to prevent name
 conflicts with the codegen internals if they're used in a nested context as in
 the example below.
 
-.. code::
+.. code-block:: protobuf
 
-  message Function {
-    message Message {
-      string content = 1;
-    }
+   message Function {
+     message Message {
+       string content = 1;
+     }
 
-    enum Fields {
-      NONE = 0;
-      COMPLEX_NUMBERS = 1;
-      INTEGERS_MOD_5 = 2;
-      MEROMORPHIC_FUNCTIONS_ON_COMPLEX_PLANE = 3;
-      OTHER = 4;
-    }
+     enum Fields {
+       NONE = 0;
+       COMPLEX_NUMBERS = 1;
+       INTEGERS_MOD_5 = 2;
+       MEROMORPHIC_FUNCTIONS_ON_COMPLEX_PLANE = 3;
+       OTHER = 4;
+     }
 
-    Message description = 1;
-    Fields domain = 2;
-    Fields codomain = 3;
-  }
+     Message description = 1;
+     Fields domain = 2;
+     Fields codomain = 3;
+   }
 
-.. code::
+.. code-block::
 
-  Function.Message.content max_size:128
+   Function.Message.content max_size:128
 
-.. code:: c++
+.. code-block:: c++
 
-  struct Function::Message_::Message {
-    pw::InlineString<128> content;
-  };
+   struct Function::Message_::Message {
+     pw::InlineString<128> content;
+   };
 
-  enum class Function::Message_::Fields : uint32_t {
-    CONTENT = 1,
-  };
+   enum class Function::Message_::Fields : uint32_t {
+     CONTENT = 1,
+   };
 
-  enum class Function::Fields_ uint32_t {
-    NONE = 0,
-    COMPLEX_NUMBERS = 1,
-    INTEGERS_MOD_5 = 2,
-    MEROMORPHIC_FUNCTIONS_ON_COMPLEX_PLANE = 3,
-    OTHER = 4,
+   enum class Function::Fields_ : uint32_t {
+     NONE = 0,
+     COMPLEX_NUMBERS = 1,
+     INTEGERS_MOD_5 = 2,
+     MEROMORPHIC_FUNCTIONS_ON_COMPLEX_PLANE = 3,
+     OTHER = 4,
 
-    kNone = NONE,
-    kComplexNumbers = COMPLEX_NUMBERS,
-    kIntegersMod5 = INTEGERS_MOD_5,
-    kMeromorphicFunctionsOnComplexPlane =
-        MEROMORPHIC_FUNCTIONS_ON_COMPLEX_PLANE,
-    kOther = OTHER,
-  };
+     kNone = NONE,
+     kComplexNumbers = COMPLEX_NUMBERS,
+     kIntegersMod5 = INTEGERS_MOD_5,
+     kMeromorphicFunctionsOnComplexPlane =
+         MEROMORPHIC_FUNCTIONS_ON_COMPLEX_PLANE,
+     kOther = OTHER,
+   };
 
-  struct Function::Message {
-    Function::Message_::Message description;
-    Function::Fields_ domain;
-    Function::Fields_ codomain;
-  };
+   struct Function::Message {
+     Function::Message_::Message description;
+     Function::Fields_ domain;
+     Function::Fields_ codomain;
+   };
 
-  enum class Function::Fields : uint32_t {
-    DESCRIPTION = 1,
-    DOMAIN = 2,
-    CODOMAIN = 3,
-  };
+   enum class Function::Fields : uint32_t {
+     DESCRIPTION = 1,
+     DOMAIN = 2,
+     CODOMAIN = 3,
+   };
 
 .. warning::
-  Note that the C++ spec also reserves two categories of identifiers for the
-  compiler to use in ways that may conflict with generated code:
+   Note that the C++ spec also reserves two categories of identifiers for the
+   compiler to use in ways that may conflict with generated code:
 
-  * Any identifier that contains two consecutive underscores anywhere in it.
+   * Any identifier that contains two consecutive underscores anywhere in it.
+   * Any identifier that starts with an underscore followed by a capital letter.
 
-  * Any identifier that starts with an underscore followed by a capital letter.
-
-  Appending underscores to symbols in these categories wouldn't change the fact
-  that they match patterns reserved for the compiler, so the codegen does not
-  currently attempt to fix them. Such names will therefore result in
-  non-portable code that may or may not work depending on the compiler. These
-  naming patterns are of course strongly discouraged in any protobufs that will
-  be used with ``pw_protobuf`` codegen.
+   Appending underscores to symbols in these categories wouldn't change the fact
+   that they match patterns reserved for the compiler, so the codegen does not
+   currently attempt to fix them. Such names will therefore result in
+   non-portable code that may or may not work depending on the compiler. These
+   naming patterns are of course strongly discouraged in any protobufs that will
+   be used with ``pw_protobuf`` codegen.
 
 Overhead
 ========
@@ -1215,27 +1234,27 @@
 The simplest way to use ``MemoryEncoder`` to encode a proto is from its code
 generated ``Message`` structure into an in-memory buffer.
 
-.. code:: c++
+.. code-block:: c++
 
-  #include "my_protos/my_proto.pwpb.h"
-  #include "pw_bytes/span.h"
-  #include "pw_protobuf/encoder.h"
-  #include "pw_status/status_with_size.h"
+   #include "my_protos/my_proto.pwpb.h"
+   #include "pw_bytes/span.h"
+   #include "pw_protobuf/encoder.h"
+   #include "pw_status/status_with_size.h"
 
-  // Writes a proto response to the provided buffer, returning the encode
-  // status and number of bytes written.
-  pw::StatusWithSize WriteProtoResponse(pw::ByteSpan response) {
-    MyProto::Message message{}
-    message.magic_number = 0x1a1a2b2b;
-    message.favorite_food = "cookies";
-    message.calories = 600;
+   // Writes a proto response to the provided buffer, returning the encode
+   // status and number of bytes written.
+   pw::StatusWithSize WriteProtoResponse(pw::ByteSpan response) {
+     MyProto::Message message{};
+     message.magic_number = 0x1a1a2b2b;
+     message.favorite_food = "cookies";
+     message.calories = 600;
 
-    // All proto writes are directly written to the `response` buffer.
-    MyProto::MemoryEncoder encoder(response);
-    encoder.Write(message);
+     // All proto writes are directly written to the `response` buffer.
+     MyProto::MemoryEncoder encoder(response);
+     encoder.Write(message);
 
-    return pw::StatusWithSize(encoder.status(), encoder.size());
-  }
+     return pw::StatusWithSize(encoder.status(), encoder.size());
+   }
 
 All fields of a message are written, including those initialized to their
 default values.
@@ -1245,52 +1264,52 @@
 or lower-level APIs. This can be more convenient if finer grained control or
 other custom handling is required.
 
-.. code:: c++
+.. code-block:: c++
 
-  #include "my_protos/my_proto.pwpb.h"
-  #include "pw_bytes/span.h"
-  #include "pw_protobuf/encoder.h"
-  #include "pw_status/status_with_size.h"
+   #include "my_protos/my_proto.pwpb.h"
+   #include "pw_bytes/span.h"
+   #include "pw_protobuf/encoder.h"
+   #include "pw_status/status_with_size.h"
 
-  // Writes a proto response to the provided buffer, returning the encode
-  // status and number of bytes written.
-  pw::StatusWithSize WriteProtoResponse(pw::ByteSpan response) {
-    // All proto writes are directly written to the `response` buffer.
-    MyProto::MemoryEncoder encoder(response);
-    encoder.WriteMagicNumber(0x1a1a2b2b);
-    encoder.WriteFavoriteFood("cookies");
-    // Only conditionally write calories.
-    if (on_diet) {
-      encoder.WriteCalories(600);
-    }
-    return pw::StatusWithSize(encoder.status(), encoder.size());
-  }
+   // Writes a proto response to the provided buffer, returning the encode
+   // status and number of bytes written.
+   pw::StatusWithSize WriteProtoResponse(pw::ByteSpan response) {
+     // All proto writes are directly written to the `response` buffer.
+     MyProto::MemoryEncoder encoder(response);
+     encoder.WriteMagicNumber(0x1a1a2b2b);
+     encoder.WriteFavoriteFood("cookies");
+     // Only conditionally write calories.
+     if (on_diet) {
+       encoder.WriteCalories(600);
+     }
+     return pw::StatusWithSize(encoder.status(), encoder.size());
+   }
 
 StreamEncoder
 =============
 ``StreamEncoder`` is constructed with the destination stream, and a scratch
 buffer used to handle nested submessages.
 
-.. code:: c++
+.. code-block:: c++
 
-  #include "my_protos/my_proto.pwpb.h"
-  #include "pw_bytes/span.h"
-  #include "pw_protobuf/encoder.h"
-  #include "pw_stream/sys_io_stream.h"
+   #include "my_protos/my_proto.pwpb.h"
+   #include "pw_bytes/span.h"
+   #include "pw_protobuf/encoder.h"
+   #include "pw_stream/sys_io_stream.h"
 
-  pw::stream::SysIoWriter sys_io_writer;
-  MyProto::StreamEncoder encoder(sys_io_writer, pw::ByteSpan());
+   pw::stream::SysIoWriter sys_io_writer;
+   MyProto::StreamEncoder encoder(sys_io_writer, pw::ByteSpan());
 
-  // Once this line returns, the field has been written to the Writer.
-  encoder.WriteTimestamp(system::GetUnixEpoch());
+   // Once this line returns, the field has been written to the Writer.
+   encoder.WriteTimestamp(system::GetUnixEpoch());
 
-  // There's no intermediate buffering when writing a string directly to a
-  // StreamEncoder.
-  encoder.WriteWelcomeMessage("Welcome to Pigweed!");
+   // There's no intermediate buffering when writing a string directly to a
+   // StreamEncoder.
+   encoder.WriteWelcomeMessage("Welcome to Pigweed!");
 
-  if (!encoder.status().ok()) {
-    PW_LOG_INFO("Failed to encode proto; %s", encoder.status().str());
-  }
+   if (!encoder.status().ok()) {
+     PW_LOG_INFO("Failed to encode proto; %s", encoder.status().str());
+   }
 
 Callbacks
 =========
@@ -1306,25 +1325,25 @@
 nested submessage (with a dependency cycle, or repeated) can be implemented by
 calling ``Write()`` on a nested encoder.
 
-.. code:: c++
+.. code-block:: c++
 
-    Store::Message store{};
-    store.employees.SetEncoder([](Store::StreamEncoder& encoder) {
-      Employee::Message employee{};
-      // Populate `employee`.
-      return encoder.GetEmployeesEncoder().Write(employee);
-    ));
+   Store::Message store{};
+   store.employees.SetEncoder([](Store::StreamEncoder& encoder) {
+     Employee::Message employee{};
+     // Populate `employee`.
+     return encoder.GetEmployeesEncoder().Write(employee);
+   });
 
 Nested submessages
 ==================
 Code generated ``GetFieldEncoder`` methods are provided that return a correctly
 typed ``StreamEncoder`` or ``MemoryEncoder`` for the message.
 
-.. code::
+.. code-block:: protobuf
 
-  message Owner {
-    Animal pet = 1;
-  }
+   message Owner {
+     Animal pet = 1;
+   }
 
 Note that the accessor method is named for the field, while the returned encoder
 is named for the message type.
@@ -1341,9 +1360,9 @@
 that nested decoder.)
 
 .. warning::
-  When a nested submessage is created, any use of the parent encoder that
-  created the nested encoder will trigger a crash. To resume using the parent
-  encoder, destroy the submessage encoder first.
+   When a nested submessage is created, any use of the parent encoder that
+   created the nested encoder will trigger a crash. To resume using the parent
+   encoder, destroy the submessage encoder first.
 
 Buffering
 ---------
@@ -1370,40 +1389,40 @@
 also be useful in estimating how much space to allocate to account for nested
 submessage encoding overhead.
 
-.. code:: c++
+.. code-block:: c++
 
-  #include "my_protos/pets.pwpb.h"
-  #include "pw_bytes/span.h"
-  #include "pw_protobuf/encoder.h"
-  #include "pw_stream/sys_io_stream.h"
+   #include "my_protos/pets.pwpb.h"
+   #include "pw_bytes/span.h"
+   #include "pw_protobuf/encoder.h"
+   #include "pw_stream/sys_io_stream.h"
 
-  pw::stream::SysIoWriter sys_io_writer;
-  // The scratch buffer should be at least as big as the largest nested
-  // submessage. It's a good idea to be a little generous.
-  std::byte submessage_scratch_buffer[Owner::kScratchBufferSizeBytes];
+   pw::stream::SysIoWriter sys_io_writer;
+   // The scratch buffer should be at least as big as the largest nested
+   // submessage. It's a good idea to be a little generous.
+   std::byte submessage_scratch_buffer[Owner::kScratchBufferSizeBytes];
 
-  // Provide the scratch buffer to the proto encoder. The buffer's lifetime must
-  // match the lifetime of the encoder.
-  Owner::StreamEncoder owner_encoder(sys_io_writer, submessage_scratch_buffer);
+   // Provide the scratch buffer to the proto encoder. The buffer's lifetime must
+   // match the lifetime of the encoder.
+   Owner::StreamEncoder owner_encoder(sys_io_writer, submessage_scratch_buffer);
 
-  {
-    // Note that the parent encoder, owner_encoder, cannot be used until the
-    // nested encoder, pet_encoder, has been destroyed.
-    Animal::StreamEncoder pet_encoder = owner_encoder.GetPetEncoder();
+   {
+     // Note that the parent encoder, owner_encoder, cannot be used until the
+     // nested encoder, pet_encoder, has been destroyed.
+     Animal::StreamEncoder pet_encoder = owner_encoder.GetPetEncoder();
 
-    // There's intermediate buffering when writing to a nested encoder.
-    pet_encoder.WriteName("Spot");
-    pet_encoder.WriteType(Pet::Type::DOG);
+     // There's intermediate buffering when writing to a nested encoder.
+     pet_encoder.WriteName("Spot");
+     pet_encoder.WriteType(Pet::Type::DOG);
 
-    // When this scope ends, the nested encoder is serialized to the Writer.
-    // In addition, the parent encoder, owner_encoder, can be used again.
-  }
+     // When this scope ends, the nested encoder is serialized to the Writer.
+     // In addition, the parent encoder, owner_encoder, can be used again.
+   }
 
-  // If an encode error occurs when encoding the nested messages, it will be
-  // reflected at the root encoder.
-  if (!owner_encoder.status().ok()) {
-    PW_LOG_INFO("Failed to encode proto; %s", owner_encoder.status().str());
-  }
+   // If an encode error occurs when encoding the nested messages, it will be
+   // reflected at the root encoder.
+   if (!owner_encoder.status().ok()) {
+     PW_LOG_INFO("Failed to encode proto; %s", owner_encoder.status().str());
+   }
 
 MemoryEncoder objects use the final destination buffer rather than relying on a
 scratch buffer.  The ``kMaxEncodedSizeBytes`` constant takes into account the
@@ -1411,10 +1430,10 @@
 yourself, your destination buffer might need additional space.
 
 .. warning::
-  If the scratch buffer size is not sufficient, the encoding will fail with
-  ``Status::ResourceExhausted()``. Always check the results of ``Write`` calls
-  or the encoder status to ensure success, as otherwise the encoded data will
-  be invalid.
+   If the scratch buffer size is not sufficient, the encoding will fail with
+   ``Status::ResourceExhausted()``. Always check the results of ``Write`` calls
+   or the encoder status to ensure success, as otherwise the encoded data will
+   be invalid.
 
 Scalar Fields
 =============
@@ -1443,10 +1462,10 @@
 The following two method calls are equivalent, where the first is using the
 code generated API, and the second implemented by hand.
 
-.. code:: c++
+.. code-block:: c++
 
-  my_proto_encoder.WriteAge(42);
-  my_proto_encoder.WriteInt32(static_cast<uint32_t>(MyProto::Fields::kAge), 42);
+   my_proto_encoder.WriteAge(42);
+   my_proto_encoder.WriteInt32(static_cast<uint32_t>(MyProto::Fields::kAge), 42);
 
 Repeated Fields
 ---------------
@@ -1455,13 +1474,13 @@
 
 .. cpp:function:: Status MyProto::StreamEncoder::WriteFoos(T)
 
-  This writes a single unpacked value.
+   This writes a single unpacked value.
 
 .. cpp:function:: Status MyProto::StreamEncoder::WriteFoos(pw::span<const T>)
 .. cpp:function:: Status MyProto::StreamEncoder::WriteFoos(const pw::Vector<T>&)
 
-  These write a packed field containing all of the values in the provided span
-  or vector.
+   These write a packed field containing all of the values in the provided span
+   or vector.
 
 These too can be freely intermixed with the lower-level API methods, both to
 write a single value, or to write packed values from either a ``pw::span`` or
@@ -1493,14 +1512,14 @@
 The following two method calls are equivalent, where the first is using the
 code generated API, and the second implemented by hand.
 
-.. code:: c++
+.. code-block:: c++
 
-  constexpr std::array<int32_t, 5> numbers = { 4, 8, 15, 16, 23, 42 };
+   constexpr std::array<int32_t, 6> numbers = { 4, 8, 15, 16, 23, 42 };
 
-  my_proto_encoder.WriteNumbers(numbers);
-  my_proto_encoder.WritePackedInt32(
-      static_cast<uint32_t>(MyProto::Fields::kNumbers),
-      numbers);
+   my_proto_encoder.WriteNumbers(numbers);
+   my_proto_encoder.WritePackedInt32(
+       static_cast<uint32_t>(MyProto::Fields::kNumbers),
+       numbers);
 
 Enumerations
 ============
@@ -1516,12 +1535,12 @@
 The following two methods are equivalent, where the first is code generated,
 and the second implemented by hand.
 
-.. code:: c++
+.. code-block:: c++
 
-  my_proto_encoder.WriteAward(MyProto::Award::SILVER);
-  my_proto_encoder.WriteUint32(
-      static_cast<uint32_t>(MyProto::Fields::kAward),
-      static_cast<uint32_t>(MyProto::Award::SILVER));
+   my_proto_encoder.WriteAward(MyProto::Award::SILVER);
+   my_proto_encoder.WriteUint32(
+       static_cast<uint32_t>(MyProto::Fields::kAward),
+       static_cast<uint32_t>(MyProto::Award::SILVER));
 
 Repeated Fields
 ---------------
@@ -1530,13 +1549,13 @@
 
 .. cpp:function:: Status MyProto::StreamEncoder::WriteEnums(MyProto::Enums)
 
-  This writes a single unpacked value.
+   This writes a single unpacked value.
 
 .. cpp:function:: Status MyProto::StreamEncoder::WriteEnums(pw::span<const MyProto::Enums>)
 .. cpp:function:: Status MyProto::StreamEncoder::WriteEnums(const pw::Vector<MyProto::Enums>&)
 
-  These write a packed field containing all of the values in the provided span
-  or vector.
+   These write a packed field containing all of the values in the provided span
+   or vector.
 
 Their use is as scalar fields.
 
@@ -1557,9 +1576,9 @@
 
 .. cpp:function:: Status pw::protobuf::StreamEncoder::WriteStringFromStream(uint32_t field_number, stream::Reader& bytes_reader, size_t num_bytes, ByteSpan stream_pipe_buffer)
 
-  The payload for the value is provided through the stream::Reader
-  ``bytes_reader``. The method reads a chunk of the data from the reader using
-  the ``stream_pipe_buffer`` and writes it to the encoder.
+   The payload for the value is provided through the stream::Reader
+   ``bytes_reader``. The method reads a chunk of the data from the reader using
+   the ``stream_pipe_buffer`` and writes it to the encoder.
 
 Bytes
 =====
@@ -1575,9 +1594,9 @@
 
 .. cpp:function:: Status pw::protobuf::StreamEncoder::WriteBytesFromStream(uint32_t field_number, stream::Reader& bytes_reader, size_t num_bytes, ByteSpan stream_pipe_buffer)
 
-  The payload for the value is provided through the stream::Reader
-  ``bytes_reader``. The method reads a chunk of the data from the reader using
-  the ``stream_pipe_buffer`` and writes it to the encoder.
+   The payload for the value is provided through the stream::Reader
+   ``bytes_reader``. The method reads a chunk of the data from the reader using
+   the ``stream_pipe_buffer`` and writes it to the encoder.
 
 Error Handling
 ==============
@@ -1591,8 +1610,8 @@
 submessages (e.g. ``map<string, bytes>``) are provided in
 ``pw_protobuf/map_utils.h``.
 
-.. Note::
-  The helper API are currently in-development and may not remain stable.
+.. note::
+   The helper API is currently in development and may not remain stable.
 
 --------
 Decoding
@@ -1600,19 +1619,19 @@
 The simplest way to use ``StreamDecoder`` is to decode a proto from the stream
 into its code generated ``Message`` structure.
 
-.. code:: c++
+.. code-block:: c++
 
-  #include "my_protos/my_proto.pwpb.h"
-  #include "pw_protobuf/stream_decoder.h"
-  #include "pw_status/status.h"
-  #include "pw_stream/stream.h"
+   #include "my_protos/my_proto.pwpb.h"
+   #include "pw_protobuf/stream_decoder.h"
+   #include "pw_status/status.h"
+   #include "pw_stream/stream.h"
 
-  pw::Status DecodeProtoFromStream(pw::stream::Reader& reader) {
-    MyProto::Message message{};
-    MyProto::StreamDecoder decoder(reader);
-    decoder.Read(message);
-    return decoder.status();
-  }
+   pw::Status DecodeProtoFromStream(pw::stream::Reader& reader) {
+     MyProto::Message message{};
+     MyProto::StreamDecoder decoder(reader);
+     decoder.Read(message);
+     return decoder.status();
+   }
 
 In the case of errors, the decoding will stop and return with the cursor on the
 field that caused the error. It is valid in some cases to inspect the error and
@@ -1634,47 +1653,47 @@
 .. cpp:function:: Result<MyProto::Fields> MyProto::StreamDecoder::Field()
 .. cpp:function:: Result<uint32_t> pw::protobuf::StreamDecoder::FieldNumber()
 
-.. code:: c++
+.. code-block:: c++
 
-  #include "my_protos/my_proto.pwpb.h"
-  #include "pw_protobuf/strema_decoder.h"
-  #include "pw_status/status.h"
-  #include "pw_status/try.h"
-  #include "pw_stream/stream.h"
+   #include "my_protos/my_proto.pwpb.h"
+   #include "pw_protobuf/stream_decoder.h"
+   #include "pw_status/status.h"
+   #include "pw_status/try.h"
+   #include "pw_stream/stream.h"
 
-  pw::Status DecodeProtoFromStream(pw::stream::Reader& reader) {
-    MyProto::StreamDecoder decoder(reader);
-    pw::Status status;
+   pw::Status DecodeProtoFromStream(pw::stream::Reader& reader) {
+     MyProto::StreamDecoder decoder(reader);
+     pw::Status status;
 
-    uint32_t age;
-    char name[16];
+     uint32_t age;
+     char name[16];
 
-    // Iterate over the fields in the message. A return value of OK indicates
-    // that a valid field has been found and can be read. When the decoder
-    // reaches the end of the message, Next() will return OUT_OF_RANGE.
-    // Other return values indicate an error trying to decode the message.
-    while ((status = decoder.Next()).ok()) {
-      // Field() returns a Result<Fields> as it may fail sometimes.
-      // However, Field() is guaranteed to be valid after a call to Next()
-      // that returns OK, so the value can be used directly here.
-      switch (decoder.Field().value()) {
-        case MyProto::Fields::kAge: {
-          PW_TRY_ASSIGN(age, decoder.ReadAge());
-          break;
-        }
-        case MyProto::Fields::kName:
-          // The string field is copied into the provided buffer. If the buffer
-          // is too small to fit the string, RESOURCE_EXHAUSTED is returned and
-          // the decoder is not advanced, allowing the field to be re-read.
-          PW_TRY(decoder.ReadName(name));
-          break;
-      }
-    }
+     // Iterate over the fields in the message. A return value of OK indicates
+     // that a valid field has been found and can be read. When the decoder
+     // reaches the end of the message, Next() will return OUT_OF_RANGE.
+     // Other return values indicate an error trying to decode the message.
+     while ((status = decoder.Next()).ok()) {
+       // Field() returns a Result<Fields> as it may fail sometimes.
+       // However, Field() is guaranteed to be valid after a call to Next()
+       // that returns OK, so the value can be used directly here.
+       switch (decoder.Field().value()) {
+         case MyProto::Fields::kAge: {
+           PW_TRY_ASSIGN(age, decoder.ReadAge());
+           break;
+         }
+         case MyProto::Fields::kName:
+           // The string field is copied into the provided buffer. If the buffer
+           // is too small to fit the string, RESOURCE_EXHAUSTED is returned and
+           // the decoder is not advanced, allowing the field to be re-read.
+           PW_TRY(decoder.ReadName(name));
+           break;
+       }
+     }
 
-    // Do something with the fields...
+     // Do something with the fields...
 
-    return status.IsOutOfRange() ? OkStatus() : status;
-  }
+     return status.IsOutOfRange() ? OkStatus() : status;
+   }
 
 Callbacks
 =========
@@ -1690,29 +1709,29 @@
 nested submessage (with a dependency cycle, or repeated) can be implemented by
 calling ``Read()`` on a nested decoder.
 
-.. code:: c++
+.. code-block:: c++
 
-    Store::Message store{};
-    store.employees.SetDecoder([](Store::StreamDecoder& decoder) {
-      PW_ASSERT(decoder.Field().value() == Store::Fields::kEmployees);
+   Store::Message store{};
+   store.employees.SetDecoder([](Store::StreamDecoder& decoder) {
+     PW_ASSERT(decoder.Field().value() == Store::Fields::kEmployees);
 
-      Employee::Message employee{};
-      // Set any callbacks on `employee`.
-      PW_TRY(decoder.GetEmployeesDecoder().Read(employee));
-      // Do things with `employee`.
-      return OkStatus();
-    ));
+     Employee::Message employee{};
+     // Set any callbacks on `employee`.
+     PW_TRY(decoder.GetEmployeesDecoder().Read(employee));
+     // Do things with `employee`.
+     return OkStatus();
+   });
 
 Nested submessages
 ==================
 Code generated ``GetFieldDecoder`` methods are provided that return a correctly
 typed ``StreamDecoder`` for the message.
 
-.. code::
+.. code-block:: protobuf
 
-  message Owner {
-    Animal pet = 1;
-  }
+   message Owner {
+     Animal pet = 1;
+   }
 
 As with encoding, note that the accessor method is named for the field, while
 the returned decoder is named for the message type.
@@ -1725,28 +1744,28 @@
 .. cpp:function:: pw::protobuf::StreamDecoder pw::protobuf::StreamDecoder::GetNestedDecoder()
 
 .. warning::
-  When a nested submessage is being decoded, any use of the parent decoder that
-  created the nested decoder will trigger a crash. To resume using the parent
-  decoder, destroy the submessage decoder first.
+   When a nested submessage is being decoded, any use of the parent decoder that
+   created the nested decoder will trigger a crash. To resume using the parent
+   decoder, destroy the submessage decoder first.
 
 
-.. code:: c++
+.. code-block:: c++
 
-  case Owner::Fields::kPet: {
-    // Note that the parent decoder, owner_decoder, cannot be used until the
-    // nested decoder, pet_decoder, has been destroyed.
-    Animal::StreamDecoder pet_decoder = owner_decoder.GetPetDecoder();
+   case Owner::Fields::kPet: {
+     // Note that the parent decoder, owner_decoder, cannot be used until the
+     // nested decoder, pet_decoder, has been destroyed.
+     Animal::StreamDecoder pet_decoder = owner_decoder.GetPetDecoder();
 
-    while ((status = pet_decoder.Next()).ok()) {
-      switch (pet_decoder.Field().value()) {
-        // Decode pet fields...
-      }
-    }
+     while ((status = pet_decoder.Next()).ok()) {
+       switch (pet_decoder.Field().value()) {
+         // Decode pet fields...
+       }
+     }
 
-    // When this scope ends, the nested decoder is destroyed and the
-    // parent decoder, owner_decoder, can be used again.
-    break;
-  }
+     // When this scope ends, the nested decoder is destroyed and the
+     // parent decoder, owner_decoder, can be used again.
+     break;
+   }
 
 Scalar Fields
 =============
@@ -1773,15 +1792,15 @@
 The following two code snippets are equivalent, where the first uses the code
 generated API, and the second implemented by hand.
 
-.. code:: c++
+.. code-block:: c++
 
-  pw::Result<int32_t> age = my_proto_decoder.ReadAge();
+   pw::Result<int32_t> age = my_proto_decoder.ReadAge();
 
-.. code:: c++
+.. code-block:: c++
 
-  PW_ASSERT(my_proto_decoder.FieldNumber().value() ==
-      static_cast<uint32_t>(MyProto::Fields::kAge));
-  pw::Result<int32_t> my_proto_decoder.ReadInt32();
+   PW_ASSERT(my_proto_decoder.FieldNumber().value() ==
+       static_cast<uint32_t>(MyProto::Fields::kAge));
+   pw::Result<int32_t> age = my_proto_decoder.ReadInt32();
 
 Repeated Fields
 ---------------
@@ -1790,20 +1809,20 @@
 
 .. cpp:function:: Result<T> MyProto::StreamDecoder::ReadFoos()
 
-  This reads a single unpacked value.
+   This reads a single unpacked value.
 
 .. cpp:function:: StatusWithSize MyProto::StreamDecoder::ReadFoos(pw::span<T>)
 
-  This reads a packed field containing all of the values into the provided span.
+   This reads a packed field containing all of the values into the provided span.
 
 .. cpp:function:: Status MyProto::StreamDecoder::ReadFoos(pw::Vector<T>&)
 
-  Protobuf encoders are permitted to choose either repeating single unpacked
-  values, or a packed field, including splitting repeated fields up into
-  multiple packed fields.
+   Protobuf encoders are permitted to choose either repeating single unpacked
+   values, or a packed field, including splitting repeated fields up into
+   multiple packed fields.
 
-  This method supports either format, appending values to the provided
-  ``pw::Vector``.
+   This method supports either format, appending values to the provided
+   ``pw::Vector``.
 
 These too can be freely intermixed with the lower-level API methods, to read a
 single value, a field of packed values into a ``pw::span``, or support both
@@ -1835,19 +1854,19 @@
 The following two code blocks are equivalent, where the first uses the code
 generated API, and the second is implemented by hand.
 
-.. code:: c++
+.. code-block:: c++
 
-  pw::Vector<int32_t, 8> numbers;
+   pw::Vector<int32_t, 8> numbers;
 
-  my_proto_decoder.ReadNumbers(numbers);
+   my_proto_decoder.ReadNumbers(numbers);
 
-.. code:: c++
+.. code-block:: c++
 
-  pw::Vector<int32_t, 8> numbers;
+   pw::Vector<int32_t, 8> numbers;
 
-  PW_ASSERT(my_proto_decoder.FieldNumber().value() ==
-      static_cast<uint32_t>(MyProto::Fields::kNumbers));
-  my_proto_decoder.ReadRepeatedInt32(numbers);
+   PW_ASSERT(my_proto_decoder.FieldNumber().value() ==
+       static_cast<uint32_t>(MyProto::Fields::kNumbers));
+   my_proto_decoder.ReadRepeatedInt32(numbers);
 
 Enumerations
 ============
@@ -1861,14 +1880,14 @@
 
 .. cpp:function:: constexpr bool MyProto::IsValidEnum(MyProto::Enum value)
 
-  Validates the value encoded in the wire format against the known set of
-  enumerates.
+   Validates the value encoded in the wire format against the known set of
+   enumerates.
 
 .. cpp:function:: constexpr const char* MyProto::EnumToString(MyProto::Enum value)
 
-  Returns the string representation of the enum value. For example,
-  ``FooToString(Foo::kBarBaz)`` returns ``"BAR_BAZ"``. Returns the empty string
-  if the value is not a valid value.
+   Returns the string representation of the enum value. For example,
+   ``FooToString(Foo::kBarBaz)`` returns ``"BAR_BAZ"``. Returns the empty string
+   if the value is not a valid value.
 
 To read enumerations with the lower-level API, you would need to cast the
 retured value from the ``uint32_t``.
@@ -1878,19 +1897,19 @@
 
 .. code-block:: c++
 
-  pw::Result<MyProto::Award> award = my_proto_decoder.ReadAward();
-  if (!MyProto::IsValidAward(award)) {
-    PW_LOG_DBG("Unknown award");
-  }
+   pw::Result<MyProto::Award> award = my_proto_decoder.ReadAward();
+   if (!MyProto::IsValidAward(award)) {
+     PW_LOG_DBG("Unknown award");
+   }
 
 .. code-block:: c++
 
-  PW_ASSERT(my_proto_decoder.FieldNumber().value() ==
-      static_cast<uint32_t>(MyProto::Fields::kAward));
-  pw::Result<uint32_t> award_value = my_proto_decoder.ReadUint32();
-  if (award_value.ok()) {
-    MyProto::Award award = static_cast<MyProto::Award>(award_value);
-  }
+   PW_ASSERT(my_proto_decoder.FieldNumber().value() ==
+       static_cast<uint32_t>(MyProto::Fields::kAward));
+   pw::Result<uint32_t> award_value = my_proto_decoder.ReadUint32();
+   if (award_value.ok()) {
+     MyProto::Award award = static_cast<MyProto::Award>(award_value);
+   }
 
 Repeated Fields
 ---------------
@@ -1899,17 +1918,17 @@
 
 .. cpp:function:: Result<MyProto::Enums> MyProto::StreamDecoder::ReadEnums()
 
-  This reads a single unpacked value.
+   This reads a single unpacked value.
 
 .. cpp:function:: StatusWithSize MyProto::StreamDecoder::ReadEnums(pw::span<MyProto::Enums>)
 
-  This reads a packed field containing all of the checked values into the
-  provided span.
+   This reads a packed field containing all of the checked values into the
+   provided span.
 
 .. cpp:function:: Status MyProto::StreamDecoder::ReadEnums(pw::Vector<MyProto::Enums>&)
 
-  This method supports either repeated unpacked or packed formats, appending
-  checked values to the provided ``pw::Vector``.
+   This method supports either repeated unpacked or packed formats, appending
+   checked values to the provided ``pw::Vector``.
 
 Their use is as scalar fields.
 
@@ -1973,9 +1992,9 @@
 be prevented from reading from the stream beyond the known bounds by specifying
 the known length to the decoder:
 
-.. code:: c++
+.. code-block:: c++
 
-  pw::protobuf::StreamDecoder decoder(reader, message_length);
+   pw::protobuf::StreamDecoder decoder(reader, message_length);
 
 When a decoder constructed in this way goes out of scope, it will consume any
 remaining bytes in ``message_length`` allowing the next ``Read()`` on the stream
@@ -1998,39 +2017,39 @@
 When reading ``bytes`` and ``string`` fields, the decoder returns a view of that
 field within the buffer; no data is copied out.
 
-.. code:: c++
+.. code-block:: c++
 
-  #include "pw_protobuf/decoder.h"
-  #include "pw_status/try.h"
+   #include "pw_protobuf/decoder.h"
+   #include "pw_status/try.h"
 
-  pw::Status DecodeProtoFromBuffer(pw::span<const std::byte> buffer) {
-    pw::protobuf::Decoder decoder(buffer);
-    pw::Status status;
+   pw::Status DecodeProtoFromBuffer(pw::span<const std::byte> buffer) {
+     pw::protobuf::Decoder decoder(buffer);
+     pw::Status status;
 
-    uint32_t uint32_field;
-    std::string_view string_field;
+     uint32_t uint32_field;
+     std::string_view string_field;
 
-    // Iterate over the fields in the message. A return value of OK indicates
-    // that a valid field has been found and can be read. When the decoder
-    // reaches the end of the message, Next() will return OUT_OF_RANGE.
-    // Other return values indicate an error trying to decode the message.
-    while ((status = decoder.Next()).ok()) {
-      switch (decoder.FieldNumber()) {
-        case 1:
-          PW_TRY(decoder.ReadUint32(&uint32_field));
-          break;
-        case 2:
-          // The passed-in string_view will point to the contents of the string
-          // field within the buffer.
-          PW_TRY(decoder.ReadString(&string_field));
-          break;
-      }
-    }
+     // Iterate over the fields in the message. A return value of OK indicates
+     // that a valid field has been found and can be read. When the decoder
+     // reaches the end of the message, Next() will return OUT_OF_RANGE.
+     // Other return values indicate an error trying to decode the message.
+     while ((status = decoder.Next()).ok()) {
+       switch (decoder.FieldNumber()) {
+         case 1:
+           PW_TRY(decoder.ReadUint32(&uint32_field));
+           break;
+         case 2:
+           // The passed-in string_view will point to the contents of the string
+           // field within the buffer.
+           PW_TRY(decoder.ReadString(&string_field));
+           break;
+       }
+     }
 
-    // Do something with the fields...
+     // Do something with the fields...
 
-    return status.IsOutOfRange() ? OkStatus() : status;
-  }
+     return status.IsOutOfRange() ? OkStatus() : status;
+   }
 
 ---------------
 Message Decoder
@@ -2038,8 +2057,8 @@
 
 .. note::
 
-  ``pw::protobuf::Message`` is unrelated to the codegen ``struct Message``
-  used with ``StreamDecoder``.
+   ``pw::protobuf::Message`` is unrelated to the codegen ``struct Message``
+   used with ``StreamDecoder``.
 
 The module implements a message parsing helper class ``Message``, in
 ``pw_protobuf/message.h``, to faciliate proto message parsing and field access.
@@ -2050,120 +2069,120 @@
 message access. The following gives examples for using the class to process
 different fields in a proto message:
 
-.. code:: c++
+.. code-block:: c++
 
-  // Consider the proto messages defined as follows:
-  //
-  // message Nested {
-  //   string nested_str = 1;
-  //   bytes nested_bytes = 2;
-  // }
-  //
-  // message {
-  //   uint32 integer = 1;
-  //   string str = 2;
-  //   bytes bytes = 3;
-  //   Nested nested = 4;
-  //   repeated string rep_str = 5;
-  //   repeated Nested rep_nested  = 6;
-  //   map<string, bytes> str_to_bytes = 7;
-  //   map<string, Nested> str_to_nested = 8;
-  // }
+   // Consider the proto messages defined as follows:
+   //
+   // message Nested {
+   //   string nested_str = 1;
+   //   bytes nested_bytes = 2;
+   // }
+   //
+   // message {
+   //   uint32 integer = 1;
+   //   string str = 2;
+   //   bytes bytes = 3;
+   //   Nested nested = 4;
+   //   repeated string rep_str = 5;
+   //   repeated Nested rep_nested  = 6;
+   //   map<string, bytes> str_to_bytes = 7;
+   //   map<string, Nested> str_to_nested = 8;
+   // }
 
-  // Given a seekable `reader` that reads the top-level proto message, and
-  // a <proto_size> that gives the size of the proto message:
-  Message message(reader, proto_size);
+   // Given a seekable `reader` that reads the top-level proto message, and
+   // a <proto_size> that gives the size of the proto message:
+   Message message(reader, proto_size);
 
-  // Parse a proto integer field
-  Uint32 integer = messasge_parser.AsUint32(1);
-  if (!integer.ok()) {
-    // handle parsing error. i.e. return integer.status().
-  }
-  uint32_t integer_value = integer.value(); // obtained the value
+   // Parse a proto integer field
+   Uint32 integer = messasge_parser.AsUint32(1);
+   if (!integer.ok()) {
+     // handle parsing error. i.e. return integer.status().
+   }
+   uint32_t integer_value = integer.value(); // obtained the value
 
-  // Parse a string field
-  String str = message.AsString(2);
-  if (!str.ok()) {
-    // handle parsing error. i.e. return str.status();
-  }
+   // Parse a string field
+   String str = message.AsString(2);
+   if (!str.ok()) {
+     // handle parsing error. i.e. return str.status();
+   }
 
-  // check string equal
-  Result<bool> str_check = str.Equal("foo");
+   // check string equal
+   Result<bool> str_check = str.Equal("foo");
 
-  // Parse a bytes field
-  Bytes bytes = message.AsBytes(3);
-  if (!bytes.ok()) {
-    // handle parsing error. i.e. return bytes.status();
-  }
+   // Parse a bytes field
+   Bytes bytes = message.AsBytes(3);
+   if (!bytes.ok()) {
+     // handle parsing error. i.e. return bytes.status();
+   }
 
-  // Get a reader to the bytes.
-  stream::IntervalReader bytes_reader = bytes.GetBytesReader();
+   // Get a reader to the bytes.
+   stream::IntervalReader bytes_reader = bytes.GetBytesReader();
 
-  // Parse nested message `Nested nested = 4;`
-  Message nested = message.AsMessage(4).
-  // Get the fields in the nested message.
-  String nested_str = nested.AsString(1);
-  Bytes nested_bytes = nested.AsBytes(2);
+   // Parse nested message `Nested nested = 4;`
+   Message nested = message.AsMessage(4).
+   // Get the fields in the nested message.
+   String nested_str = nested.AsString(1);
+   Bytes nested_bytes = nested.AsBytes(2);
 
-  // Parse repeated field `repeated string rep_str = 5;`
-  RepeatedStrings rep_str = message.AsRepeatedString(5);
-  // Iterate through the entries. If proto is malformed when
-  // iterating, the next element (`str` in this case) will be invalid
-  // and loop will end in the iteration after.
-  for (String element : rep_str) {
-    // Check status
-    if (!str.ok()) {
-      // In the case of error, loop will end in the next iteration if
-      // continues. This is the chance for code to catch the error.
-    }
-    // Process str
-  }
+   // Parse repeated field `repeated string rep_str = 5;`
+   RepeatedStrings rep_str = message.AsRepeatedString(5);
+   // Iterate through the entries. If proto is malformed when
+   // iterating, the next element (`str` in this case) will be invalid
+   // and loop will end in the iteration after.
+   for (String element : rep_str) {
+     // Check status
+     if (!str.ok()) {
+       // In the case of error, loop will end in the next iteration if
+       // continues. This is the chance for code to catch the error.
+     }
+     // Process str
+   }
 
-  // Parse repeated field `repeated Nested rep_nested = 6;`
-  RepeatedStrings rep_str = message.AsRepeatedString(6);
-  // Iterate through the entries. For iteration
-  for (Message element : rep_rep_nestedstr) {
-    // Check status
-    if (!element.ok()) {
-      // In the case of error, loop will end in the next iteration if
-      // continues. This is the chance for code to catch the error.
-    }
-    // Process element
-  }
+   // Parse repeated field `repeated Nested rep_nested = 6;`
+   RepeatedStrings rep_str = message.AsRepeatedString(6);
+   // Iterate through the entries. For iteration
+   for (Message element : rep_rep_nestedstr) {
+     // Check status
+     if (!element.ok()) {
+       // In the case of error, loop will end in the next iteration if
+       // continues. This is the chance for code to catch the error.
+     }
+     // Process element
+   }
 
-  // Parse map field `map<string, bytes> str_to_bytes = 7;`
-  StringToBytesMap str_to_bytes = message.AsStringToBytesMap(7);
-  // Access the entry by a given key value
-  Bytes bytes_for_key = str_to_bytes["key"];
-  // Or iterate through map entries
-  for (StringToBytesMapEntry entry : str_to_bytes) {
-    // Check status
-    if (!entry.ok()) {
-      // In the case of error, loop will end in the next iteration if
-      // continues. This is the chance for code to catch the error.
-    }
-    String key = entry.Key();
-    Bytes value = entry.Value();
-    // process entry
-  }
+   // Parse map field `map<string, bytes> str_to_bytes = 7;`
+   StringToBytesMap str_to_bytes = message.AsStringToBytesMap(7);
+   // Access the entry by a given key value
+   Bytes bytes_for_key = str_to_bytes["key"];
+   // Or iterate through map entries
+   for (StringToBytesMapEntry entry : str_to_bytes) {
+     // Check status
+     if (!entry.ok()) {
+       // In the case of error, loop will end in the next iteration if
+       // continues. This is the chance for code to catch the error.
+     }
+     String key = entry.Key();
+     Bytes value = entry.Value();
+     // process entry
+   }
 
-  // Parse map field `map<string, Nested> str_to_nested = 8;`
-  StringToMessageMap str_to_nested = message.AsStringToBytesMap(8);
-  // Access the entry by a given key value
-  Message nested_for_key = str_to_nested["key"];
-  // Or iterate through map entries
-  for (StringToMessageMapEntry entry : str_to_nested) {
-    // Check status
-    if (!entry.ok()) {
-      // In the case of error, loop will end in the next iteration if
-      // continues. This is the chance for code to catch the error.
-      // However it is still recommended that the user breaks here.
-      break;
-    }
-    String key = entry.Key();
-    Message value = entry.Value();
-    // process entry
-  }
+   // Parse map field `map<string, Nested> str_to_nested = 8;`
+   StringToMessageMap str_to_nested = message.AsStringToBytesMap(8);
+   // Access the entry by a given key value
+   Message nested_for_key = str_to_nested["key"];
+   // Or iterate through map entries
+   for (StringToMessageMapEntry entry : str_to_nested) {
+     // Check status
+     if (!entry.ok()) {
+       // In the case of error, loop will end in the next iteration if
+       // continues. This is the chance for code to catch the error.
+       // However it is still recommended that the user breaks here.
+       break;
+     }
+     String key = entry.Key();
+     Message value = entry.Value();
+     // process entry
+   }
 
 The methods in ``Message`` for parsing a single field, i.e. everty `AsXXX()`
 method except AsRepeatedXXX() and AsStringMapXXX(), internally performs a
@@ -2175,28 +2194,28 @@
 single fields directly.
 
 
-.. code:: c++
+.. code-block:: c++
 
-  for (Message::Field field : message) {
-    // Check status
-    if (!field.ok()) {
-      // In the case of error, loop will end in the next iteration if
-      // continues. This is the chance for code to catch the error.
-    }
-    if (field.field_number() == 1) {
-      Uint32 integer = field.As<Uint32>();
-      ...
-    } else if (field.field_number() == 2) {
-      String str = field.As<String>();
-      ...
-    } else if (field.field_number() == 3) {
-      Bytes bytes = field.As<Bytes>();
-      ...
-    } else if (field.field_number() == 4) {
-      Message nested = field.As<Message>();
-      ...
-    }
-  }
+   for (Message::Field field : message) {
+     // Check status
+     if (!field.ok()) {
+       // In the case of error, loop will end in the next iteration if
+       // continues. This is the chance for code to catch the error.
+     }
+     if (field.field_number() == 1) {
+       Uint32 integer = field.As<Uint32>();
+       ...
+     } else if (field.field_number() == 2) {
+       String str = field.As<String>();
+       ...
+     } else if (field.field_number() == 3) {
+       Bytes bytes = field.As<Bytes>();
+       ...
+     } else if (field.field_number() == 4) {
+       Message nested = field.As<Message>();
+       ...
+     }
+   }
 
 
 .. Note::
@@ -2255,17 +2274,17 @@
 ============
 Contains the enum for pw::Status.
 
-.. Note::
- ``pw::protobuf::StatusCode`` values should not be used outside of a .proto
- file. Instead, the StatusCodes should be converted to the Status type in the
- language. In C++, this would be:
+.. note::
+   ``pw::protobuf::StatusCode`` values should not be used outside of a .proto
+   file. Instead, the StatusCodes should be converted to the Status type in the
+   language. In C++, this would be:
 
-  .. code:: c++
+   .. code-block:: c++
 
-    // Reading from a proto
-    pw::Status status = static_cast<pw::Status::Code>(proto.status_field));
-    // Writing to a proto
-    proto.status_field = static_cast<pw::protobuf::StatusCode>(status.code()));
+      // Reading from a proto
+      pw::Status status = static_cast<pw::Status::Code>(proto.status_field));
+      // Writing to a proto
+      proto.status_field = static_cast<pw::protobuf::StatusCode>(status.code()));
 
 ----------------------------------------
 Comparison with other protobuf libraries
diff --git a/pw_protobuf/py/pw_protobuf/codegen_pwpb.py b/pw_protobuf/py/pw_protobuf/codegen_pwpb.py
index b0ab0e9..b383469 100644
--- a/pw_protobuf/py/pw_protobuf/codegen_pwpb.py
+++ b/pw_protobuf/py/pw_protobuf/codegen_pwpb.py
@@ -21,7 +21,7 @@
 from itertools import takewhile
 import os
 import sys
-from typing import Dict, Iterable, List, Optional, Tuple
+from typing import Dict, Iterable, List, Optional, Tuple, Type
 from typing import cast
 
 from google.protobuf import descriptor_pb2
@@ -416,7 +416,7 @@
         return False
 
     @staticmethod
-    def repeated_field_container(type_name: str, max_size: int) -> str:
+    def repeated_field_container(type_name: str, max_size: str) -> str:
         """Returns the container type used for repeated fields.
 
         Defaults to ::pw::Vector<type, max_size>. String fields use
@@ -464,42 +464,36 @@
     def sub_table(self) -> str:  # pylint: disable=no-self-use
         return '{}'
 
-    def struct_member(self, from_root: bool = False) -> Tuple[str, str]:
-        """Returns the structure member."""
+    def struct_member_type(self, from_root: bool = False) -> str:
+        """Returns the structure member type."""
         if self.use_callback():
             return (
-                f'{PROTOBUF_NAMESPACE}::Callback'
-                '<StreamEncoder, StreamDecoder>',
-                self.name(),
+                f'{PROTOBUF_NAMESPACE}::Callback<StreamEncoder, StreamDecoder>'
             )
 
         # Optional fields are wrapped in std::optional
         if self.is_optional():
-            return (
-                'std::optional<{}>'.format(self.type_name(from_root)),
-                self.name(),
-            )
+            return 'std::optional<{}>'.format(self.type_name(from_root))
 
         # Non-repeated fields have a member of just the type name.
         max_size = self.max_size()
         if max_size == 0:
-            return (self.type_name(from_root), self.name())
+            return self.type_name(from_root)
 
         # Fixed size fields use std::array.
         if self.is_fixed_size():
-            return (
-                'std::array<{}, {}>'.format(
-                    self.type_name(from_root), max_size
-                ),
-                self.name(),
+            return 'std::array<{}, {}>'.format(
+                self.type_name(from_root), self.max_size_constant_name()
             )
 
         # Otherwise prefer pw::Vector for repeated fields.
-        return (
-            self.repeated_field_container(self.type_name(from_root), max_size),
-            self.name(),
+        return self.repeated_field_container(
+            self.type_name(from_root), self.max_size_constant_name()
         )
 
+    def max_size_constant_name(self) -> str:
+        return f'k{self._field.name()}MaxSize'
+
     def _varint_type_table_entry(self) -> str:
         if self.wire_type() == 'kVarint':
             return '{}::VarintType::{}'.format(
@@ -2021,7 +2015,7 @@
     def _size_length(self) -> Optional[str]:
         if self.use_callback():
             return None
-        return f'{self.max_size()}'
+        return self.max_size_constant_name()
 
 
 class StringLenWriteMethod(WriteMethod):
@@ -2142,7 +2136,7 @@
         return True
 
     @staticmethod
-    def repeated_field_container(type_name: str, max_size: int) -> str:
+    def repeated_field_container(type_name: str, max_size: str) -> str:
         return f'::pw::InlineBasicString<{type_name}, {max_size}>'
 
     def _size_fn(self) -> str:
@@ -2154,7 +2148,7 @@
     def _size_length(self) -> Optional[str]:
         if self.use_callback():
             return None
-        return f'{self.max_size()}'
+        return self.max_size_constant_name()
 
 
 class EnumWriteMethod(WriteMethod):
@@ -2560,27 +2554,50 @@
     ],
 }
 
-PROTO_FIELD_PROPERTIES: Dict[int, List] = {
-    descriptor_pb2.FieldDescriptorProto.TYPE_DOUBLE: [DoubleProperty],
-    descriptor_pb2.FieldDescriptorProto.TYPE_FLOAT: [FloatProperty],
-    descriptor_pb2.FieldDescriptorProto.TYPE_INT32: [Int32Property],
-    descriptor_pb2.FieldDescriptorProto.TYPE_SINT32: [Sint32Property],
-    descriptor_pb2.FieldDescriptorProto.TYPE_SFIXED32: [Sfixed32Property],
-    descriptor_pb2.FieldDescriptorProto.TYPE_INT64: [Int64Property],
-    descriptor_pb2.FieldDescriptorProto.TYPE_SINT64: [Sint64Property],
-    descriptor_pb2.FieldDescriptorProto.TYPE_SFIXED64: [Sfixed32Property],
-    descriptor_pb2.FieldDescriptorProto.TYPE_UINT32: [Uint32Property],
-    descriptor_pb2.FieldDescriptorProto.TYPE_FIXED32: [Fixed32Property],
-    descriptor_pb2.FieldDescriptorProto.TYPE_UINT64: [Uint64Property],
-    descriptor_pb2.FieldDescriptorProto.TYPE_FIXED64: [Fixed64Property],
-    descriptor_pb2.FieldDescriptorProto.TYPE_BOOL: [BoolProperty],
-    descriptor_pb2.FieldDescriptorProto.TYPE_BYTES: [BytesProperty],
-    descriptor_pb2.FieldDescriptorProto.TYPE_STRING: [StringProperty],
-    descriptor_pb2.FieldDescriptorProto.TYPE_MESSAGE: [SubMessageProperty],
-    descriptor_pb2.FieldDescriptorProto.TYPE_ENUM: [EnumProperty],
+PROTO_FIELD_PROPERTIES: Dict[int, Type[MessageProperty]] = {
+    descriptor_pb2.FieldDescriptorProto.TYPE_DOUBLE: DoubleProperty,
+    descriptor_pb2.FieldDescriptorProto.TYPE_FLOAT: FloatProperty,
+    descriptor_pb2.FieldDescriptorProto.TYPE_INT32: Int32Property,
+    descriptor_pb2.FieldDescriptorProto.TYPE_SINT32: Sint32Property,
+    descriptor_pb2.FieldDescriptorProto.TYPE_SFIXED32: Sfixed32Property,
+    descriptor_pb2.FieldDescriptorProto.TYPE_INT64: Int64Property,
+    descriptor_pb2.FieldDescriptorProto.TYPE_SINT64: Sint64Property,
+    descriptor_pb2.FieldDescriptorProto.TYPE_SFIXED64: Sfixed32Property,
+    descriptor_pb2.FieldDescriptorProto.TYPE_UINT32: Uint32Property,
+    descriptor_pb2.FieldDescriptorProto.TYPE_FIXED32: Fixed32Property,
+    descriptor_pb2.FieldDescriptorProto.TYPE_UINT64: Uint64Property,
+    descriptor_pb2.FieldDescriptorProto.TYPE_FIXED64: Fixed64Property,
+    descriptor_pb2.FieldDescriptorProto.TYPE_BOOL: BoolProperty,
+    descriptor_pb2.FieldDescriptorProto.TYPE_BYTES: BytesProperty,
+    descriptor_pb2.FieldDescriptorProto.TYPE_STRING: StringProperty,
+    descriptor_pb2.FieldDescriptorProto.TYPE_MESSAGE: SubMessageProperty,
+    descriptor_pb2.FieldDescriptorProto.TYPE_ENUM: EnumProperty,
 }
 
 
+def proto_message_field_props(
+    message: ProtoMessage,
+    root: ProtoNode,
+) -> Iterable[MessageProperty]:
+    """Yields a MessageProperty for each field in a ProtoMessage.
+
+    Only properties which should_appear() is True are returned.
+
+    Args:
+      message: The ProtoMessage whose fields are iterated.
+      root: The root ProtoNode of the tree.
+
+    Yields:
+      An appropriately-typed MessageProperty object for each field
+      in the message, to which the property refers.
+    """
+    for field in message.fields():
+        property_class = PROTO_FIELD_PROPERTIES[field.type()]
+        prop = property_class(field, message, root)
+        if prop.should_appear():
+            yield prop
+
+
 def proto_field_methods(class_type: ClassType, field_type: int) -> List:
     return (
         PROTO_FIELD_WRITE_METHODS[field_type]
@@ -2836,31 +2853,41 @@
 
 
 def forward_declare(
-    node: ProtoMessage,
+    message: ProtoMessage,
     root: ProtoNode,
     output: OutputFile,
     exclude_legacy_snake_case_field_name_enums: bool,
 ) -> None:
     """Generates code forward-declaring entities in a message's namespace."""
-    namespace = node.cpp_namespace(root=root)
+    namespace = message.cpp_namespace(root=root)
     output.write_line()
     output.write_line(f'namespace {namespace} {{')
 
     # Define an enum defining each of the message's fields and their numbers.
     output.write_line('enum class Fields : uint32_t {')
     with output.indent():
-        for field in node.fields():
+        for field in message.fields():
             output.write_line(f'{field.enum_name()} = {field.number()},')
 
         # Migration support from SNAKE_CASE to kConstantCase.
         if not exclude_legacy_snake_case_field_name_enums:
-            for field in node.fields():
+            for field in message.fields():
                 output.write_line(
                     f'{field.legacy_enum_name()} = {field.number()},'
                 )
 
     output.write_line('};')
 
+    # Define constants for fixed-size fields.
+    output.write_line()
+    for prop in proto_message_field_props(message, root):
+        max_size = prop.max_size()
+        if max_size:
+            output.write_line(
+                f'static constexpr size_t {prop.max_size_constant_name()} '
+                f'= {max_size};'
+            )
+
     # Declare the message's message struct.
     output.write_line()
     output.write_line('struct Message;')
@@ -2875,14 +2902,14 @@
     output.write_line('class StreamDecoder;')
 
     # Declare the message's enums.
-    for child in node.children():
+    for child in message.children():
         if child.type() == ProtoNode.Type.ENUM:
             output.write_line()
-            generate_code_for_enum(cast(ProtoEnum, child), node, output)
+            generate_code_for_enum(cast(ProtoEnum, child), message, output)
             output.write_line()
-            generate_function_for_enum(cast(ProtoEnum, child), node, output)
+            generate_function_for_enum(cast(ProtoEnum, child), message, output)
             output.write_line()
-            generate_to_string_for_enum(cast(ProtoEnum, child), node, output)
+            generate_to_string_for_enum(cast(ProtoEnum, child), message, output)
 
     output.write_line(f'}}  // namespace {namespace}')
 
@@ -2898,17 +2925,13 @@
     # Generate members for each of the message's fields.
     with output.indent():
         cmp: List[str] = []
-        for field in message.fields():
-            for property_class in PROTO_FIELD_PROPERTIES[field.type()]:
-                prop = property_class(field, message, root)
-                if not prop.should_appear():
-                    continue
+        for prop in proto_message_field_props(message, root):
+            type_name = prop.struct_member_type()
+            name = prop.name()
+            output.write_line(f'{type_name} {name};')
 
-                (type_name, name) = prop.struct_member()
-                output.write_line(f'{type_name} {name};')
-
-                if not prop.use_callback():
-                    cmp.append(f'{name} == other.{name}')
+            if not prop.use_callback():
+                cmp.append(f'{name} == other.{name}')
 
         # Equality operator
         output.write_line()
@@ -2937,12 +2960,7 @@
     namespace = message.cpp_namespace(root=root)
     output.write_line(f'namespace {namespace} {{')
 
-    properties = []
-    for field in message.fields():
-        for property_class in PROTO_FIELD_PROPERTIES[field.type()]:
-            prop = property_class(field, message, root)
-            if prop.should_appear():
-                properties.append(prop)
+    properties = list(proto_message_field_props(message, root))
 
     output.write_line('PW_MODIFY_DIAGNOSTICS_PUSH();')
     output.write_line('PW_MODIFY_DIAGNOSTIC(ignored, "-Winvalid-offsetof");')
@@ -2989,7 +3007,7 @@
         )
 
         member_list = ', '.join(
-            [f'message.{prop.struct_member()[1]}' for prop in properties]
+            [f'message.{prop.name()}' for prop in properties]
         )
 
         # Generate std::tuple for Message fields.
@@ -3025,15 +3043,10 @@
 
     property_sizes: List[str] = []
     scratch_sizes: List[str] = []
-    for field in message.fields():
-        for property_class in PROTO_FIELD_PROPERTIES[field.type()]:
-            prop = property_class(field, message, root)
-            if not prop.should_appear():
-                continue
-
-            property_sizes.append(prop.max_encoded_size())
-            if prop.include_in_scratch_size():
-                scratch_sizes.append(prop.max_encoded_size())
+    for prop in proto_message_field_props(message, root):
+        property_sizes.append(prop.max_encoded_size())
+        if prop.include_in_scratch_size():
+            scratch_sizes.append(prop.max_encoded_size())
 
     output.write_line('inline constexpr size_t kMaxEncodedSizeBytes =')
     with output.indent():
@@ -3103,15 +3116,10 @@
     message: ProtoMessage, root: ProtoNode, output: OutputFile
 ) -> None:
     is_trivially_comparable = True
-    for field in message.fields():
-        for property_class in PROTO_FIELD_PROPERTIES[field.type()]:
-            prop = property_class(field, message, root)
-            if not prop.should_appear():
-                continue
-
-            if prop.use_callback():
-                is_trivially_comparable = False
-                break
+    for prop in proto_message_field_props(message, root):
+        if prop.use_callback():
+            is_trivially_comparable = False
+            break
 
     qualified_message = f'{message.cpp_namespace()}::Message'
 
diff --git a/pw_protobuf_compiler/BUILD.bazel b/pw_protobuf_compiler/BUILD.bazel
index 25728e8..00d74c3 100644
--- a/pw_protobuf_compiler/BUILD.bazel
+++ b/pw_protobuf_compiler/BUILD.bazel
@@ -11,8 +11,8 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations under
 # the License.
-load("@rules_proto//proto:defs.bzl", "proto_library")
 load("@com_google_protobuf//:protobuf.bzl", "py_proto_library")
+load("@rules_proto//proto:defs.bzl", "proto_library")
 load("//pw_build:pigweed.bzl", "pw_cc_test")
 
 package(default_visibility = ["//visibility:public"])
diff --git a/pw_protobuf_compiler/docs.rst b/pw_protobuf_compiler/docs.rst
index dda5be1..a391b1e 100644
--- a/pw_protobuf_compiler/docs.rst
+++ b/pw_protobuf_compiler/docs.rst
@@ -55,9 +55,9 @@
 
 .. code-block::
 
-  pw_proto_library("test_protos") {
-    sources = [ "my_test_protos/test.proto" ]
-  }
+   pw_proto_library("test_protos") {
+     sources = [ "my_test_protos/test.proto" ]
+   }
 
 ``test_protos.pwpb`` compiles code for pw_protobuf, and ``test_protos.nanopb``
 compiles using Nanopb (if it's installed).
@@ -72,15 +72,15 @@
 
 .. code-block::
 
-  //path/to/my_protos:my_protos.pwpb
-  //path/to/my_protos:pwpb
+   //path/to/my_protos:my_protos.pwpb
+   //path/to/my_protos:pwpb
 
 ``pw_python_package`` subtargets are also available on the ``python`` subtarget:
 
 .. code-block::
 
-  //path/to/my_protos:my_protos.python.lint
-  //path/to/my_protos:python.lint
+   //path/to/my_protos:my_protos.python.lint
+   //path/to/my_protos:python.lint
 
 **Supported Codegen**
 
@@ -124,50 +124,50 @@
 
 .. code-block::
 
-  import("$dir_pw_protobuf_compiler/proto.gni")
+   import("$dir_pw_protobuf_compiler/proto.gni")
 
-  pw_proto_library("my_protos") {
-    sources = [
-      "my_protos/foo.proto",
-      "my_protos/bar.proto",
-    ]
-  }
+   pw_proto_library("my_protos") {
+     sources = [
+       "my_protos/foo.proto",
+       "my_protos/bar.proto",
+     ]
+   }
 
-  pw_proto_library("my_other_protos") {
-    sources = [ "some/other/path/baz.proto" ]  # imports foo.proto
+   pw_proto_library("my_other_protos") {
+     sources = [ "some/other/path/baz.proto" ]  # imports foo.proto
 
-    # This removes the "some/other/path" prefix from the proto files.
-    strip_prefix = "some/other/path"
+     # This removes the "some/other/path" prefix from the proto files.
+     strip_prefix = "some/other/path"
 
-    # This adds the "my_other_protos/" prefix to the proto files.
-    prefix = "my_other_protos"
+     # This adds the "my_other_protos/" prefix to the proto files.
+     prefix = "my_other_protos"
 
-    # Proto libraries depend on other proto libraries directly.
-    deps = [ ":my_protos" ]
-  }
+     # Proto libraries depend on other proto libraries directly.
+     deps = [ ":my_protos" ]
+   }
 
-  source_set("my_cc_code") {
-    sources = [
-      "foo.cc",
-      "bar.cc",
-      "baz.cc",
-    ]
+   source_set("my_cc_code") {
+     sources = [
+       "foo.cc",
+       "bar.cc",
+       "baz.cc",
+     ]
 
-    # When depending on protos in a source_set, specify the generator suffix.
-    deps = [ ":my_other_protos.pwpb" ]
-  }
+     # When depending on protos in a source_set, specify the generator suffix.
+     deps = [ ":my_other_protos.pwpb" ]
+   }
 
 From C++, ``baz.proto`` included as follows:
 
 .. code-block:: cpp
 
-  #include "my_other_protos/baz.pwpb.h"
+   #include "my_other_protos/baz.pwpb.h"
 
 From Python, ``baz.proto`` is imported as follows:
 
 .. code-block:: python
 
-  from my_other_protos import baz_pb2
+   from my_other_protos import baz_pb2
 
 Proto file structure
 --------------------
@@ -180,25 +180,25 @@
 
 .. code-block::
 
-  pw_proto_library("external_protos") {
-    sources = [
-      "//other/external/some_library/src/protos/alpha.proto",
-      "//other/external/some_library/src/protos/beta.proto,
-      "//other/external/some_library/src/protos/internal/gamma.proto",
-    ]
-    strip_prefix = "//other/external/some_library/src/protos"
-    prefix = "some_library"
-  }
+   pw_proto_library("external_protos") {
+     sources = [
+       "//other/external/some_library/src/protos/alpha.proto",
+       "//other/external/some_library/src/protos/beta.proto,
+       "//other/external/some_library/src/protos/internal/gamma.proto",
+     ]
+     strip_prefix = "//other/external/some_library/src/protos"
+     prefix = "some_library"
+   }
 
 These protos will be compiled by protoc as if they were in this file structure:
 
 .. code-block::
 
-  some_library/
-  ├── alpha.proto
-  ├── beta.proto
-  └── internal
-      └── gamma.proto
+   some_library/
+   ├── alpha.proto
+   ├── beta.proto
+   └── internal
+       └── gamma.proto
 
 .. _module-pw_protobuf_compiler-add-to-python-package:
 
@@ -215,30 +215,30 @@
 
 .. code-block::
 
-  pw_proto_library("my_protos") {
-    sources = [ "hello.proto ]
-    prefix = "foo"
-    python_package = ":my_package"
-  }
+   pw_proto_library("my_protos") {
+     sources = [ "hello.proto ]
+     prefix = "foo"
+     python_package = ":my_package"
+   }
 
-  pw_python_pacakge("my_package") {
-    generate_setup = {
-      metadata = {
-        name = "foo"
-        version = "1.0"
-      }
-    }
+   pw_python_pacakge("my_package") {
+     generate_setup = {
+       metadata = {
+         name = "foo"
+         version = "1.0"
+       }
+     }
 
-    sources = [ "foo/cool_module.py" ]
-    proto_library = ":my_protos"
-  }
+     sources = [ "foo/cool_module.py" ]
+     proto_library = ":my_protos"
+   }
 
 The ``hello_pb2.py`` proto module can be used alongside other files in the
 ``foo`` package.
 
 .. code-block:: python
 
-  from foo import cool_module, hello_pb2
+   from foo import cool_module, hello_pb2
 
 Working with externally defined protos
 --------------------------------------
@@ -256,11 +256,11 @@
 
 .. code-block::
 
-  pw_proto_library("proto") {
-    strip_prefix = "$dir_pw_third_party_nanopb/generator/proto"
-    sources = [ "$dir_pw_third_party_nanopb/generator/proto/nanopb.proto" ]
-    python_module_as_package = "nanopb_pb2"
-  }
+   pw_proto_library("proto") {
+     strip_prefix = "$dir_pw_third_party_nanopb/generator/proto"
+     sources = [ "$dir_pw_third_party_nanopb/generator/proto/nanopb.proto" ]
+     python_module_as_package = "nanopb_pb2"
+   }
 
 In Python, this makes ``nanopb.proto`` available as ``import nanopb_pb2`` via
 the ``nanopb_pb2`` Python package. In C++, ``nanopb.proto`` is accessed as
@@ -299,56 +299,56 @@
 
 **Example**
 
- .. code-block:: cmake
+.. code-block:: cmake
 
-  include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
-  include($ENV{PW_ROOT}/pw_protobuf_compiler/proto.cmake)
+   include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+   include($ENV{PW_ROOT}/pw_protobuf_compiler/proto.cmake)
 
-  pw_proto_library(my_module.my_protos
-    SOURCES
-      my_protos/foo.proto
-      my_protos/bar.proto
-  )
+   pw_proto_library(my_module.my_protos
+     SOURCES
+       my_protos/foo.proto
+       my_protos/bar.proto
+   )
 
-  pw_proto_library(my_module.my_protos
-    SOURCES
-      my_protos/foo.proto
-      my_protos/bar.proto
-  )
+   pw_proto_library(my_module.my_protos
+     SOURCES
+       my_protos/foo.proto
+       my_protos/bar.proto
+   )
 
-  pw_proto_library(my_module.my_other_protos
-    SOURCES
-      some/other/path/baz.proto  # imports foo.proto
+   pw_proto_library(my_module.my_other_protos
+     SOURCES
+       some/other/path/baz.proto  # imports foo.proto
 
-    # This removes the "some/other/path" prefix from the proto files.
-    STRIP_PREFIX
-      some/other/path
+     # This removes the "some/other/path" prefix from the proto files.
+     STRIP_PREFIX
+       some/other/path
 
-    # This adds the "my_other_protos/" prefix to the proto files.
-    PREFIX
-      my_other_protos
+     # This adds the "my_other_protos/" prefix to the proto files.
+     PREFIX
+       my_other_protos
 
-    # Proto libraries depend on other proto libraries directly.
-    DEPS
-      my_module.my_protos
-  )
+     # Proto libraries depend on other proto libraries directly.
+     DEPS
+       my_module.my_protos
+   )
 
-  add_library(my_module.my_cc_code
-      foo.cc
-      bar.cc
-      baz.cc
-  )
+   add_library(my_module.my_cc_code
+       foo.cc
+       bar.cc
+       baz.cc
+   )
 
-  # When depending on protos in a source_set, specify the generator suffix.
-  target_link_libraries(my_module.my_cc_code PUBLIC
-    my_module.my_other_protos.pwpb
-  )
+   # When depending on protos in a source_set, specify the generator suffix.
+   target_link_libraries(my_module.my_cc_code PUBLIC
+     my_module.my_other_protos.pwpb
+   )
 
 These proto files are accessed in C++ the same as in the GN build:
 
 .. code-block:: cpp
 
-  #include "my_other_protos/baz.pwpb.h"
+   #include "my_other_protos/baz.pwpb.h"
 
 **Supported Codegen**
 
@@ -380,97 +380,96 @@
 
 .. code-block:: python
 
-  # WORKSPACE ...
-  load("@pigweed//pw_protobuf_compiler:deps.bzl", "pw_protobuf_dependencies")
-  pw_protobuf_dependencies()
+   # WORKSPACE ...
+   load("@pigweed//pw_protobuf_compiler:deps.bzl", "pw_protobuf_dependencies")
+   pw_protobuf_dependencies()
 
 Bazel uses a different set of rules to manage proto files than it does to
 compile them. e.g.
 
 .. code-block:: python
 
-  # BUILD ...
-  load("@rules_proto//proto:defs.bzl", "proto_library")
-  load("@pigweed//pw_protobuf_compiler:pw_proto_library.bzl",
-    "nanopb_proto_library",
-    "nanopb_rpc_proto_library",
-    "pwpb_proto_library",
-    "raw_rpc_proto_library",
-  )
+   # BUILD ...
+   load("@rules_proto//proto:defs.bzl", "proto_library")
+   load("@pigweed//pw_protobuf_compiler:pw_proto_library.bzl",
+     "nanopb_proto_library",
+     "nanopb_rpc_proto_library",
+     "pwpb_proto_library",
+     "raw_rpc_proto_library",
+   )
 
-  # Manages proto sources and dependencies.
-  proto_library(
-    name = "my_proto",
-    srcs = [
-      "my_protos/foo.proto",
-      "my_protos/bar.proto",
-    ]
-  )
+   # Manages proto sources and dependencies.
+   proto_library(
+     name = "my_proto",
+     srcs = [
+       "my_protos/foo.proto",
+       "my_protos/bar.proto",
+     ]
+   )
 
-  # Compiles dependent protos to C++.
-  pwpb_proto_library(
-    name = "my_proto_pwpb",
-    deps = [":my_proto"],
-  )
+   # Compiles dependent protos to C++.
+   pwpb_proto_library(
+     name = "my_proto_pwpb",
+     deps = [":my_proto"],
+   )
 
-  nanopb_proto_library(
-    name = "my_proto_nanopb",
-    deps = [":my_proto"],
-  )
+   nanopb_proto_library(
+     name = "my_proto_nanopb",
+     deps = [":my_proto"],
+   )
 
-  raw_rpc_proto_library(
-    name = "my_proto_raw_rpc",
-    deps = [":my_proto"],
-  )
+   raw_rpc_proto_library(
+     name = "my_proto_raw_rpc",
+     deps = [":my_proto"],
+   )
 
-  nanopb_rpc_proto_library(
-    name = "my_proto_nanopb_rpc",
-    nanopb_proto_library_deps = [":my_proto_nanopb"],
-    deps = [":my_proto"],
-  )
+   nanopb_rpc_proto_library(
+     name = "my_proto_nanopb_rpc",
+     nanopb_proto_library_deps = [":my_proto_nanopb"],
+     deps = [":my_proto"],
+   )
 
-  # Library that depends on only pw_protobuf generated proto targets.
-  pw_cc_library(
-    name = "my_proto_only_lib",
-    srcs = ["my/proto_only.cc"],
-    deps = [":my_proto_pwpb"],
-  )
+   # Library that depends on only pw_protobuf generated proto targets.
+   pw_cc_library(
+     name = "my_proto_only_lib",
+     srcs = ["my/proto_only.cc"],
+     deps = [":my_proto_pwpb"],
+   )
 
-  # Library that depends on only Nanopb generated proto targets.
-  pw_cc_library(
-    name = "my_nanopb_only_lib",
-    srcs = ["my/nanopb_only.cc"],
-    deps = [":my_proto_nanopb"],
-  )
+   # Library that depends on only Nanopb generated proto targets.
+   pw_cc_library(
+     name = "my_nanopb_only_lib",
+     srcs = ["my/nanopb_only.cc"],
+     deps = [":my_proto_nanopb"],
+   )
 
-  # Library that depends on pw_protobuf and pw_rpc/raw.
-  pw_cc_library(
-    name = "my_raw_rpc_lib",
-    srcs = ["my/raw_rpc.cc"],
-    deps = [
-      ":my_proto_pwpb",
-      ":my_proto_raw_rpc",
-    ],
-  )
-  pw_cc_library(
-    name = "my_nanopb_rpc_lib",
-    srcs = ["my/proto_only.cc"],
-    deps = [
-      ":my_proto_nanopb_rpc",
-    ],
-  )
-
+   # Library that depends on pw_protobuf and pw_rpc/raw.
+   pw_cc_library(
+     name = "my_raw_rpc_lib",
+     srcs = ["my/raw_rpc.cc"],
+     deps = [
+       ":my_proto_pwpb",
+       ":my_proto_raw_rpc",
+     ],
+   )
+   pw_cc_library(
+     name = "my_nanopb_rpc_lib",
+     srcs = ["my/proto_only.cc"],
+     deps = [
+       ":my_proto_nanopb_rpc",
+     ],
+   )
 
 From ``my/lib.cc`` you can now include the generated headers.
 e.g.
 
-.. code:: cpp
+.. code-block:: cpp
 
-  #include "my_protos/bar.pwpb.h"
-  // and/or RPC headers
-  #include "my_protos/bar.raw_rpc.pb.h
-  // or
-  #include "my_protos/bar.nanopb_rpc.pb.h"
+   #include "my_protos/bar.pwpb.h"
+   // and/or RPC headers
+   #include "my_protos/bar.raw_rpc.pb.h
+   // or
+   #include "my_protos/bar.nanopb_rpc.pb.h"
 
 
 Why isn't there one rule to generate all the code?
diff --git a/pw_protobuf_compiler/pw_proto_library.bzl b/pw_protobuf_compiler/pw_proto_library.bzl
index ce10a4d..ceb9415 100644
--- a/pw_protobuf_compiler/pw_proto_library.bzl
+++ b/pw_protobuf_compiler/pw_proto_library.bzl
@@ -38,10 +38,10 @@
 _proto_compiler_aspect.
 """
 
-load("@bazel_tools//tools/cpp:toolchain_utils.bzl", "find_cpp_toolchain", "use_cpp_toolchain")
-load("@rules_proto//proto:defs.bzl", "ProtoInfo")
-load("@pigweed//pw_build/bazel_internal:pigweed_internal.bzl", "PW_DEFAULT_COPTS")
 load("@bazel_skylib//lib:paths.bzl", "paths")
+load("@bazel_tools//tools/cpp:toolchain_utils.bzl", "find_cpp_toolchain", "use_cpp_toolchain")
+load("@pigweed//pw_build/bazel_internal:pigweed_internal.bzl", "PW_DEFAULT_COPTS")
+load("@rules_proto//proto:defs.bzl", "ProtoInfo")
 
 # For Copybara use only
 ADDITIONAL_PWPB_DEPS = []
@@ -56,7 +56,7 @@
         name = name,
         protos = deps,
         deps = [
-            Label("//pw_assert:facade"),
+            Label("//pw_assert"),
             Label("//pw_containers:vector"),
             Label("//pw_preprocessor"),
             Label("//pw_protobuf"),
diff --git a/pw_protobuf_compiler/toolchain.gni b/pw_protobuf_compiler/toolchain.gni
index 806a963..4256fd2 100644
--- a/pw_protobuf_compiler/toolchain.gni
+++ b/pw_protobuf_compiler/toolchain.gni
@@ -25,5 +25,5 @@
   # kConstantCase. Set this variable to temporarily enable legacy SNAKE_CASE
   # support while you migrate your codebase to kConstantCase.
   # b/266298474
-  pw_protobuf_compiler_GENERATE_LEGACY_ENUM_SNAKE_CASE_NAMES = true
+  pw_protobuf_compiler_GENERATE_LEGACY_ENUM_SNAKE_CASE_NAMES = false
 }
diff --git a/pw_random/BUILD.gn b/pw_random/BUILD.gn
index 2ce96a8..afc460b 100644
--- a/pw_random/BUILD.gn
+++ b/pw_random/BUILD.gn
@@ -46,14 +46,12 @@
 }
 
 pw_test_group("tests") {
-  tests = [
-    ":xor_shift_star_test",
-    ":get_int_bounded_fuzzer_test",
-  ]
+  tests = [ ":xor_shift_star_test" ]
+  group_deps = [ ":fuzzers" ]
 }
 
-group("fuzzers") {
-  deps = [ ":get_int_bounded_fuzzer" ]
+pw_fuzzer_group("fuzzers") {
+  fuzzers = [ ":get_int_bounded_fuzzer" ]
 }
 
 pw_test("xor_shift_star_test") {
diff --git a/pw_random/docs.rst b/pw_random/docs.rst
index 20ea0b0..2db14e0 100644
--- a/pw_random/docs.rst
+++ b/pw_random/docs.rst
@@ -41,31 +41,6 @@
 .. doxygennamespace:: pw::random
    :members:
 
-----------
-Algorithms
-----------
-
-xorshift*
-=========
-The ``xorshift*`` algorithm is a pseudo-random number generation algorithm. It's
-very simple in principle; the state is represented as an integer that, with each
-generation, performs exclusive OR operations on different left/right bit shifts
-of itself. The "*" refers to a final multiplication that is applied to the
-output value.
-
-Pigweed's implementation augments this with an ability to inject entropy to
-reseed the generator throughout its lifetime. When entropy is injected, the
-results of the generator are no longer completely deterministic based on the
-original seed.
-
-Note that this generator is NOT cryptographically secure.
-
-For more information, see:
-
- * https://en.wikipedia.org/wiki/Xorshift
- * https://www.jstatsoft.org/article/view/v008i14
- * http://vigna.di.unimi.it/ftp/papers/xorshift.pdf
-
 -----------
 Future Work
 -----------
diff --git a/pw_random/public/pw_random/xor_shift.h b/pw_random/public/pw_random/xor_shift.h
index 2e98e9b..a748a20 100644
--- a/pw_random/public/pw_random/xor_shift.h
+++ b/pw_random/public/pw_random/xor_shift.h
@@ -24,20 +24,35 @@
 
 namespace pw::random {
 
-// This is the "xorshift*" algorithm which is a bit stronger than plain XOR
-// shift thanks to the nonlinear transformation at the end (multiplication).
-//
-// See: https://en.wikipedia.org/wiki/Xorshift
-//
-// This random generator is NOT cryptographically secure, and incorporates
-// pseudo-random generation to extrapolate any true injected entropy. The
-// distribution is not guaranteed to be uniform.
+/// A random generator based off the
+/// [xorshift*](https://en.wikipedia.org/wiki/Xorshift) algorithm.
+///
+/// The state is represented as an integer that, with each generation, performs
+/// exclusive OR (XOR) operations on different left/right bit shifts of itself.
+/// The `*` in `xorshift*` refers to a final multiplication that is applied to
+/// the output value. The final multiplication is essentially a nonlinear
+/// transformation that makes the algorithm stronger than a plain XOR shift.
+///
+/// Pigweed's implementation augments `xorshift*` with an ability to inject
+/// entropy to reseed the generator throughout its lifetime. When entropy is
+/// injected, the results of the generator are no longer completely
+/// deterministic based on the original seed.
+///
+/// See also [Xorshift RNGs](https://www.jstatsoft.org/article/view/v008i14)
+/// and [An experimental exploration of Marsaglia's xorshift generators,
+/// scrambled](https://vigna.di.unimi.it/ftp/papers/xorshift.pdf).
+///
+/// @warning This random generator is **NOT** cryptographically secure. It
+/// incorporates pseudo-random generation to extrapolate any true injected
+/// entropy. The distribution is not guaranteed to be uniform.
 class XorShiftStarRng64 : public RandomGenerator {
  public:
   XorShiftStarRng64(uint64_t initial_seed) : state_(initial_seed) {}
 
-  // This generator uses entropy-seeded PRNG to never exhaust its random number
-  // pool.
+  /// Populates the destination buffer with a randomly generated value.
+  ///
+  /// This generator uses entropy-seeded PRNG to never exhaust its random
+  /// number pool.
   void Get(ByteSpan dest) final {
     while (!dest.empty()) {
       uint64_t random = Regenerate();
@@ -47,10 +62,11 @@
     }
   }
 
-  // Entropy is injected by rotating the state by the number of entropy bits
-  // before xoring the entropy with the current state. This ensures seeding
-  // the random value with single bits will progressively fill the state with
-  // more entropy.
+  /// Injects entropy by rotating the state by the number of entropy bits
+  /// before XORing the entropy with the current state.
+  ///
+  /// This technique ensures that seeding the random value with single bits
+  /// will progressively fill the state with more entropy.
   void InjectEntropyBits(uint32_t data, uint_fast8_t num_bits) final {
     if (num_bits == 0) {
       return;
diff --git a/pw_result/BUILD.bazel b/pw_result/BUILD.bazel
index 94f12c3..522170f 100644
--- a/pw_result/BUILD.bazel
+++ b/pw_result/BUILD.bazel
@@ -28,7 +28,7 @@
     hdrs = ["public/pw_result/result.h"],
     includes = ["public"],
     deps = [
-        "//pw_assert:facade",
+        "//pw_assert",
         "//pw_status",
     ],
 )
diff --git a/pw_rpc/BUILD.bazel b/pw_rpc/BUILD.bazel
index 0925c27..044e4ca 100644
--- a/pw_rpc/BUILD.bazel
+++ b/pw_rpc/BUILD.bazel
@@ -12,10 +12,10 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-load("//pw_build:pigweed.bzl", "pw_cc_library", "pw_cc_test")
-load("//pw_protobuf_compiler:pw_proto_library.bzl", "pw_proto_filegroup", "pw_proto_library")
 load("@com_google_protobuf//:protobuf.bzl", "py_proto_library")
 load("@rules_proto//proto:defs.bzl", "proto_library")
+load("//pw_build:pigweed.bzl", "pw_cc_library", "pw_cc_test")
+load("//pw_protobuf_compiler:pw_proto_library.bzl", "pw_proto_filegroup", "pw_proto_library")
 
 package(default_visibility = ["//visibility:public"])
 
@@ -302,6 +302,7 @@
     ],
     deps = [
         ":pw_rpc",
+        "//pw_fuzzer:fuzztest",
     ],
 )
 
@@ -312,6 +313,7 @@
     ],
     deps = [
         ":pw_rpc",
+        "//pw_fuzzer:fuzztest",
     ],
 )
 
diff --git a/pw_rpc/BUILD.gn b/pw_rpc/BUILD.gn
index 1f85abd..1b8dc0e 100644
--- a/pw_rpc/BUILD.gn
+++ b/pw_rpc/BUILD.gn
@@ -525,6 +525,7 @@
 pw_test("packet_test") {
   deps = [
     ":server",
+    "$dir_pw_fuzzer:fuzztest",
     dir_pw_bytes,
     dir_pw_protobuf,
   ]
@@ -537,6 +538,7 @@
 pw_test("packet_meta_test") {
   deps = [
     ":server",
+    "$dir_pw_fuzzer:fuzztest",
     dir_pw_bytes,
   ]
   sources = [ "packet_meta_test.cc" ]
diff --git a/pw_rpc/CMakeLists.txt b/pw_rpc/CMakeLists.txt
index 903d884..4d95c18 100644
--- a/pw_rpc/CMakeLists.txt
+++ b/pw_rpc/CMakeLists.txt
@@ -331,6 +331,7 @@
     packet_test.cc
   PRIVATE_DEPS
     pw_bytes
+    pw_fuzzer.fuzztest
     pw_protobuf
     pw_rpc.server
   GROUPS
@@ -343,6 +344,7 @@
     packet_meta_test.cc
   PRIVATE_DEPS
     pw_bytes
+    pw_fuzzer.fuzztest
     pw_rpc.server
   GROUPS
     modules
diff --git a/pw_rpc/callback_test.cc b/pw_rpc/callback_test.cc
index 72cb963..a68c23b 100644
--- a/pw_rpc/callback_test.cc
+++ b/pw_rpc/callback_test.cc
@@ -89,11 +89,11 @@
 };
 
 TEST_F(CallbacksTest, DestructorWaitsUntilCallbacksComplete) {
-  // Skip this test if locks are disabled because the thread can't yield.
   if (PW_RPC_USE_GLOBAL_MUTEX == 0) {
     callback_thread_sem_.release();
     callback_thread_.join();
-    GTEST_SKIP();
+    GTEST_SKIP()
+        << "Skipping because locks are disabled, so this thread cannot yield.";
   }
 
   {
@@ -134,11 +134,11 @@
 }
 
 TEST_F(CallbacksTest, MoveActiveCall_WaitsForCallbackToComplete) {
-  // Skip this test if locks are disabled because the thread can't yield.
   if (PW_RPC_USE_GLOBAL_MUTEX == 0) {
     callback_thread_sem_.release();
     callback_thread_.join();
-    GTEST_SKIP();
+    GTEST_SKIP()
+        << "Skipping because locks are disabled, so this thread cannot yield.";
   }
 
   call_1_ = TestService::TestBidirectionalStreamRpc(
diff --git a/pw_rpc/client.cc b/pw_rpc/client.cc
index 3175f22..4cf26dc 100644
--- a/pw_rpc/client.cc
+++ b/pw_rpc/client.cc
@@ -42,7 +42,8 @@
 
   if (channel == nullptr) {
     internal::rpc_lock().unlock();
-    PW_LOG_WARN("RPC client received a packet for an unregistered channel");
+    PW_LOG_WARN("RPC client received a packet for an unregistered channel: %lu",
+                static_cast<unsigned long>(packet.channel_id()));
     return Status::Unavailable();
   }
 
diff --git a/pw_rpc/docs.rst b/pw_rpc/docs.rst
index 25598fb..481c648 100644
--- a/pw_rpc/docs.rst
+++ b/pw_rpc/docs.rst
@@ -459,7 +459,7 @@
 
 For example, in C++ with pw_protobuf:
 
-.. code:: c++
+.. code-block:: c++
 
    #include "pw_rpc/server.h"
 
@@ -1040,7 +1040,7 @@
 To send incoming RPC packets from the transport layer to be processed by a
 client, the client's ``ProcessPacket`` function is called with the packet data.
 
-.. code:: c++
+.. code-block:: c++
 
    #include "pw_rpc/client.h"
 
@@ -1683,7 +1683,7 @@
 have a call for the specified method and then responses to it. It supports
 timeout for the waiting part (default timeout is 100ms).
 
-.. code:: c++
+.. code-block:: c++
 
    #include "pw_rpc/test_helpers.h"
 
diff --git a/pw_rpc/java/main/dev/pigweed/pw_rpc/Client.java b/pw_rpc/java/main/dev/pigweed/pw_rpc/Client.java
index cdbfc07..5e682fb 100644
--- a/pw_rpc/java/main/dev/pigweed/pw_rpc/Client.java
+++ b/pw_rpc/java/main/dev/pigweed/pw_rpc/Client.java
@@ -188,7 +188,7 @@
       throw new InvalidRpcServiceException(serviceId);
     }
 
-    Method method = service.methods().get(methodId);
+    Method method = service.method(methodId);
     if (method == null) {
       throw new InvalidRpcServiceMethodException(service, methodId);
     }
diff --git a/pw_rpc/java/main/dev/pigweed/pw_rpc/Service.java b/pw_rpc/java/main/dev/pigweed/pw_rpc/Service.java
index 55a5018..3cf15db 100644
--- a/pw_rpc/java/main/dev/pigweed/pw_rpc/Service.java
+++ b/pw_rpc/java/main/dev/pigweed/pw_rpc/Service.java
@@ -37,33 +37,26 @@
                                            .collect(Collectors.toMap(Method::id, m -> m)));
   }
 
-  public String name() {
+  /** Returns the fully qualified name of this service (package.Service). */
+  public final String name() {
     return name;
   }
 
-  int id() {
+  /** Returns the methods in this service. */
+  public final ImmutableCollection<Method> getMethods() {
+    return methods.values();
+  }
+
+  final int id() {
     return id;
   }
 
-  /**
-   * TODO(hepler): Make this function package private, since it exposes method IDs.
-   *
-   * @deprecated Do not call this function; call getMethods() instead.
-   */
-  @Deprecated
-  public ImmutableMap<Integer, Method> methods() {
-    return methods;
-  }
-
-  /**
-   * Returns the methods in this service.
-   */
-  public final ImmutableCollection<Method> getMethods() {
-    return methods().values();
-  }
-
   final Method method(String name) {
-    return methods().get(Ids.calculate(name));
+    return methods.get(Ids.calculate(name));
+  }
+
+  final Method method(int id) {
+    return methods.get(id);
   }
 
   @Override
diff --git a/pw_rpc/nanopb/docs.rst b/pw_rpc/nanopb/docs.rst
index 993f7c5..4ae7ded 100644
--- a/pw_rpc/nanopb/docs.rst
+++ b/pw_rpc/nanopb/docs.rst
@@ -16,25 +16,25 @@
 (and optionally other related protos), then depend on the ``nanopb_rpc``
 version of that library in the code implementing the service.
 
-.. code::
+.. code-block::
 
-  # chat/BUILD.gn
+   # chat/BUILD.gn
 
-  import("$dir_pw_build/target_types.gni")
-  import("$dir_pw_protobuf_compiler/proto.gni")
+   import("$dir_pw_build/target_types.gni")
+   import("$dir_pw_protobuf_compiler/proto.gni")
 
-  pw_proto_library("chat_protos") {
-    sources = [ "chat_protos/chat_service.proto" ]
-  }
+   pw_proto_library("chat_protos") {
+     sources = [ "chat_protos/chat_service.proto" ]
+   }
 
-  # Library that implements the Chat service.
-  pw_source_set("chat_service") {
-    sources = [
-      "chat_service.cc",
-      "chat_service.h",
-    ]
-    public_deps = [ ":chat_protos.nanopb_rpc" ]
-  }
+   # Library that implements the Chat service.
+   pw_source_set("chat_service") {
+     sources = [
+       "chat_service.cc",
+       "chat_service.h",
+     ]
+     public_deps = [ ":chat_protos.nanopb_rpc" ]
+   }
 
 A C++ header file is generated for each input .proto file, with the ``.proto``
 extension replaced by ``.rpc.pb.h``. For example, given the input file
@@ -45,7 +45,7 @@
 ==================
 All examples in this document use the following RPC service definition.
 
-.. code:: protobuf
+.. code-block:: protobuf
 
   // chat/chat_protos/chat_service.proto
 
@@ -74,7 +74,7 @@
 The generated class is a base class which must be derived to implement the
 service's methods. The base class is templated on the derived class.
 
-.. code:: c++
+.. code-block:: c++
 
   #include "chat_protos/chat_service.rpc.pb.h"
 
@@ -89,7 +89,7 @@
 and populates a response struct to send back, with a status indicating whether
 the request succeeded.
 
-.. code:: c++
+.. code-block:: c++
 
   pw::Status GetRoomInformation(pw::rpc::
                                 const RoomInfoRequest& request,
@@ -100,7 +100,7 @@
 A server streaming RPC receives the client's request message alongside a
 ``ServerWriter``, used to stream back responses.
 
-.. code:: c++
+.. code-block:: c++
 
   void ListUsersInRoom(pw::rpc::
                        const ListUsersRequest& request,
diff --git a/pw_rpc/packet_meta_test.cc b/pw_rpc/packet_meta_test.cc
index d21f3a4..0f8a968 100644
--- a/pw_rpc/packet_meta_test.cc
+++ b/pw_rpc/packet_meta_test.cc
@@ -15,21 +15,22 @@
 #include "pw_rpc/packet_meta.h"
 
 #include "gtest/gtest.h"
+#include "pw_fuzzer/fuzztest.h"
 #include "pw_rpc/internal/packet.h"
 
 namespace pw::rpc {
 namespace {
 
-TEST(PacketMeta, FromBufferDecodesValidMinimalPacket) {
-  const uint32_t kChannelId = 12;
-  const ServiceId kServiceId = internal::WrapServiceId(0xdeadbeef);
-  const uint32_t kMethodId = 44;
+using namespace fuzzer;
 
+void FromBufferDecodesValidMinimalPacket(uint32_t channel_id,
+                                         uint32_t service_id,
+                                         uint32_t method_id) {
   internal::Packet packet;
-  packet.set_channel_id(kChannelId);
-  packet.set_service_id(internal::UnwrapServiceId(kServiceId));
+  packet.set_channel_id(channel_id);
+  packet.set_service_id(service_id);
   packet.set_type(internal::pwpb::PacketType::RESPONSE);
-  packet.set_method_id(kMethodId);
+  packet.set_method_id(method_id);
 
   std::byte buffer[128];
   Result<ConstByteSpan> encode_result = packet.Encode(buffer);
@@ -37,11 +38,21 @@
 
   Result<PacketMeta> decode_result = PacketMeta::FromBuffer(*encode_result);
   ASSERT_EQ(decode_result.status(), OkStatus());
-  EXPECT_EQ(decode_result->channel_id(), kChannelId);
-  EXPECT_EQ(decode_result->service_id(), kServiceId);
+  EXPECT_EQ(decode_result->channel_id(), channel_id);
+  EXPECT_EQ(decode_result->service_id(), internal::WrapServiceId(service_id));
   EXPECT_TRUE(decode_result->destination_is_client());
 }
 
+TEST(PacketMeta, FromBufferDecodesValidMinimalPacketConst) {
+  const uint32_t kChannelId = 12;
+  const uint32_t kServiceId = 0xdeadbeef;
+  const uint32_t kMethodId = 44;
+  FromBufferDecodesValidMinimalPacket(kChannelId, kServiceId, kMethodId);
+}
+
+FUZZ_TEST(PacketMeta, FromBufferDecodesValidMinimalPacket)
+    .WithDomains(NonZero<uint32_t>(), NonZero<uint32_t>(), NonZero<uint32_t>());
+
 TEST(PacketMeta, FromBufferFailsOnIncompletePacket) {
   internal::Packet packet;
 
diff --git a/pw_rpc/packet_test.cc b/pw_rpc/packet_test.cc
index a0d7da3..b105195 100644
--- a/pw_rpc/packet_test.cc
+++ b/pw_rpc/packet_test.cc
@@ -16,6 +16,7 @@
 
 #include "gtest/gtest.h"
 #include "pw_bytes/array.h"
+#include "pw_fuzzer/fuzztest.h"
 #include "pw_protobuf/wire_format.h"
 
 namespace pw::rpc::internal {
@@ -24,6 +25,7 @@
 using protobuf::FieldKey;
 using ::pw::rpc::internal::pwpb::PacketType;
 using std::byte;
+using namespace fuzzer;
 
 constexpr auto kPayload = bytes::Array<0x82, 0x02, 0xff, 0xff>();
 
@@ -116,16 +118,19 @@
   EXPECT_EQ(Status::DataLoss(), Packet::FromBuffer(bad_data).status());
 }
 
-TEST(Packet, EncodeDecode) {
-  constexpr byte payload[]{byte(0x00), byte(0x01), byte(0x02), byte(0x03)};
-
+void EncodeDecode(uint32_t channel_id,
+                  uint32_t service_id,
+                  uint32_t method_id,
+                  uint32_t call_id,
+                  ConstByteSpan payload,
+                  Status status) {
   Packet packet;
-  packet.set_channel_id(12);
-  packet.set_service_id(0xdeadbeef);
-  packet.set_method_id(0x03a82921);
-  packet.set_call_id(33);
+  packet.set_channel_id(channel_id);
+  packet.set_service_id(service_id);
+  packet.set_method_id(method_id);
+  packet.set_call_id(call_id);
   packet.set_payload(payload);
-  packet.set_status(Status::Unavailable());
+  packet.set_status(status);
 
   byte buffer[128];
   Result result = packet.Encode(buffer);
@@ -146,9 +151,22 @@
                         packet.payload().data(),
                         packet.payload().size()),
             0);
-  EXPECT_EQ(decoded.status(), Status::Unavailable());
+  EXPECT_EQ(decoded.status(), status);
 }
 
+TEST(Packet, EncodeDecodeFixed) {
+  constexpr byte payload[]{byte(0x00), byte(0x01), byte(0x02), byte(0x03)};
+  EncodeDecode(12, 0xdeadbeef, 0x03a82921, 33, payload, Status::Unavailable());
+}
+
+FUZZ_TEST(Packet, EncodeDecode)
+    .WithDomains(NonZero<uint32_t>(),
+                 NonZero<uint32_t>(),
+                 NonZero<uint32_t>(),
+                 NonZero<uint32_t>(),
+                 VectorOf<100>(Arbitrary<byte>()),
+                 Arbitrary<Status>());
+
 constexpr size_t kReservedSize = 2 /* type */ + 2 /* channel */ +
                                  5 /* service */ + 5 /* method */ +
                                  2 /* payload key */ + 2 /* status */;
diff --git a/pw_rpc/pwpb/docs.rst b/pw_rpc/pwpb/docs.rst
index a1332a5..7c45067 100644
--- a/pw_rpc/pwpb/docs.rst
+++ b/pw_rpc/pwpb/docs.rst
@@ -12,25 +12,25 @@
 (and optionally other related protos), then depend on the ``pwpb_rpc``
 version of that library in the code implementing the service.
 
-.. code::
+.. code-block::
 
-  # chat/BUILD.gn
+   # chat/BUILD.gn
 
-  import("$dir_pw_build/target_types.gni")
-  import("$dir_pw_protobuf_compiler/proto.gni")
+   import("$dir_pw_build/target_types.gni")
+   import("$dir_pw_protobuf_compiler/proto.gni")
 
-  pw_proto_library("chat_protos") {
-    sources = [ "chat_protos/chat_service.proto" ]
-  }
+   pw_proto_library("chat_protos") {
+     sources = [ "chat_protos/chat_service.proto" ]
+   }
 
-  # Library that implements the Chat service.
-  pw_source_set("chat_service") {
-    sources = [
-      "chat_service.cc",
-      "chat_service.h",
-    ]
-    public_deps = [ ":chat_protos.pwpb_rpc" ]
-  }
+   # Library that implements the Chat service.
+   pw_source_set("chat_service") {
+     sources = [
+       "chat_service.cc",
+       "chat_service.h",
+     ]
+     public_deps = [ ":chat_protos.pwpb_rpc" ]
+   }
 
 A C++ header file is generated for each input .proto file, with the ``.proto``
 extension replaced by ``.rpc.pwpb.h``. For example, given the input file
@@ -41,7 +41,7 @@
 ==================
 All examples in this document use the following RPC service definition.
 
-.. code:: protobuf
+.. code-block:: protobuf
 
   // chat/chat_protos/chat_service.proto
 
@@ -70,7 +70,7 @@
 The generated class is a base class which must be derived to implement the
 service's methods. The base class is templated on the derived class.
 
-.. code:: c++
+.. code-block:: c++
 
   #include "chat_protos/chat_service.rpc.pwpb.h"
 
@@ -85,7 +85,7 @@
 and populates a response struct to send back, with a status indicating whether
 the request succeeded.
 
-.. code:: c++
+.. code-block:: c++
 
   pw::Status GetRoomInformation(const RoomInfoRequest::Message& request,
                                 RoomInfoResponse::Message& response);
@@ -95,7 +95,7 @@
 A server streaming RPC receives the client's request message alongside a
 ``ServerWriter``, used to stream back responses.
 
-.. code:: c++
+.. code-block:: c++
 
   void ListUsersInRoom(const ListUsersRequest::Message& request,
                        pw::rpc::ServerWriter<ListUsersResponse::Message>& writer);
diff --git a/pw_rpc/pwpb/public/pw_rpc/pwpb/serde.h b/pw_rpc/pwpb/public/pw_rpc/pwpb/serde.h
index 92572de..a9108a5 100644
--- a/pw_rpc/pwpb/public/pw_rpc/pwpb/serde.h
+++ b/pw_rpc/pwpb/public/pw_rpc/pwpb/serde.h
@@ -51,9 +51,9 @@
     StreamEncoder encoder(output, scratch_buffer);
     const Status result = encoder.Write(as_bytes(span(&message, 1)), *table_);
 
-    // TODO(b/269633514): Add 1 to the encoded size because pw_protobuf
+    // TODO(b/269633514): Add 16 to the encoded size because pw_protobuf
     //     sometimes fails to encode to buffers that exactly fit the output.
-    return StatusWithSize(result, output.bytes_written() + 1);
+    return StatusWithSize(result, output.bytes_written() + 16);
   }
 
   // Decodes a serialized protobuf into a pw_protobuf message struct.
diff --git a/pw_rpc/py/pw_rpc/callback_client/call.py b/pw_rpc/py/pw_rpc/callback_client/call.py
index 0d3d15c..581e4ba 100644
--- a/pw_rpc/py/pw_rpc/callback_client/call.py
+++ b/pw_rpc/py/pw_rpc/callback_client/call.py
@@ -322,6 +322,11 @@
     ) -> Iterator:
         return self._get_responses(count=count, timeout_s=timeout_s)
 
+    def request_completion(self) -> None:
+        """Sends client completion packet to server."""
+        if not self.completed():
+            self._rpcs.send_client_stream_end(self._rpc)
+
     def __iter__(self) -> Iterator:
         return self.get_responses()
 
diff --git a/pw_rpc/py/tests/callback_client_test.py b/pw_rpc/py/tests/callback_client_test.py
index 3b437c2..c014e3e 100755
--- a/pw_rpc/py/tests/callback_client_test.py
+++ b/pw_rpc/py/tests/callback_client_test.py
@@ -688,6 +688,40 @@
             ]
         )
 
+    def test_request_completion(self) -> None:
+        resp = self.rpc.method.response_type(payload='!!!')
+        self._enqueue_server_stream(CLIENT_CHANNEL_ID, self.rpc.method, resp)
+
+        callback = mock.Mock()
+        call = self.rpc.invoke(self._request(magic_number=3), callback)
+        callback.assert_called_once_with(
+            call, self.rpc.method.response_type(payload='!!!')
+        )
+
+        callback.reset_mock()
+
+        call.request_completion()
+
+        self.assertEqual(
+            self.last_request().type,
+            packet_pb2.PacketType.CLIENT_REQUEST_COMPLETION,
+        )
+
+        # Ensure the RPC can be called after being completed.
+        self._enqueue_server_stream(CLIENT_CHANNEL_ID, self.method, resp)
+        self._enqueue_response(CLIENT_CHANNEL_ID, self.method, Status.OK)
+
+        call = self.rpc.invoke(
+            self._request(magic_number=3), callback, callback
+        )
+
+        callback.assert_has_calls(
+            [
+                mock.call(call, self.method.response_type(payload='!!!')),
+                mock.call(call, Status.OK),
+            ]
+        )
+
     def test_nonblocking_with_request_args(self) -> None:
         self.rpc.invoke(request_args=dict(magic_number=1138))
         self.assertEqual(
diff --git a/pw_rpc_transport/BUILD.bazel b/pw_rpc_transport/BUILD.bazel
index 1359cf9..f1a1da5 100644
--- a/pw_rpc_transport/BUILD.bazel
+++ b/pw_rpc_transport/BUILD.bazel
@@ -12,18 +12,18 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-load("//pw_build:pigweed.bzl", "pw_cc_library", "pw_cc_test")
 load("@rules_proto//proto:defs.bzl", "proto_library")
+load("//pw_build:pigweed.bzl", "pw_cc_library", "pw_cc_test")
+load(
+    "//pw_build:selects.bzl",
+    "TARGET_COMPATIBLE_WITH_HOST_SELECT",
+)
 load(
     "//pw_protobuf_compiler:pw_proto_library.bzl",
     "pw_proto_filegroup",
     "pwpb_proto_library",
     "pwpb_rpc_proto_library",
 )
-load(
-    "//pw_build:selects.bzl",
-    "TARGET_COMPATIBLE_WITH_HOST_SELECT",
-)
 
 package(default_visibility = ["//visibility:public"])
 
diff --git a/pw_rust/BUILD.bazel b/pw_rust/BUILD.bazel
index 5af4a0f..48cf0c7 100644
--- a/pw_rust/BUILD.bazel
+++ b/pw_rust/BUILD.bazel
@@ -16,9 +16,18 @@
 
 rust_docs(
     name = "docs",
+
+    # These need to be kept in dependency order for inter-crate linking to
+    # work.
+    #
+    # TODO: b/295227522 - Add support to `rust_docs` to automatically process
+    # crates in topological order.
     crates = [
         "//pw_status/rust:pw_status",
         "//pw_stream/rust:pw_stream",
         "//pw_varint/rust:pw_varint",
+        "//pw_tokenizer/rust:pw_tokenizer_core",
+        "//pw_tokenizer/rust:pw_tokenizer_printf",
+        "//pw_tokenizer/rust:pw_tokenizer",
     ],
 )
diff --git a/pw_rust/bazel_patches/0001-rustdoc_test-Apply-prefix-stripping-to-proc_macro-de.patch b/pw_rust/bazel_patches/0001-rustdoc_test-Apply-prefix-stripping-to-proc_macro-de.patch
index 82b931f..ac96bbe 100644
--- a/pw_rust/bazel_patches/0001-rustdoc_test-Apply-prefix-stripping-to-proc_macro-de.patch
+++ b/pw_rust/bazel_patches/0001-rustdoc_test-Apply-prefix-stripping-to-proc_macro-de.patch
@@ -1,7 +1,7 @@
-From a6e43874ce6de22c4ddf568c339e5b8b923533d9 Mon Sep 17 00:00:00 2001
+From c50a4e729812a7d10f15c4e009ee1c2f80519880 Mon Sep 17 00:00:00 2001
 From: Erik Gilling <konkers@google.com>
 Date: Tue, 2 May 2023 21:54:55 +0000
-Subject: [PATCH] rustdoc_test: Apply prefix stripping to proc_macro
+Subject: [PATCH 1/2] rustdoc_test: Apply prefix stripping to proc_macro
  dependencies.
 
 Without stripping the prefix, rustdoc can not find the proc macro
@@ -11,7 +11,7 @@
  1 file changed, 1 insertion(+), 1 deletion(-)
 
 diff --git a/rust/private/rustdoc_test.bzl b/rust/private/rustdoc_test.bzl
-index 10be63eb..39ad4657 100644
+index 9fb73e1e..6d968f51 100644
 --- a/rust/private/rustdoc_test.bzl
 +++ b/rust/private/rustdoc_test.bzl
 @@ -67,7 +67,7 @@ def _construct_writer_arguments(ctx, test_runner, opt_test_params, action, crate
@@ -24,5 +24,5 @@
          dep_dep_info = getattr(dep, "dep_info", None)
          if dep_crate_info:
 -- 
-2.40.1.495.gc816e09b53d-goog
+2.41.0.694.ge786442a9b-goog
 
diff --git a/pw_rust/bazel_patches/0002-Add-opt_level-argument-to-rust_repository_set.patch b/pw_rust/bazel_patches/0002-Add-opt_level-argument-to-rust_repository_set.patch
deleted file mode 100644
index 3ab9ac8..0000000
--- a/pw_rust/bazel_patches/0002-Add-opt_level-argument-to-rust_repository_set.patch
+++ /dev/null
@@ -1,250 +0,0 @@
-From c8ea89704b63622a9863aa6a1ceaa297c7b80377 Mon Sep 17 00:00:00 2001
-From: Erik Gilling <konkers@google.com>
-Date: Tue, 27 Jun 2023 18:01:41 +0000
-Subject: [PATCH 2/2] Add `opt_level` argument to `rust_repository_set`.
-
-`rust_toolchain` allows setting of optimization level. This changes
-plumbs that into `rust_repository_set` to allow control when
-declaring toolchains through that rule.
----
- docs/flatten.md                   | 16 ++++++++++------
- docs/rust_repositories.md         | 16 ++++++++++------
- rust/private/repository_utils.bzl |  6 +++++-
- rust/repositories.bzl             | 10 ++++++++++
- 4 files changed, 35 insertions(+), 13 deletions(-)
-
-diff --git a/docs/flatten.md b/docs/flatten.md
-index 40550451..584fcede 100644
---- a/docs/flatten.md
-+++ b/docs/flatten.md
-@@ -1284,8 +1284,8 @@ Generates a toolchain-bearing repository that declares the toolchains from some
- <pre>
- rust_toolchain_tools_repository(<a href="#rust_toolchain_tools_repository-name">name</a>, <a href="#rust_toolchain_tools_repository-allocator_library">allocator_library</a>, <a href="#rust_toolchain_tools_repository-auth">auth</a>, <a href="#rust_toolchain_tools_repository-dev_components">dev_components</a>, <a href="#rust_toolchain_tools_repository-edition">edition</a>, <a href="#rust_toolchain_tools_repository-exec_triple">exec_triple</a>,
-                                 <a href="#rust_toolchain_tools_repository-extra_exec_rustc_flags">extra_exec_rustc_flags</a>, <a href="#rust_toolchain_tools_repository-extra_rustc_flags">extra_rustc_flags</a>, <a href="#rust_toolchain_tools_repository-global_allocator_library">global_allocator_library</a>,
--                                <a href="#rust_toolchain_tools_repository-iso_date">iso_date</a>, <a href="#rust_toolchain_tools_repository-repo_mapping">repo_mapping</a>, <a href="#rust_toolchain_tools_repository-rustfmt_version">rustfmt_version</a>, <a href="#rust_toolchain_tools_repository-sha256s">sha256s</a>, <a href="#rust_toolchain_tools_repository-target_triple">target_triple</a>, <a href="#rust_toolchain_tools_repository-urls">urls</a>,
--                                <a href="#rust_toolchain_tools_repository-version">version</a>)
-+                                <a href="#rust_toolchain_tools_repository-iso_date">iso_date</a>, <a href="#rust_toolchain_tools_repository-opt_level">opt_level</a>, <a href="#rust_toolchain_tools_repository-repo_mapping">repo_mapping</a>, <a href="#rust_toolchain_tools_repository-rustfmt_version">rustfmt_version</a>, <a href="#rust_toolchain_tools_repository-sha256s">sha256s</a>,
-+                                <a href="#rust_toolchain_tools_repository-target_triple">target_triple</a>, <a href="#rust_toolchain_tools_repository-urls">urls</a>, <a href="#rust_toolchain_tools_repository-version">version</a>)
- </pre>
- 
- Composes a single workspace containing the toolchain components for compiling on a given platform to a series of target platforms.
-@@ -1307,6 +1307,7 @@ A given instance of this rule should be accompanied by a toolchain_repository_pr
- | <a id="rust_toolchain_tools_repository-extra_rustc_flags"></a>extra_rustc_flags |  Extra flags to pass to rustc in non-exec configuration   | List of strings | optional | <code>[]</code> |
- | <a id="rust_toolchain_tools_repository-global_allocator_library"></a>global_allocator_library |  Target that provides allocator functions when a global allocator is used with cc_common.link.   | String | optional | <code>""</code> |
- | <a id="rust_toolchain_tools_repository-iso_date"></a>iso_date |  The date of the tool (or None, if the version is a specific version).   | String | optional | <code>""</code> |
-+| <a id="rust_toolchain_tools_repository-opt_level"></a>opt_level |  Rustc optimization levels.   | <a href="https://bazel.build/rules/lib/dict">Dictionary: String -> String</a> | optional | <code>{}</code> |
- | <a id="rust_toolchain_tools_repository-repo_mapping"></a>repo_mapping |  A dictionary from local repository name to global repository name. This allows controls over workspace dependency resolution for dependencies of this repository.&lt;p&gt;For example, an entry <code>"@foo": "@bar"</code> declares that, for any time this repository depends on <code>@foo</code> (such as a dependency on <code>@foo//some:target</code>, it should actually resolve that dependency within globally-declared <code>@bar</code> (<code>@bar//some:target</code>).   | <a href="https://bazel.build/rules/lib/dict">Dictionary: String -> String</a> | required |  |
- | <a id="rust_toolchain_tools_repository-rustfmt_version"></a>rustfmt_version |  The version of the tool among "nightly", "beta", or an exact version.   | String | optional | <code>""</code> |
- | <a id="rust_toolchain_tools_repository-sha256s"></a>sha256s |  A dict associating tool subdirectories to sha256 hashes. See [rust_repositories](#rust_repositories) for more details.   | <a href="https://bazel.build/rules/lib/dict">Dictionary: String -> String</a> | optional | <code>{}</code> |
-@@ -1933,8 +1934,9 @@ rust_repositories(<a href="#rust_repositories-kwargs">kwargs</a>)
- <pre>
- rust_repository_set(<a href="#rust_repository_set-name">name</a>, <a href="#rust_repository_set-exec_triple">exec_triple</a>, <a href="#rust_repository_set-target_settings">target_settings</a>, <a href="#rust_repository_set-version">version</a>, <a href="#rust_repository_set-versions">versions</a>, <a href="#rust_repository_set-allocator_library">allocator_library</a>,
-                     <a href="#rust_repository_set-global_allocator_library">global_allocator_library</a>, <a href="#rust_repository_set-extra_target_triples">extra_target_triples</a>, <a href="#rust_repository_set-iso_date">iso_date</a>, <a href="#rust_repository_set-rustfmt_version">rustfmt_version</a>,
--                    <a href="#rust_repository_set-edition">edition</a>, <a href="#rust_repository_set-dev_components">dev_components</a>, <a href="#rust_repository_set-extra_rustc_flags">extra_rustc_flags</a>, <a href="#rust_repository_set-extra_exec_rustc_flags">extra_exec_rustc_flags</a>, <a href="#rust_repository_set-sha256s">sha256s</a>, <a href="#rust_repository_set-urls">urls</a>,
--                    <a href="#rust_repository_set-auth">auth</a>, <a href="#rust_repository_set-register_toolchain">register_toolchain</a>, <a href="#rust_repository_set-exec_compatible_with">exec_compatible_with</a>, <a href="#rust_repository_set-default_target_compatible_with">default_target_compatible_with</a>)
-+                    <a href="#rust_repository_set-edition">edition</a>, <a href="#rust_repository_set-dev_components">dev_components</a>, <a href="#rust_repository_set-extra_rustc_flags">extra_rustc_flags</a>, <a href="#rust_repository_set-extra_exec_rustc_flags">extra_exec_rustc_flags</a>, <a href="#rust_repository_set-opt_level">opt_level</a>,
-+                    <a href="#rust_repository_set-sha256s">sha256s</a>, <a href="#rust_repository_set-urls">urls</a>, <a href="#rust_repository_set-auth">auth</a>, <a href="#rust_repository_set-register_toolchain">register_toolchain</a>, <a href="#rust_repository_set-exec_compatible_with">exec_compatible_with</a>,
-+                    <a href="#rust_repository_set-default_target_compatible_with">default_target_compatible_with</a>)
- </pre>
- 
- Assembles a remote repository for the given toolchain params, produces a proxy repository     to contain the toolchain declaration, and registers the toolchains.
-@@ -1958,6 +1960,7 @@ Assembles a remote repository for the given toolchain params, produces a proxy r
- | <a id="rust_repository_set-dev_components"></a>dev_components |  Whether to download the rustc-dev components. Requires version to be "nightly".   |  `False` |
- | <a id="rust_repository_set-extra_rustc_flags"></a>extra_rustc_flags |  Dictionary of target triples to list of extra flags to pass to rustc in non-exec configuration.   |  `None` |
- | <a id="rust_repository_set-extra_exec_rustc_flags"></a>extra_exec_rustc_flags |  Extra flags to pass to rustc in exec configuration.   |  `None` |
-+| <a id="rust_repository_set-opt_level"></a>opt_level |  Dictionary of target triples to optimiztion config.   |  `None` |
- | <a id="rust_repository_set-sha256s"></a>sha256s |  A dict associating tool subdirectories to sha256 hashes. See [rust_repositories](#rust_repositories) for more details.   |  `None` |
- | <a id="rust_repository_set-urls"></a>urls |  A list of mirror urls containing the tools from the Rust-lang static file server. These must contain the '{}' used to substitute the tool being fetched (using .format).   |  `["https://static.rust-lang.org/dist/{}.tar.gz"]` |
- | <a id="rust_repository_set-auth"></a>auth |  Auth object compatible with repository_ctx.download to use when downloading files. See [repository_ctx.download](https://docs.bazel.build/versions/main/skylark/lib/repository_ctx.html#download) for more details.   |  `None` |
-@@ -2038,8 +2041,8 @@ rust_test_suite(
- rust_toolchain_repository(<a href="#rust_toolchain_repository-name">name</a>, <a href="#rust_toolchain_repository-version">version</a>, <a href="#rust_toolchain_repository-exec_triple">exec_triple</a>, <a href="#rust_toolchain_repository-target_triple">target_triple</a>, <a href="#rust_toolchain_repository-exec_compatible_with">exec_compatible_with</a>,
-                           <a href="#rust_toolchain_repository-target_compatible_with">target_compatible_with</a>, <a href="#rust_toolchain_repository-target_settings">target_settings</a>, <a href="#rust_toolchain_repository-channel">channel</a>, <a href="#rust_toolchain_repository-allocator_library">allocator_library</a>,
-                           <a href="#rust_toolchain_repository-global_allocator_library">global_allocator_library</a>, <a href="#rust_toolchain_repository-iso_date">iso_date</a>, <a href="#rust_toolchain_repository-rustfmt_version">rustfmt_version</a>, <a href="#rust_toolchain_repository-edition">edition</a>,
--                          <a href="#rust_toolchain_repository-dev_components">dev_components</a>, <a href="#rust_toolchain_repository-extra_rustc_flags">extra_rustc_flags</a>, <a href="#rust_toolchain_repository-extra_exec_rustc_flags">extra_exec_rustc_flags</a>, <a href="#rust_toolchain_repository-sha256s">sha256s</a>, <a href="#rust_toolchain_repository-urls">urls</a>,
--                          <a href="#rust_toolchain_repository-auth">auth</a>)
-+                          <a href="#rust_toolchain_repository-dev_components">dev_components</a>, <a href="#rust_toolchain_repository-extra_rustc_flags">extra_rustc_flags</a>, <a href="#rust_toolchain_repository-extra_exec_rustc_flags">extra_exec_rustc_flags</a>, <a href="#rust_toolchain_repository-opt_level">opt_level</a>,
-+                          <a href="#rust_toolchain_repository-sha256s">sha256s</a>, <a href="#rust_toolchain_repository-urls">urls</a>, <a href="#rust_toolchain_repository-auth">auth</a>)
- </pre>
- 
- Assembles a remote repository for the given toolchain params, produces a proxy repository     to contain the toolchain declaration, and registers the toolchains.
-@@ -2065,6 +2068,7 @@ Assembles a remote repository for the given toolchain params, produces a proxy r
- | <a id="rust_toolchain_repository-dev_components"></a>dev_components |  Whether to download the rustc-dev components. Requires version to be "nightly". Defaults to False.   |  `False` |
- | <a id="rust_toolchain_repository-extra_rustc_flags"></a>extra_rustc_flags |  Extra flags to pass to rustc in non-exec configuration.   |  `None` |
- | <a id="rust_toolchain_repository-extra_exec_rustc_flags"></a>extra_exec_rustc_flags |  Extra flags to pass to rustc in exec configuration.   |  `None` |
-+| <a id="rust_toolchain_repository-opt_level"></a>opt_level |  Optimization level config for this toolchain.   |  `None` |
- | <a id="rust_toolchain_repository-sha256s"></a>sha256s |  A dict associating tool subdirectories to sha256 hashes. See [rust_repositories](#rust_repositories) for more details.   |  `None` |
- | <a id="rust_toolchain_repository-urls"></a>urls |  A list of mirror urls containing the tools from the Rust-lang static file server. These must contain the '{}' used to substitute the tool being fetched (using .format). Defaults to ['https://static.rust-lang.org/dist/{}.tar.gz']   |  `["https://static.rust-lang.org/dist/{}.tar.gz"]` |
- | <a id="rust_toolchain_repository-auth"></a>auth |  Auth object compatible with repository_ctx.download to use when downloading files. See [repository_ctx.download](https://docs.bazel.build/versions/main/skylark/lib/repository_ctx.html#download) for more details.   |  `None` |
-diff --git a/docs/rust_repositories.md b/docs/rust_repositories.md
-index 831e9ca4..69633581 100644
---- a/docs/rust_repositories.md
-+++ b/docs/rust_repositories.md
-@@ -154,8 +154,8 @@ Generates a toolchain-bearing repository that declares the toolchains from some
- <pre>
- rust_toolchain_tools_repository(<a href="#rust_toolchain_tools_repository-name">name</a>, <a href="#rust_toolchain_tools_repository-allocator_library">allocator_library</a>, <a href="#rust_toolchain_tools_repository-auth">auth</a>, <a href="#rust_toolchain_tools_repository-dev_components">dev_components</a>, <a href="#rust_toolchain_tools_repository-edition">edition</a>, <a href="#rust_toolchain_tools_repository-exec_triple">exec_triple</a>,
-                                 <a href="#rust_toolchain_tools_repository-extra_exec_rustc_flags">extra_exec_rustc_flags</a>, <a href="#rust_toolchain_tools_repository-extra_rustc_flags">extra_rustc_flags</a>, <a href="#rust_toolchain_tools_repository-global_allocator_library">global_allocator_library</a>,
--                                <a href="#rust_toolchain_tools_repository-iso_date">iso_date</a>, <a href="#rust_toolchain_tools_repository-repo_mapping">repo_mapping</a>, <a href="#rust_toolchain_tools_repository-rustfmt_version">rustfmt_version</a>, <a href="#rust_toolchain_tools_repository-sha256s">sha256s</a>, <a href="#rust_toolchain_tools_repository-target_triple">target_triple</a>, <a href="#rust_toolchain_tools_repository-urls">urls</a>,
--                                <a href="#rust_toolchain_tools_repository-version">version</a>)
-+                                <a href="#rust_toolchain_tools_repository-iso_date">iso_date</a>, <a href="#rust_toolchain_tools_repository-opt_level">opt_level</a>, <a href="#rust_toolchain_tools_repository-repo_mapping">repo_mapping</a>, <a href="#rust_toolchain_tools_repository-rustfmt_version">rustfmt_version</a>, <a href="#rust_toolchain_tools_repository-sha256s">sha256s</a>,
-+                                <a href="#rust_toolchain_tools_repository-target_triple">target_triple</a>, <a href="#rust_toolchain_tools_repository-urls">urls</a>, <a href="#rust_toolchain_tools_repository-version">version</a>)
- </pre>
- 
- Composes a single workspace containing the toolchain components for compiling on a given platform to a series of target platforms.
-@@ -177,6 +177,7 @@ A given instance of this rule should be accompanied by a toolchain_repository_pr
- | <a id="rust_toolchain_tools_repository-extra_rustc_flags"></a>extra_rustc_flags |  Extra flags to pass to rustc in non-exec configuration   | List of strings | optional | <code>[]</code> |
- | <a id="rust_toolchain_tools_repository-global_allocator_library"></a>global_allocator_library |  Target that provides allocator functions when a global allocator is used with cc_common.link.   | String | optional | <code>""</code> |
- | <a id="rust_toolchain_tools_repository-iso_date"></a>iso_date |  The date of the tool (or None, if the version is a specific version).   | String | optional | <code>""</code> |
-+| <a id="rust_toolchain_tools_repository-opt_level"></a>opt_level |  Rustc optimization levels.   | <a href="https://bazel.build/rules/lib/dict">Dictionary: String -> String</a> | optional | <code>{}</code> |
- | <a id="rust_toolchain_tools_repository-repo_mapping"></a>repo_mapping |  A dictionary from local repository name to global repository name. This allows controls over workspace dependency resolution for dependencies of this repository.&lt;p&gt;For example, an entry <code>"@foo": "@bar"</code> declares that, for any time this repository depends on <code>@foo</code> (such as a dependency on <code>@foo//some:target</code>, it should actually resolve that dependency within globally-declared <code>@bar</code> (<code>@bar//some:target</code>).   | <a href="https://bazel.build/rules/lib/dict">Dictionary: String -> String</a> | required |  |
- | <a id="rust_toolchain_tools_repository-rustfmt_version"></a>rustfmt_version |  The version of the tool among "nightly", "beta", or an exact version.   | String | optional | <code>""</code> |
- | <a id="rust_toolchain_tools_repository-sha256s"></a>sha256s |  A dict associating tool subdirectories to sha256 hashes. See [rust_repositories](#rust_repositories) for more details.   | <a href="https://bazel.build/rules/lib/dict">Dictionary: String -> String</a> | optional | <code>{}</code> |
-@@ -302,8 +303,9 @@ rust_repositories(<a href="#rust_repositories-kwargs">kwargs</a>)
- <pre>
- rust_repository_set(<a href="#rust_repository_set-name">name</a>, <a href="#rust_repository_set-exec_triple">exec_triple</a>, <a href="#rust_repository_set-target_settings">target_settings</a>, <a href="#rust_repository_set-version">version</a>, <a href="#rust_repository_set-versions">versions</a>, <a href="#rust_repository_set-allocator_library">allocator_library</a>,
-                     <a href="#rust_repository_set-global_allocator_library">global_allocator_library</a>, <a href="#rust_repository_set-extra_target_triples">extra_target_triples</a>, <a href="#rust_repository_set-iso_date">iso_date</a>, <a href="#rust_repository_set-rustfmt_version">rustfmt_version</a>,
--                    <a href="#rust_repository_set-edition">edition</a>, <a href="#rust_repository_set-dev_components">dev_components</a>, <a href="#rust_repository_set-extra_rustc_flags">extra_rustc_flags</a>, <a href="#rust_repository_set-extra_exec_rustc_flags">extra_exec_rustc_flags</a>, <a href="#rust_repository_set-sha256s">sha256s</a>, <a href="#rust_repository_set-urls">urls</a>,
--                    <a href="#rust_repository_set-auth">auth</a>, <a href="#rust_repository_set-register_toolchain">register_toolchain</a>, <a href="#rust_repository_set-exec_compatible_with">exec_compatible_with</a>, <a href="#rust_repository_set-default_target_compatible_with">default_target_compatible_with</a>)
-+                    <a href="#rust_repository_set-edition">edition</a>, <a href="#rust_repository_set-dev_components">dev_components</a>, <a href="#rust_repository_set-extra_rustc_flags">extra_rustc_flags</a>, <a href="#rust_repository_set-extra_exec_rustc_flags">extra_exec_rustc_flags</a>, <a href="#rust_repository_set-opt_level">opt_level</a>,
-+                    <a href="#rust_repository_set-sha256s">sha256s</a>, <a href="#rust_repository_set-urls">urls</a>, <a href="#rust_repository_set-auth">auth</a>, <a href="#rust_repository_set-register_toolchain">register_toolchain</a>, <a href="#rust_repository_set-exec_compatible_with">exec_compatible_with</a>,
-+                    <a href="#rust_repository_set-default_target_compatible_with">default_target_compatible_with</a>)
- </pre>
- 
- Assembles a remote repository for the given toolchain params, produces a proxy repository     to contain the toolchain declaration, and registers the toolchains.
-@@ -327,6 +329,7 @@ Assembles a remote repository for the given toolchain params, produces a proxy r
- | <a id="rust_repository_set-dev_components"></a>dev_components |  Whether to download the rustc-dev components. Requires version to be "nightly".   |  `False` |
- | <a id="rust_repository_set-extra_rustc_flags"></a>extra_rustc_flags |  Dictionary of target triples to list of extra flags to pass to rustc in non-exec configuration.   |  `None` |
- | <a id="rust_repository_set-extra_exec_rustc_flags"></a>extra_exec_rustc_flags |  Extra flags to pass to rustc in exec configuration.   |  `None` |
-+| <a id="rust_repository_set-opt_level"></a>opt_level |  Dictionary of target triples to optimiztion config.   |  `None` |
- | <a id="rust_repository_set-sha256s"></a>sha256s |  A dict associating tool subdirectories to sha256 hashes. See [rust_repositories](#rust_repositories) for more details.   |  `None` |
- | <a id="rust_repository_set-urls"></a>urls |  A list of mirror urls containing the tools from the Rust-lang static file server. These must contain the '{}' used to substitute the tool being fetched (using .format).   |  `["https://static.rust-lang.org/dist/{}.tar.gz"]` |
- | <a id="rust_repository_set-auth"></a>auth |  Auth object compatible with repository_ctx.download to use when downloading files. See [repository_ctx.download](https://docs.bazel.build/versions/main/skylark/lib/repository_ctx.html#download) for more details.   |  `None` |
-@@ -343,8 +346,8 @@ Assembles a remote repository for the given toolchain params, produces a proxy r
- rust_toolchain_repository(<a href="#rust_toolchain_repository-name">name</a>, <a href="#rust_toolchain_repository-version">version</a>, <a href="#rust_toolchain_repository-exec_triple">exec_triple</a>, <a href="#rust_toolchain_repository-target_triple">target_triple</a>, <a href="#rust_toolchain_repository-exec_compatible_with">exec_compatible_with</a>,
-                           <a href="#rust_toolchain_repository-target_compatible_with">target_compatible_with</a>, <a href="#rust_toolchain_repository-target_settings">target_settings</a>, <a href="#rust_toolchain_repository-channel">channel</a>, <a href="#rust_toolchain_repository-allocator_library">allocator_library</a>,
-                           <a href="#rust_toolchain_repository-global_allocator_library">global_allocator_library</a>, <a href="#rust_toolchain_repository-iso_date">iso_date</a>, <a href="#rust_toolchain_repository-rustfmt_version">rustfmt_version</a>, <a href="#rust_toolchain_repository-edition">edition</a>,
--                          <a href="#rust_toolchain_repository-dev_components">dev_components</a>, <a href="#rust_toolchain_repository-extra_rustc_flags">extra_rustc_flags</a>, <a href="#rust_toolchain_repository-extra_exec_rustc_flags">extra_exec_rustc_flags</a>, <a href="#rust_toolchain_repository-sha256s">sha256s</a>, <a href="#rust_toolchain_repository-urls">urls</a>,
--                          <a href="#rust_toolchain_repository-auth">auth</a>)
-+                          <a href="#rust_toolchain_repository-dev_components">dev_components</a>, <a href="#rust_toolchain_repository-extra_rustc_flags">extra_rustc_flags</a>, <a href="#rust_toolchain_repository-extra_exec_rustc_flags">extra_exec_rustc_flags</a>, <a href="#rust_toolchain_repository-opt_level">opt_level</a>,
-+                          <a href="#rust_toolchain_repository-sha256s">sha256s</a>, <a href="#rust_toolchain_repository-urls">urls</a>, <a href="#rust_toolchain_repository-auth">auth</a>)
- </pre>
- 
- Assembles a remote repository for the given toolchain params, produces a proxy repository     to contain the toolchain declaration, and registers the toolchains.
-@@ -370,6 +373,7 @@ Assembles a remote repository for the given toolchain params, produces a proxy r
- | <a id="rust_toolchain_repository-dev_components"></a>dev_components |  Whether to download the rustc-dev components. Requires version to be "nightly". Defaults to False.   |  `False` |
- | <a id="rust_toolchain_repository-extra_rustc_flags"></a>extra_rustc_flags |  Extra flags to pass to rustc in non-exec configuration.   |  `None` |
- | <a id="rust_toolchain_repository-extra_exec_rustc_flags"></a>extra_exec_rustc_flags |  Extra flags to pass to rustc in exec configuration.   |  `None` |
-+| <a id="rust_toolchain_repository-opt_level"></a>opt_level |  Optimization level config for this toolchain.   |  `None` |
- | <a id="rust_toolchain_repository-sha256s"></a>sha256s |  A dict associating tool subdirectories to sha256 hashes. See [rust_repositories](#rust_repositories) for more details.   |  `None` |
- | <a id="rust_toolchain_repository-urls"></a>urls |  A list of mirror urls containing the tools from the Rust-lang static file server. These must contain the '{}' used to substitute the tool being fetched (using .format). Defaults to ['https://static.rust-lang.org/dist/{}.tar.gz']   |  `["https://static.rust-lang.org/dist/{}.tar.gz"]` |
- | <a id="rust_toolchain_repository-auth"></a>auth |  Auth object compatible with repository_ctx.download to use when downloading files. See [repository_ctx.download](https://docs.bazel.build/versions/main/skylark/lib/repository_ctx.html#download) for more details.   |  `None` |
-diff --git a/rust/private/repository_utils.bzl b/rust/private/repository_utils.bzl
-index 1f5a01aa..80c51ff8 100644
---- a/rust/private/repository_utils.bzl
-+++ b/rust/private/repository_utils.bzl
-@@ -253,6 +253,7 @@ rust_toolchain(
-     visibility = ["//visibility:public"],
-     extra_rustc_flags = {extra_rustc_flags},
-     extra_exec_rustc_flags = {extra_exec_rustc_flags},
-+    opt_level = {opt_level},
- )
- """
- 
-@@ -267,7 +268,8 @@ def BUILD_for_rust_toolchain(
-         include_llvm_tools,
-         stdlib_linkflags = None,
-         extra_rustc_flags = None,
--        extra_exec_rustc_flags = None):
-+        extra_exec_rustc_flags = None,
-+        opt_level = None):
-     """Emits a toolchain declaration to match an existing compiler and stdlib.
- 
-     Args:
-@@ -286,6 +288,7 @@ def BUILD_for_rust_toolchain(
-                                            None.
-         extra_rustc_flags (list, optional): Extra flags to pass to rustc in non-exec configuration.
-         extra_exec_rustc_flags (list, optional): Extra flags to pass to rustc in exec configuration.
-+        opt_level (dict, optional): Optimization level config for this toolchain.
- 
-     Returns:
-         str: A rendered template of a `rust_toolchain` declaration
-@@ -324,6 +327,7 @@ def BUILD_for_rust_toolchain(
-         llvm_profdata_label = llvm_profdata_label,
-         extra_rustc_flags = extra_rustc_flags,
-         extra_exec_rustc_flags = extra_exec_rustc_flags,
-+        opt_level = opt_level,
-     )
- 
- _build_file_for_toolchain_template = """\
-diff --git a/rust/repositories.bzl b/rust/repositories.bzl
-index b7c26b76..0cb61571 100644
---- a/rust/repositories.bzl
-+++ b/rust/repositories.bzl
-@@ -359,6 +359,7 @@ def _rust_toolchain_tools_repository_impl(ctx):
-         include_llvm_tools = include_llvm_tools,
-         extra_rustc_flags = ctx.attr.extra_rustc_flags,
-         extra_exec_rustc_flags = ctx.attr.extra_exec_rustc_flags,
-+        opt_level = ctx.attr.opt_level if ctx.attr.opt_level else None,
-     ))
- 
-     # Not all target triples are expected to have dev components
-@@ -413,6 +414,9 @@ rust_toolchain_tools_repository = repository_rule(
-         "iso_date": attr.string(
-             doc = "The date of the tool (or None, if the version is a specific version).",
-         ),
-+        "opt_level": attr.string_dict(
-+            doc = "Rustc optimization levels.",
-+        ),
-         "rustfmt_version": attr.string(
-             doc = "The version of the tool among \"nightly\", \"beta\", or an exact version.",
-         ),
-@@ -498,6 +502,7 @@ def rust_toolchain_repository(
-         dev_components = False,
-         extra_rustc_flags = None,
-         extra_exec_rustc_flags = None,
-+        opt_level = None,
-         sha256s = None,
-         urls = DEFAULT_STATIC_RUST_URL_TEMPLATES,
-         auth = None):
-@@ -523,6 +528,7 @@ def rust_toolchain_repository(
-             Requires version to be "nightly". Defaults to False.
-         extra_rustc_flags (list, optional): Extra flags to pass to rustc in non-exec configuration.
-         extra_exec_rustc_flags (list, optional): Extra flags to pass to rustc in exec configuration.
-+        opt_level (dict, optional): Optimization level config for this toolchain.
-         sha256s (str, optional): A dict associating tool subdirectories to sha256 hashes. See
-             [rust_repositories](#rust_repositories) for more details.
-         urls (list, optional): A list of mirror urls containing the tools from the Rust-lang static file server. These must contain the '{}' used to substitute the tool being fetched (using .format). Defaults to ['https://static.rust-lang.org/dist/{}.tar.gz']
-@@ -559,6 +565,7 @@ def rust_toolchain_repository(
-         dev_components = dev_components,
-         extra_rustc_flags = extra_rustc_flags,
-         extra_exec_rustc_flags = extra_exec_rustc_flags,
-+        opt_level = opt_level,
-         sha256s = sha256s,
-         urls = urls,
-         auth = auth,
-@@ -888,6 +895,7 @@ def rust_repository_set(
-         dev_components = False,
-         extra_rustc_flags = None,
-         extra_exec_rustc_flags = None,
-+        opt_level = None,
-         sha256s = None,
-         urls = DEFAULT_STATIC_RUST_URL_TEMPLATES,
-         auth = None,
-@@ -918,6 +926,7 @@ def rust_repository_set(
-             Requires version to be "nightly".
-         extra_rustc_flags (dict, list, optional): Dictionary of target triples to list of extra flags to pass to rustc in non-exec configuration.
-         extra_exec_rustc_flags (list, optional): Extra flags to pass to rustc in exec configuration.
-+        opt_level (dict, dict, optional): Dictionary of target triples to optimiztion config.
-         sha256s (str, optional): A dict associating tool subdirectories to sha256 hashes. See
-             [rust_repositories](#rust_repositories) for more details.
-         urls (list, optional): A list of mirror urls containing the tools from the Rust-lang static file server. These
-@@ -971,6 +980,7 @@ def rust_repository_set(
-             exec_triple = exec_triple,
-             extra_exec_rustc_flags = extra_exec_rustc_flags,
-             extra_rustc_flags = extra_rustc_flags.get(toolchain.target_triple) if extra_rustc_flags != None else None,
-+            opt_level = opt_level.get(toolchain.target_triple) if opt_level != None else None,
-             target_settings = target_settings,
-             iso_date = toolchain.channel.iso_date,
-             rustfmt_version = rustfmt_version,
--- 
-2.41.0.255.g8b1d071c50-goog
-
diff --git a/pw_rust/bazel_patches/0003-PROTOTYPE-Add-ability-to-document-multiple-crates-at.patch b/pw_rust/bazel_patches/0002-PROTOTYPE-Add-ability-to-document-multiple-crates-at.patch
similarity index 94%
rename from pw_rust/bazel_patches/0003-PROTOTYPE-Add-ability-to-document-multiple-crates-at.patch
rename to pw_rust/bazel_patches/0002-PROTOTYPE-Add-ability-to-document-multiple-crates-at.patch
index 701ad44..c457c8d 100644
--- a/pw_rust/bazel_patches/0003-PROTOTYPE-Add-ability-to-document-multiple-crates-at.patch
+++ b/pw_rust/bazel_patches/0002-PROTOTYPE-Add-ability-to-document-multiple-crates-at.patch
@@ -1,18 +1,17 @@
-From ab7763c6cd48ff6a2b653dd9ea8560e5ad7b1ff0 Mon Sep 17 00:00:00 2001
+From 0b48b44de59c8fd85f3ec3267061dd3610503655 Mon Sep 17 00:00:00 2001
 From: Erik Gilling <konkers@google.com>
 Date: Mon, 27 Mar 2023 16:48:26 +0000
-Subject: [PATCH 3/3] PROTOTYPE: Add ability to document multiple crates at
+Subject: [PATCH 2/2] PROTOTYPE: Add ability to document multiple crates at
  once
 
 ---
  rust/defs.bzl                     |   4 +
  rust/private/rustdoc.bzl          | 203 ++++++++++++++++++++++++++++++
- util/BUILD.bazel                  |  12 ++
  util/capture_args/BUILD.bazel     |   8 ++
  util/capture_args/capture_args.rs |  28 +++++
  util/run_scripts/BUILD.bazel      |   8 ++
  util/run_scripts/run_scripts.rs   |  40 ++++++
- 7 files changed, 303 insertions(+)
+ 6 files changed, 291 insertions(+)
  create mode 100644 util/capture_args/BUILD.bazel
  create mode 100644 util/capture_args/capture_args.rs
  create mode 100644 util/run_scripts/BUILD.bazel
@@ -258,26 +257,6 @@
 +    ],
 +    incompatible_use_toolchain_transition = True,
 +)
-diff --git a/util/BUILD.bazel b/util/BUILD.bazel
-index 8502870b..76e1d0d5 100644
---- a/util/BUILD.bazel
-+++ b/util/BUILD.bazel
-@@ -9,3 +9,15 @@ filegroup(
-     srcs = ["collect_coverage.sh"],
-     visibility = ["//visibility:public"],
- )
-+
-+sh_binary(
-+    name = "dump_args",
-+    srcs = ["dump_args.sh"],
-+    visibility = ["//visibility:public"],
-+)
-+
-+sh_binary(
-+    name = "run_all",
-+    srcs = ["run_all.sh"],
-+    visibility = ["//visibility:public"],
-+)
 diff --git a/util/capture_args/BUILD.bazel b/util/capture_args/BUILD.bazel
 new file mode 100644
 index 00000000..0d7146c2
@@ -387,5 +366,5 @@
 +    }
 +}
 -- 
-2.41.0.487.g6d72f3e995-goog
+2.41.0.694.ge786442a9b-goog
 
diff --git a/pw_rust/docs.rst b/pw_rust/docs.rst
index b917b54..d2d0a3c 100644
--- a/pw_rust/docs.rst
+++ b/pw_rust/docs.rst
@@ -19,7 +19,7 @@
 the following commands where ``PLATFORM`` is one of ``lm3s6965evb`` or
 ``microbit``.
 
-.. code:: bash
+.. code-block:: bash
 
    $ bazel build //pw_rust/examples/embedded_hello:hello \
      --platforms //pw_build/platforms:${PLATFORM} \
@@ -42,13 +42,13 @@
 To build the sample rust targets, you need to enable
 ``pw_rust_ENABLE_EXPERIMENTAL_BUILD``:
 
-.. code:: bash
+.. code-block:: bash
 
    $ gn gen out --args="pw_rust_ENABLE_EXPERIMENTAL_BUILD=true"
 
 Once that is set, you can build and run the ``hello`` example:
 
-.. code:: bash
+.. code-block:: bash
 
    $ ninja -C out host_clang_debug/obj/pw_rust/example/bin/hello
    $ ./out/host_clang_debug/obj/pw_rust/examples/host_executable/bin/hello
diff --git a/pw_snapshot/BUILD.bazel b/pw_snapshot/BUILD.bazel
index 414f193..14762bd 100644
--- a/pw_snapshot/BUILD.bazel
+++ b/pw_snapshot/BUILD.bazel
@@ -17,8 +17,8 @@
     "pw_cc_library",
     "pw_cc_test",
 )
-load("//pw_protobuf_compiler:pw_proto_library.bzl", "pw_proto_library")
 load("//pw_build/bazel_internal:py_proto_library.bzl", "py_proto_library")
+load("//pw_protobuf_compiler:pw_proto_library.bzl", "pw_proto_library")
 
 package(default_visibility = ["//visibility:public"])
 
diff --git a/pw_snapshot/proto_format.rst b/pw_snapshot/proto_format.rst
index 14e450d..ef1a865 100644
--- a/pw_snapshot/proto_format.rst
+++ b/pw_snapshot/proto_format.rst
@@ -10,19 +10,19 @@
 
 .. code-block::
 
-  // This message uses field numbers 1, 2, and 3.
-  message BasicLog {
-    string message = 1;
-    LogLevel level = 2;
-    int64 timestamp = 3;
-  }
+   // This message uses field numbers 1, 2, and 3.
+   message BasicLog {
+     string message = 1;
+     LogLevel level = 2;
+     int64 timestamp = 3;
+   }
 
-  // This message uses field numbers 16 and 17, which are mutually exclusive
-  // to the numbers used in BasicLog.
-  message ExtendedLog {
-    string file_name = 16;
-    uint32 line_nubmer = 17;
-  }
+   // This message uses field numbers 16 and 17, which are mutually exclusive
+   // to the numbers used in BasicLog.
+   message ExtendedLog {
+     string file_name = 16;
+     uint32 line_nubmer = 17;
+   }
 
 In the above example, a BasicLog and ExtendedLog can be encoded to the same
 buffer and then be decoded without causing any problems. What breaks
@@ -32,24 +32,24 @@
 
 .. code-block::
 
-  message BasicLog {
-    string message = 1;
-    LogLevel level = 2;
-    int64 timestamp = 3;
+   message BasicLog {
+     string message = 1;
+     LogLevel level = 2;
+     int64 timestamp = 3;
 
-    // ExtendedLog uses these field numbers. These field numbers should never
-    // be used by BasicLog.
-    reserved 16 to max;
-  }
+     // ExtendedLog uses these field numbers. These field numbers should never
+     // be used by BasicLog.
+     reserved 16 to max;
+   }
 
-  message ExtendedLog {
-    // BasicLog uses these field numbers. These field numbers should never
-    // be used by ExtendedLog.
-    reserved 1 to 15;
+   message ExtendedLog {
+     // BasicLog uses these field numbers. These field numbers should never
+     // be used by ExtendedLog.
+     reserved 1 to 15;
 
-    string file_name = 16;
-    uint32 line_nubmer = 17;
-  }
+     string file_name = 16;
+     uint32 line_nubmer = 17;
+   }
 
 This is exactly how the Snapshot proto is set up. While a SnapshotMetadata proto
 message provides a good portion of the valuable snapshot contents, the larger
@@ -69,21 +69,21 @@
 Example:
 .. code-block::
 
-  // snapshot.proto
-  message Snapshot {
-    ...
-    // Information about allocated Thread.
-    repeated pw.thread.Thread threads = 18;
-  }
+   // snapshot.proto
+   message Snapshot {
+     ...
+     // Information about allocated Thread.
+     repeated pw.thread.Thread threads = 18;
+   }
 
-  // thread.proto
+   // thread.proto
 
-  // This message overlays the pw.snapshot.Snapshot proto. It's valid to encode
-  // this message to the same sink that a Snapshot proto is being written to.
-  message SnapshotThread {
-    // Thread information.
-    repeated pw.thread.Thread threads = 18;
-  }
+   // This message overlays the pw.snapshot.Snapshot proto. It's valid to encode
+   // this message to the same sink that a Snapshot proto is being written to.
+   message SnapshotThread {
+     // Thread information.
+     repeated pw.thread.Thread threads = 18;
+   }
 
 It is **critical** that the SnapshotThread message is in sync with the larger
 Snapshot proto. If the type or field numbers are different, the proto decode
diff --git a/pw_software_update/BUILD.bazel b/pw_software_update/BUILD.bazel
index c5b08cc..002c7cd 100644
--- a/pw_software_update/BUILD.bazel
+++ b/pw_software_update/BUILD.bazel
@@ -12,13 +12,13 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-load("//pw_build/bazel_internal:py_proto_library.bzl", "py_proto_library")
 load("@rules_proto//proto:defs.bzl", "proto_library")
 load(
     "//pw_build:pigweed.bzl",
     "pw_cc_library",
     "pw_cc_test",
 )
+load("//pw_build/bazel_internal:py_proto_library.bzl", "py_proto_library")
 load("//pw_protobuf_compiler:pw_proto_library.bzl", "pw_proto_library")
 
 package(default_visibility = ["//visibility:public"])
diff --git a/pw_software_update/design.rst b/pw_software_update/design.rst
index b431a91..81fa1b2 100644
--- a/pw_software_update/design.rst
+++ b/pw_software_update/design.rst
@@ -23,26 +23,28 @@
 
 .. mermaid::
 
-       flowchart LR
-              A[/Source/] --> |Build| B[/Target files/]
-              B --> |Assemble & Sign| C[(Update bundle)]
-              C --> |Publish| D[(Available updates)]
-              D --> |OTA| E[Device]
+   flowchart LR
+          A[/Source/] --> |Build| B[/Target files/]
+          B --> |Assemble & Sign| C[(Update bundle)]
+          C --> |Publish| D[(Available updates)]
+          D --> |OTA| E[Device]
 
 Update bundles
 ^^^^^^^^^^^^^^
 
 Update bundles represent software releases packaged ready for delivery. A bundle
-is essentially an archived folder matching the following structure::
+is essentially an archived folder matching the following structure:
 
-  /
-  ├── root_metadata
-  ├── targets_metadata
-  └── targets
-      ├── release_notes.txt
-      ├── manifest.txt
-      ├── rtos.bin
-      └── app.bin
+.. code-block:: text
+
+   /
+   ├── root_metadata
+   ├── targets_metadata
+   └── targets
+       ├── release_notes.txt
+       ├── manifest.txt
+       ├── rtos.bin
+       └── app.bin
 
 Bundles are encoded as serialized "protocol buffers".
 
@@ -55,10 +57,10 @@
 
 .. mermaid::
 
-       flowchart LR
-              A[Verified boot] --> |Embed & Verify| B[/Root key/]
-              B --> |Delegate & Rotate| C[/Targets key/]
-              C --> |Sign| D[/Target files/]
+   flowchart LR
+          A[Verified boot] --> |Embed & Verify| B[/Root key/]
+          B --> |Delegate & Rotate| C[/Targets key/]
+          C --> |Sign| D[/Target files/]
 
 
 The "root" role delegates the "targets" role to directly authorize each release.
@@ -72,7 +74,6 @@
 
 Signing service
 ^^^^^^^^^^^^^^^
-
 Production signing keys MUST be kept secure and clean. That means we must
 carefully control access, log usage details, and revoke the key if it was
 (accidentally) used to sign a "questionable" build.
@@ -81,18 +82,18 @@
 
 .. mermaid::
 
-       sequenceDiagram
-              actor Releaser
+   sequenceDiagram
+     actor Releaser
 
-              Releaser->>Signer: Sign my bundle with my key, please.
+     Releaser->>Signer: Sign my bundle with my key, please.
 
-              activate Signer
+     activate Signer
 
-              Signer->>Signer: Check permission.
-              Signer->>Signer: Validate & sign bundle.
-              Signer->>Signer: Log action. Email alerts.
-              Signer-->>Releaser: Done!
-              deactivate Signer
+     Signer->>Signer: Check permission.
+     Signer->>Signer: Validate & sign bundle.
+     Signer->>Signer: Log action. Email alerts.
+     Signer-->>Releaser: Done!
+     deactivate Signer
 
 We don't yet have a public-facing service. External users should source their
 own solution.
@@ -102,9 +103,8 @@
 
 .. mermaid::
 
-       flowchart LR
-              A[(Incoming bundle)] --> |UpdateBundleAccessor| B[/Verified target files/]
-
+   flowchart LR
+     A[(Incoming bundle)] --> |UpdateBundleAccessor| B[/Verified target files/]
 
 The :cpp:type:`UpdateBundleAccessor` decodes, verifies, and exposes the target
 files from an incoming bundle. This class hides the details of the bundle
@@ -122,43 +122,43 @@
 
 .. mermaid::
 
-       stateDiagram-v2
-       direction LR
+   stateDiagram-v2
+   direction LR
 
-       [*] --> Inactive
+   [*] --> Inactive
 
-       Inactive --> Transferring: Start()
-       Inactive --> Finished: Start() error
+   Inactive --> Transferring: Start()
+   Inactive --> Finished: Start() error
 
-       Transferring --> Transferring: GetStatus()
-       Transferring --> Transferred
-       Transferring --> Aborting: Abort()
-       Transferring --> Finished: Transfer error
+   Transferring --> Transferring: GetStatus()
+   Transferring --> Transferred
+   Transferring --> Aborting: Abort()
+   Transferring --> Finished: Transfer error
 
-       Transferred --> Transferred: GetStatus()
-       Transferred --> Verifying: Verify()
-       Transferred --> Verifying: Apply()
-       Transferred --> Aborting: Abort()
+   Transferred --> Transferred: GetStatus()
+   Transferred --> Verifying: Verify()
+   Transferred --> Verifying: Apply()
+   Transferred --> Aborting: Abort()
 
-       Verifying --> Verifying: GetStatus()
-       Verifying --> Verified
-       Verifying --> Aborting: Abort()
+   Verifying --> Verifying: GetStatus()
+   Verifying --> Verified
+   Verifying --> Aborting: Abort()
 
-       Verified --> Verified: GetStatus()
-       Verified --> Applying: Apply()
-       Verified --> Aborting: Abort()
+   Verified --> Verified: GetStatus()
+   Verified --> Applying: Apply()
+   Verified --> Aborting: Abort()
 
-       Applying --> Applying: GetStatus()
-       Applying --> Finished: Apply() OK
-       Applying --> Finished: Apply() error
+   Applying --> Applying: GetStatus()
+   Applying --> Finished: Apply() OK
+   Applying --> Finished: Apply() error
 
-       Aborting --> Aborting: GetStatus()
-       Aborting --> Finished: Abort() OK
-       Aborting --> Finished: Abort() error
+   Aborting --> Aborting: GetStatus()
+   Aborting --> Finished: Abort() OK
+   Aborting --> Finished: Abort() error
 
-       Finished --> Finished: GetStatus()
-       Finished --> Inactive: Reset()
-       Finished --> Finished: Reset() error
+   Finished --> Finished: GetStatus()
+   Finished --> Inactive: Reset()
+   Finished --> Finished: Reset() error
 
 
 Tooling
@@ -169,37 +169,36 @@
 
 The python package
 ~~~~~~~~~~~~~~~~~~
-
 ``pw_software_update`` comes with a python package of the same name, providing
 the following functionalities.
 
-  - Local signing key generation for development.
-  - TUF root metadata generation and signing.
-  - Bundle generation, signing, and verification.
-  - Signing server integration.
+- Local signing key generation for development.
+- TUF root metadata generation and signing.
+- Bundle generation, signing, and verification.
+- Signing server integration.
 
 A typical use of the package is for build system integration.
 
-.. code:: python
+.. code-block:: text
 
-       Help on package pw_software_update:
+   Help on package pw_software_update:
 
-       NAME
-              pw_software_update - pw_software_update
+   NAME
+          pw_software_update - pw_software_update
 
-       PACKAGE CONTENTS
-              bundled_update_pb2
-              cli
-              dev_sign
-              generate_test_bundle
-              keys
-              metadata
-              remote_sign
-              root_metadata
-              tuf_pb2
-              update_bundle
-              update_bundle_pb2
-              verify
+   PACKAGE CONTENTS
+          bundled_update_pb2
+          cli
+          dev_sign
+          generate_test_bundle
+          keys
+          metadata
+          remote_sign
+          root_metadata
+          tuf_pb2
+          update_bundle
+          update_bundle_pb2
+          verify
 
 
 The command line utility
@@ -213,24 +212,24 @@
 one. In the future you will be able to use the CLI to update a reference
 target.
 
-.. code:: bash
+.. code-block:: text
 
-       usage: pw update [sub-commands]
+   usage: pw update [sub-commands]
 
-       sub-commands:
+   sub-commands:
 
-              generate-key
-              create-root-metadata
-              sign-root-metadata
-              inspect-root-metadata
-              create-empty-bundle
-              add-root-metadata-to-bundle
-              add-file-to-bundle
-              sign-bundle
-              inspect-bundle
+          generate-key
+          create-root-metadata
+          sign-root-metadata
+          inspect-root-metadata
+          create-empty-bundle
+          add-root-metadata-to-bundle
+          add-file-to-bundle
+          sign-bundle
+          inspect-bundle
 
-       options:
-              -h, --help            show this help message and exit
+   options:
+          -h, --help            show this help message and exit
 
 
 To learn more, see :ref:`module-pw_software_update-cli`.
diff --git a/pw_software_update/get_started.rst b/pw_software_update/get_started.rst
index 753c30f..b7763f8 100644
--- a/pw_software_update/get_started.rst
+++ b/pw_software_update/get_started.rst
@@ -21,7 +21,7 @@
    ``pw update`` command is not yet visible outside the Pigweed
    directory.
 
-.. code:: bash
+.. code-block:: bash
 
     $ cd ~/pigweed
     $ source activate.sh
@@ -38,7 +38,7 @@
    `Google Cloud KMS <https://cloud.google.com/security-key-management>`_)
    to generate, control access to, and log usage of software signing keys.
 
-.. code:: bash
+.. code-block:: bash
 
     $ mkdir keys
     $ pw update generate-key keys/root_key
@@ -54,7 +54,7 @@
 3. Now that we have the keys, let's find them an owner by creating the root
    metadata.
 
-.. code:: bash
+.. code-block:: bash
 
     # Assign a single key to each "root" and "targets" roles.
     $ pw update create-root-metadata --append-root-key keys/root_key.pub \
@@ -69,7 +69,7 @@
 
 4. Now we are ready to create a bundle.
 
-.. code:: bash
+.. code-block:: bash
 
     # Start with an empty bundle.
     $ pw update create-empty-bundle my_bundle.pb
@@ -109,7 +109,7 @@
    Here we are using ``python3 -m pw_software_update.verify`` because the
    ``pw verify-bundle`` command is WIP.
 
-.. code:: bash
+.. code-block:: bash
 
     $ python3 -m pw_software_update.verify --incoming my_bundle.pb
        Verifying: my_bundle.pb
diff --git a/pw_spi/BUILD.bazel b/pw_spi/BUILD.bazel
index e51d1cd..bb8335a 100644
--- a/pw_spi/BUILD.bazel
+++ b/pw_spi/BUILD.bazel
@@ -43,6 +43,7 @@
     includes = ["public"],
     deps = [
         "//pw_bytes",
+        "//pw_function",
         "//pw_status",
     ],
 )
@@ -113,6 +114,7 @@
     ],
     deps = [
         ":device",
+        ":responder",
         "//pw_sync:mutex",
         "//pw_unit_test",
     ],
diff --git a/pw_spi/BUILD.gn b/pw_spi/BUILD.gn
index f5a6362..cc1d0b2 100644
--- a/pw_spi/BUILD.gn
+++ b/pw_spi/BUILD.gn
@@ -48,6 +48,7 @@
   public = [ "public/pw_spi/responder.h" ]
   public_deps = [
     "$dir_pw_bytes",
+    "$dir_pw_function",
     "$dir_pw_status",
   ]
 }
@@ -116,6 +117,7 @@
   sources = [ "spi_test.cc" ]
   deps = [
     ":device",
+    ":responder",
     "$dir_pw_sync:mutex",
   ]
 }
diff --git a/pw_spi/CMakeLists.txt b/pw_spi/CMakeLists.txt
index 7d2d448..4f9ac00 100644
--- a/pw_spi/CMakeLists.txt
+++ b/pw_spi/CMakeLists.txt
@@ -32,6 +32,7 @@
     public
   PUBLIC_DEPS
     pw_bytes
+    pw_function
     pw_status
 )
 
diff --git a/pw_spi/public/pw_spi/responder.h b/pw_spi/public/pw_spi/responder.h
index 576c761..283c341 100644
--- a/pw_spi/public/pw_spi/responder.h
+++ b/pw_spi/public/pw_spi/responder.h
@@ -33,7 +33,7 @@
   // A value of CANCELLED for the Status parameter indicates Abort() was called.
   // Partially transferred data may be passed in that case as well.
   // Other Status values are implementer defined.
-  void SetCompletionHandler(Function<ByteSpan, Status> callback);
+  void SetCompletionHandler(Function<void(ByteSpan, Status)> callback);
 
   // `tx_data` is queued for tx when called, but only transmitted when
   //   the initiator starts the next transaction. It's up to the implementer to
diff --git a/pw_spi/spi_test.cc b/pw_spi/spi_test.cc
index f3b42ae..9deb7d5 100644
--- a/pw_spi/spi_test.cc
+++ b/pw_spi/spi_test.cc
@@ -19,6 +19,7 @@
 #include "pw_spi/chip_selector.h"
 #include "pw_spi/device.h"
 #include "pw_spi/initiator.h"
+#include "pw_spi/responder.h"
 #include "pw_status/status.h"
 #include "pw_sync/borrow.h"
 #include "pw_sync/mutex.h"
@@ -63,6 +64,17 @@
   Device device_;
 };
 
+class SpiResponderTestDevice : public ::testing::Test {
+ public:
+  SpiResponderTestDevice() : responder_() {}
+
+ private:
+  // Stub SPI Responder, used to exercise public API surface.
+  class TestResponder : public Responder {};
+
+  TestResponder responder_;
+};
+
 // Simple test ensuring the SPI HAL compiles
 TEST_F(SpiTestDevice, CompilationSucceeds) {
   // arrange
@@ -71,5 +83,8 @@
   EXPECT_TRUE(true);
 }
 
+// Simple test ensuring the SPI Responder HAL compiles
+TEST_F(SpiResponderTestDevice, CompilationSucceeds) { EXPECT_TRUE(true); }
+
 }  // namespace
 }  // namespace pw::spi
diff --git a/pw_status/docs.rst b/pw_status/docs.rst
index 3453857..3432361 100644
--- a/pw_status/docs.rst
+++ b/pw_status/docs.rst
@@ -392,21 +392,21 @@
 
 .. code-block:: cpp
 
-  Status overall_status;
-  for (Sector& sector : sectors) {
-    Status erase_status = sector.Erase();
-    if (!overall_status.ok()) {
-      overall_status = erase_status;
-    }
+   Status overall_status;
+   for (Sector& sector : sectors) {
+     Status erase_status = sector.Erase();
+     if (!overall_status.ok()) {
+       overall_status = erase_status;
+     }
 
-    if (erase_status.ok()) {
-      Status header_write_status = sector.WriteHeader();
-      if (!overall_status.ok()) {
-        overall_status = header_write_status;
-      }
-    }
-  }
-  return overall_status;
+     if (erase_status.ok()) {
+       Status header_write_status = sector.WriteHeader();
+       if (!overall_status.ok()) {
+         overall_status = header_write_status;
+       }
+     }
+   }
+   return overall_status;
 
 :cpp:class:`pw::Status` has a :cpp:func:`pw::Status::Update()` helper function
 that does exactly this to reduce visual clutter and succinctly highlight the
@@ -414,16 +414,16 @@
 
 .. code-block:: cpp
 
-  Status overall_status;
-  for (Sector& sector : sectors) {
-    Status erase_status = sector.Erase();
-    overall_status.Update(erase_status);
+   Status overall_status;
+   for (Sector& sector : sectors) {
+     Status erase_status = sector.Erase();
+     overall_status.Update(erase_status);
 
-    if (erase_status.ok()) {
-      overall_status.Update(sector.WriteHeader());
-    }
-  }
-  return overall_status;
+     if (erase_status.ok()) {
+       overall_status.Update(sector.WriteHeader());
+     }
+   }
+   return overall_status;
 
 Unused result warnings
 ----------------------
@@ -460,16 +460,16 @@
 ``pw::StatusWithSize`` values may be created with functions similar to
 ``pw::Status``. For example,
 
-  .. code-block:: cpp
+.. code-block:: cpp
 
-    // An OK StatusWithSize with a size of 123.
-    StatusWithSize(123)
+   // An OK StatusWithSize with a size of 123.
+   StatusWithSize(123)
 
-    // A NOT_FOUND StatusWithSize with a size of 0.
-    StatusWithSize::NotFound()
+   // A NOT_FOUND StatusWithSize with a size of 0.
+   StatusWithSize::NotFound()
 
-    // A RESOURCE_EXHAUSTED StatusWithSize with a size of 10.
-    StatusWithSize::ResourceExhausted(10)
+   // A RESOURCE_EXHAUSTED StatusWithSize with a size of 10.
+   StatusWithSize::ResourceExhausted(10)
 
 ------
 PW_TRY
@@ -483,19 +483,19 @@
 
 .. code-block:: cpp
 
-  Status PwTryExample() {
-    PW_TRY(FunctionThatReturnsStatus());
-    PW_TRY(FunctionThatReturnsStatusWithSize());
+   Status PwTryExample() {
+     PW_TRY(FunctionThatReturnsStatus());
+     PW_TRY(FunctionThatReturnsStatusWithSize());
 
-    // Do something, only executed if both functions above return OK.
-  }
+     // Do something, only executed if both functions above return OK.
+   }
 
-  StatusWithSize PwTryWithSizeExample() {
-    PW_TRY_WITH_SIZE(FunctionThatReturnsStatus());
-    PW_TRY_WITH_SIZE(FunctionThatReturnsStatusWithSize());
+   StatusWithSize PwTryWithSizeExample() {
+     PW_TRY_WITH_SIZE(FunctionThatReturnsStatus());
+     PW_TRY_WITH_SIZE(FunctionThatReturnsStatusWithSize());
 
-    // Do something, only executed if both functions above return OK.
-  }
+     // Do something, only executed if both functions above return OK.
+   }
 
 ``PW_TRY_ASSIGN`` is for working with ``StatusWithSize`` objects in in functions
 that return Status. It is similar to ``PW_TRY`` with the addition of assigning
@@ -503,13 +503,13 @@
 
 .. code-block:: cpp
 
-  Status PwTryAssignExample() {
-    size_t size_value
-    PW_TRY_ASSIGN(size_value, FunctionThatReturnsStatusWithSize());
+   Status PwTryAssignExample() {
+     size_t size_value
+     PW_TRY_ASSIGN(size_value, FunctionThatReturnsStatusWithSize());
 
-    // Do something that uses size_value. size_value is only assigned and this
-    // following code executed if the PW_TRY_ASSIGN function above returns OK.
-  }
+     // Do something that uses size_value. size_value is only assigned and this
+     // following code executed if the PW_TRY_ASSIGN function above returns OK.
+   }
 
 ------
 Zephyr
diff --git a/pw_stm32cube_build/py/pw_stm32cube_build/find_files.py b/pw_stm32cube_build/py/pw_stm32cube_build/find_files.py
index 549ca50..7e69303 100644
--- a/pw_stm32cube_build/py/pw_stm32cube_build/find_files.py
+++ b/pw_stm32cube_build/py/pw_stm32cube_build/find_files.py
@@ -239,7 +239,7 @@
 
 def get_sources_and_headers(
     files: List[str], stm32cube_path: pathlib.Path
-) -> Tuple[List[str], List[str]]:
+) -> Tuple[List[pathlib.Path], List[pathlib.Path]]:
     """Gets list of all sources and headers needed to build the stm32cube hal.
 
     Args:
@@ -265,7 +265,7 @@
         files,
     )
 
-    rebase_path = lambda f: str(stm32cube_path / f)
+    rebase_path = lambda f: pathlib.Path(stm32cube_path / f)
     return list(map(rebase_path, source_files)), list(
         map(rebase_path, header_files)
     )
@@ -304,13 +304,10 @@
     (family, defines, name) = parse_product_str(product_str)
 
     family_header_path = list(
-        filter(lambda p: p.endswith(f'/{family}.h'), headers)
+        filter(lambda p: p.name == f'{family}.h', headers)
     )[0]
 
-    with open(family_header_path, 'rb') as family_header:
-        family_header_str = family_header.read().decode(
-            'utf-8', errors='ignore'
-        )
+    family_header_str = family_header_path.read_text('utf-8', errors='ignore')
 
     define = select_define(defines, family_header_str)
 
diff --git a/pw_stm32cube_build/py/tests/find_files_test.py b/pw_stm32cube_build/py/tests/find_files_test.py
index 364e2b0..7ee6d5c 100644
--- a/pw_stm32cube_build/py/tests/find_files_test.py
+++ b/pw_stm32cube_build/py/tests/find_files_test.py
@@ -319,8 +319,8 @@
         self.assertSetEqual(
             set(
                 [
-                    str(path / 'hal_driver/Src/stm32f4xx_hal_adc.c'),
-                    str(path / 'hal_driver/Src/stm32f4xx_hal_eth.c'),
+                    path / 'hal_driver/Src/stm32f4xx_hal_adc.c',
+                    path / 'hal_driver/Src/stm32f4xx_hal_eth.c',
                 ]
             ),
             set(sources),
@@ -328,11 +328,11 @@
         self.assertSetEqual(
             set(
                 [
-                    str(path / 'cmsis_core/Include/core_cm4.h'),
-                    str(path / 'cmsis_device/Include/stm32f4xx.h'),
-                    str(path / 'cmsis_device/Include/stm32f439xx.h'),
-                    str(path / 'hal_driver/Inc/stm32f4xx_hal_eth.h'),
-                    str(path / 'hal_driver/Inc/stm32f4xx_hal.h'),
+                    path / 'cmsis_core/Include/core_cm4.h',
+                    path / 'cmsis_device/Include/stm32f4xx.h',
+                    path / 'cmsis_device/Include/stm32f439xx.h',
+                    path / 'hal_driver/Inc/stm32f4xx_hal_eth.h',
+                    path / 'hal_driver/Inc/stm32f4xx_hal.h',
                 ]
             ),
             set(headers),
diff --git a/pw_stream/docs.rst b/pw_stream/docs.rst
index 9bd747c..a53f96d 100644
--- a/pw_stream/docs.rst
+++ b/pw_stream/docs.rst
@@ -354,7 +354,7 @@
 
 An alternative approach is to have the reading, writing, and seeking portions of
 the interface provided by different entities. This is how Go's `io
-<https://pkg.go.dev/io package>`_ and C++'s `input/output library
+package <https://pkg.go.dev/io>`_ and C++'s `input/output library
 <https://en.cppreference.com/w/cpp/io>`_ are structured.
 
 We chose to use a single base class for a few reasons:
diff --git a/pw_stream/public/pw_stream/socket_stream.h b/pw_stream/public/pw_stream/socket_stream.h
index 4666d91..c7f1dec 100644
--- a/pw_stream/public/pw_stream/socket_stream.h
+++ b/pw_stream/public/pw_stream/socket_stream.h
@@ -26,6 +26,8 @@
 class SocketStream : public NonSeekableReaderWriter {
  public:
   constexpr SocketStream() = default;
+  // Construct a SocketStream directly from a file descriptor.
+  explicit SocketStream(int connection_fd) : connection_fd_(connection_fd) {}
 
   // SocketStream objects are moveable but not copyable.
   SocketStream& operator=(SocketStream&& other) {
diff --git a/pw_stream/socket_stream.cc b/pw_stream/socket_stream.cc
index 266bb46..a50b2e5 100644
--- a/pw_stream/socket_stream.cc
+++ b/pw_stream/socket_stream.cc
@@ -129,6 +129,10 @@
 StatusWithSize SocketStream::DoRead(ByteSpan dest) {
   ssize_t bytes_rcvd = recv(connection_fd_, dest.data(), dest.size_bytes(), 0);
   if (bytes_rcvd == 0) {
+    // Remote peer has closed the connection.
+    Close();
+    return StatusWithSize::OutOfRange();
+  } else if (bytes_rcvd < 0) {
     if (errno == EAGAIN || errno == EWOULDBLOCK) {
       // Socket timed out when trying to read.
       // This should only occur if SO_RCVTIMEO was configured to be nonzero, or
@@ -136,10 +140,6 @@
       // blocking when performing reads or writes.
       return StatusWithSize::ResourceExhausted();
     }
-    // Remote peer has closed the connection.
-    Close();
-    return StatusWithSize::OutOfRange();
-  } else if (bytes_rcvd < 0) {
     return StatusWithSize::Unknown();
   }
   return StatusWithSize(bytes_rcvd);
diff --git a/pw_string/BUILD.bazel b/pw_string/BUILD.bazel
index 8fb043c..c0ccfe8 100644
--- a/pw_string/BUILD.bazel
+++ b/pw_string/BUILD.bazel
@@ -75,7 +75,7 @@
     hdrs = ["public/pw_string/string.h"],
     includes = ["public"],
     deps = [
-        "//pw_assert:facade",
+        "//pw_assert",
         "//pw_polyfill",
     ],
 )
@@ -104,7 +104,7 @@
     includes = ["public"],
     deps = [
         ":string",
-        "//pw_assert:facade",
+        "//pw_assert",
         "//pw_result",
         "//pw_span",
         "//pw_status",
diff --git a/pw_string/docs.rst b/pw_string/docs.rst
index c74d69d..4be6d93 100644
--- a/pw_string/docs.rst
+++ b/pw_string/docs.rst
@@ -17,7 +17,7 @@
    *Pick three!* If you know how to use ``std::string``, just use
    :cpp:type:`pw::InlineString` in the same way:
 
-   .. code:: cpp
+   .. code-block:: cpp
 
       // Create a string from a C-style char array; storage is pre-allocated!
       pw::InlineString<16> my_string = "Literally";
@@ -33,7 +33,7 @@
    ``std::ostringstream``, but with most of the efficiency and memory benefits
    of :cpp:type:`pw::InlineString`:
 
-   .. code:: cpp
+   .. code-block:: cpp
 
       // Create a pw::StringBuilder with a built-in buffer
       pw::StringBuffer<32> my_string_builder = "Is it really this easy?";
@@ -92,14 +92,14 @@
 Getting Started
 ---------------
 
-.. tabs::
+.. tab-set::
 
-   .. group-tab:: GN
+   .. tab-item:: GN
 
       Add ``$dir_pw_string`` to the ``deps`` list in your ``pw_executable()``
       build target:
 
-      .. code::
+      .. code-block::
 
          pw_executable("...") {
            # ...
@@ -113,7 +113,7 @@
       See `//source/BUILD.gn <https://pigweed.googlesource.com/pigweed/sample_project/+/refs/heads/main/source/BUILD.gn>`_
       in the Pigweed Sample Project for an example.
 
-   .. group-tab:: Zephyr
+   .. tab-item:: Zephyr
 
       Add ``CONFIG_PIGWEED_STRING=y`` to the Zephyr project's configuration.
 
diff --git a/pw_symbolizer/docs.rst b/pw_symbolizer/docs.rst
index 5d01216..7b8cfca 100644
--- a/pw_symbolizer/docs.rst
+++ b/pw_symbolizer/docs.rst
@@ -24,7 +24,7 @@
 ``Symbolizer`` Also provides a helper function for producing nicely formatted
 stack trace style dumps.
 
-.. code:: py
+.. code-block:: py
 
   import pw_symbolizer
 
@@ -63,7 +63,7 @@
 interface with a fixed database of address to ``Symbol`` mappings. This is
 useful for testing, or as a no-op ``Symbolizer``.
 
-.. code:: py
+.. code-block:: py
 
   import pw_symbolizer
 
@@ -78,10 +78,15 @@
 LlvmSymbolizer
 ==============
 The ``LlvmSymbolizer`` is a python layer that wraps ``llvm-symbolizer`` to
-produce symbols from provided addresses. This module will only work if
-``llvm-symbolizer`` is available on the system ``PATH``.
+produce symbols from provided addresses. This module requires either:
 
-.. code:: py
+* ``llvm-symbolizer`` is available on the system ``PATH``.
+* ``llvm_symbolizer_binary`` argument is specified and points to the executable.
+
+This object also defines a ``close`` to ensure the background process is
+cleaned up deterministically.
+
+.. code-block:: py
 
   import pw_symbolizer
 
diff --git a/pw_symbolizer/py/llvm_symbolizer_test.py b/pw_symbolizer/py/llvm_symbolizer_test.py
index 52b8cf2..a4bc271 100644
--- a/pw_symbolizer/py/llvm_symbolizer_test.py
+++ b/pw_symbolizer/py/llvm_symbolizer_test.py
@@ -14,6 +14,7 @@
 """Tests for pw_symbolizer's llvm-symbolizer based symbolization."""
 
 import os
+import shutil
 import subprocess
 import tempfile
 import unittest
@@ -42,7 +43,7 @@
                 self.assertEqual(result.file, _CPP_TEST_FILE_NAME)
                 self.assertEqual(result.line, expected_symbol['Line'])
 
-    def test_symbolization(self):
+    def _parameterized_test_symbolization(self, **llvm_symbolizer_kwargs):
         """Tests that the symbolizer can symbolize addresses properly."""
         self.assertTrue('PW_PIGWEED_CIPD_INSTALL_DIR' in os.environ)
         sysroot = Path(os.environ['PW_PIGWEED_CIPD_INSTALL_DIR']).joinpath(
@@ -85,15 +86,39 @@
                 for line in process.stdout.decode().splitlines()
             ]
 
-            symbolizer = pw_symbolizer.LlvmSymbolizer(exe_file)
-            self._test_symbolization_results(expected_symbols, symbolizer)
+            with self.subTest("non-legacy"):
+                symbolizer = pw_symbolizer.LlvmSymbolizer(
+                    exe_file, **llvm_symbolizer_kwargs
+                )
+                self._test_symbolization_results(expected_symbols, symbolizer)
+                symbolizer.close()
 
-            # Test backwards compatibility with older versions of
-            # llvm-symbolizer.
-            symbolizer = pw_symbolizer.LlvmSymbolizer(
-                exe_file, force_legacy=True
+            with self.subTest("backwards-compability"):
+                # Test backwards compatibility with older versions of
+                # llvm-symbolizer.
+                symbolizer = pw_symbolizer.LlvmSymbolizer(
+                    exe_file, force_legacy=True, **llvm_symbolizer_kwargs
+                )
+                self._test_symbolization_results(expected_symbols, symbolizer)
+                symbolizer.close()
+
+    def test_symbolization_default_binary(self):
+        self._parameterized_test_symbolization()
+
+    def test_symbolization_specified_binary(self):
+        location = Path(
+            subprocess.run(
+                ['which', 'llvm-symbolizer'], check=True, stdout=subprocess.PIPE
             )
-            self._test_symbolization_results(expected_symbols, symbolizer)
+            .stdout.decode()
+            .strip()
+        )
+        with tempfile.TemporaryDirectory() as copy_dir:
+            copy_location = Path(copy_dir) / "copy-llvm-symbolizer"
+            shutil.copy(location, copy_location)
+            self._parameterized_test_symbolization(
+                llvm_symbolizer_binary=copy_location
+            )
 
 
 if __name__ == '__main__':
diff --git a/pw_symbolizer/py/pw_symbolizer/llvm_symbolizer.py b/pw_symbolizer/py/pw_symbolizer/llvm_symbolizer.py
index b362678..f799be5 100644
--- a/pw_symbolizer/py/pw_symbolizer/llvm_symbolizer.py
+++ b/pw_symbolizer/py/pw_symbolizer/llvm_symbolizer.py
@@ -25,21 +25,32 @@
 class LlvmSymbolizer(symbolizer.Symbolizer):
     """A symbolizer that wraps llvm-symbolizer."""
 
-    def __init__(self, binary: Optional[Path] = None, force_legacy=False):
+    def __init__(
+        self,
+        binary: Optional[Path] = None,
+        force_legacy=False,
+        llvm_symbolizer_binary: Optional[Path] = None,
+    ):
         # Lets destructor return cleanly if the binary is not found.
         self._symbolizer = None
-        if shutil.which('llvm-symbolizer') is None:
-            raise FileNotFoundError(
-                'llvm-symbolizer not installed. Run bootstrap, or download '
-                'LLVM (https://github.com/llvm/llvm-project/releases/) and add '
-                'the tools to your system PATH'
-            )
+        if llvm_symbolizer_binary:
+            self._symbolizer_binary = str(llvm_symbolizer_binary)
+        else:
+            self._symbolizer_binary = 'llvm-symbolizer'
+            if shutil.which(self._symbolizer_binary) is None:
+                raise FileNotFoundError(
+                    'llvm-symbolizer not installed. Run bootstrap, or download '
+                    'LLVM (https://github.com/llvm/llvm-project/releases/) and '
+                    'add the tools to your system PATH'
+                )
 
         # Prefer JSON output as it's easier to decode.
         if force_legacy:
             self._json_mode = False
         else:
-            self._json_mode = LlvmSymbolizer._is_json_compatibile()
+            self._json_mode = LlvmSymbolizer._is_json_compatibile(
+                self._symbolizer_binary
+            )
 
         if binary is not None:
             if not binary.exists():
@@ -47,7 +58,7 @@
 
             output_style = 'JSON' if self._json_mode else 'LLVM'
             cmd = [
-                'llvm-symbolizer',
+                self._symbolizer_binary,
                 '--no-inlines',
                 '--demangle',
                 '--functions',
@@ -62,15 +73,22 @@
             self._lock: threading.Lock = threading.Lock()
 
     def __del__(self):
-        if self._symbolizer:
+        self.close()
+
+    def close(self):
+        """Closes the active llvm-symbolizer process."""
+        if self._symbolizer is not None:
             self._symbolizer.terminate()
             self._symbolizer.wait()
+            self._symbolizer.stdin.close()
+            self._symbolizer.stdout.close()
+            self._symbolizer = None
 
     @staticmethod
-    def _is_json_compatibile() -> bool:
+    def _is_json_compatibile(symbolizer_binary: str) -> bool:
         """Checks llvm-symbolizer to ensure compatibility"""
         result = subprocess.run(
-            ('llvm-symbolizer', '--help'),
+            (symbolizer_binary, '--help'),
             stdout=subprocess.PIPE,
             stdin=subprocess.PIPE,
         )
diff --git a/pw_sync/docs.rst b/pw_sync/docs.rst
index 9e0df4c..aba3ea4 100644
--- a/pw_sync/docs.rst
+++ b/pw_sync/docs.rst
@@ -731,9 +731,9 @@
 virtual lock interface could be used here to minimize the code-size cost that
 would occur otherwise if the flash driver were templated.
 
-VirtualBasicLock
-----------------
-The ``VirtualBasicLock`` interface meets the
+VirtualBasicLockable
+--------------------
+The ``VirtualBasicLockable`` interface meets the
 `BasicLockable <https://en.cppreference.com/w/cpp/named_req/BasicLockable>`_ C++
 named requirement. Our critical section lock primitives offer optional virtual
 versions, including:
@@ -742,9 +742,24 @@
 * :cpp:func:`pw::sync::VirtualTimedMutex`
 * :cpp:func:`pw::sync::VirtualInterruptSpinLock`
 
+.. _module-pw_sync-genericbasiclockable:
+
+GenericBasicLockable
+--------------------
+``GenericBasicLockable`` is a helper construct that can be used to declare
+virtual versions of a critical section lock primitive that meets the
+`BasicLockable <https://en.cppreference.com/w/cpp/named_req/BasicLockable>`_
+C++ named requirement. For example, given a ``Mutex`` type with ``lock()`` and
+``unlock()`` methods, a ``VirtualMutex`` type that derives from
+``VirtualBasicLockable`` can be declared as follows:
+
+.. code-block:: cpp
+
+   class VirtualMutex : public GenericBasicLockable<Mutex> {};
+
 Borrowable
 ==========
-The Borrowable is a helper construct that enables callers to borrow an object
+``Borrowable`` is a helper construct that enables callers to borrow an object
 which is guarded by a lock, enabling a containerized style of external locking.
 
 Users who need access to the guarded object can ask to acquire a
diff --git a/pw_sync/mutex_facade_test.cc b/pw_sync/mutex_facade_test.cc
index 2b7926e..bf4e61d 100644
--- a/pw_sync/mutex_facade_test.cc
+++ b/pw_sync/mutex_facade_test.cc
@@ -33,7 +33,7 @@
 // TODO(b/235284163): Add real concurrency tests once we have pw::thread.
 
 TEST(Mutex, LockUnlock) {
-  pw::sync::Mutex mutex;
+  Mutex mutex;
   mutex.lock();
   // TODO(b/235284163): Ensure it fails to lock when already held.
   // EXPECT_FALSE(mutex.try_lock());
@@ -49,7 +49,7 @@
 }
 
 TEST(Mutex, TryLockUnlock) {
-  pw::sync::Mutex mutex;
+  Mutex mutex;
   const bool locked = mutex.try_lock();
   EXPECT_TRUE(locked);
   if (locked) {
@@ -62,7 +62,7 @@
 PW_SYNC_ADD_BORROWABLE_LOCK_NAMED_TESTS(BorrowableMutex, Mutex);
 
 TEST(VirtualMutex, LockUnlock) {
-  pw::sync::VirtualMutex mutex;
+  VirtualMutex mutex;
   mutex.lock();
   // TODO(b/235284163): Ensure it fails to lock when already held.
   // EXPECT_FALSE(mutex.try_lock());
@@ -77,16 +77,25 @@
   static_virtual_mutex.unlock();
 }
 
+TEST(VirtualMutex, LockUnlockExternal) {
+  VirtualMutex virtual_mutex;
+  auto& mutex = virtual_mutex.mutex();
+  mutex.lock();
+  // TODO(b/235284163): Ensure it fails to lock when already held.
+  // EXPECT_FALSE(mutex.try_lock());
+  mutex.unlock();
+}
+
 PW_SYNC_ADD_BORROWABLE_LOCK_NAMED_TESTS(BorrowableVirtualMutex, VirtualMutex);
 
 TEST(Mutex, LockUnlockInC) {
-  pw::sync::Mutex mutex;
+  Mutex mutex;
   pw_sync_Mutex_CallLock(&mutex);
   pw_sync_Mutex_CallUnlock(&mutex);
 }
 
 TEST(Mutex, TryLockUnlockInC) {
-  pw::sync::Mutex mutex;
+  Mutex mutex;
   ASSERT_TRUE(pw_sync_Mutex_CallTryLock(&mutex));
   // TODO(b/235284163): Ensure it fails to lock when already held.
   // EXPECT_FALSE(pw_sync_Mutex_CallTryLock(&mutex));
diff --git a/pw_sync/public/pw_sync/interrupt_spin_lock.h b/pw_sync/public/pw_sync/interrupt_spin_lock.h
index 7726c92..996ad6d 100644
--- a/pw_sync/public/pw_sync/interrupt_spin_lock.h
+++ b/pw_sync/public/pw_sync/interrupt_spin_lock.h
@@ -81,32 +81,8 @@
 };
 
 class PW_LOCKABLE("pw::sync::VirtualInterruptSpinLock")
-    VirtualInterruptSpinLock final : public VirtualBasicLockable {
- public:
-  VirtualInterruptSpinLock() = default;
-
-  VirtualInterruptSpinLock(const VirtualInterruptSpinLock&) = delete;
-  VirtualInterruptSpinLock(VirtualInterruptSpinLock&&) = delete;
-  VirtualInterruptSpinLock& operator=(const VirtualInterruptSpinLock&) = delete;
-  VirtualInterruptSpinLock& operator=(VirtualInterruptSpinLock&&) = delete;
-
-  InterruptSpinLock& interrupt_spin_lock() { return interrupt_spin_lock_; }
-
- private:
-  void DoLockOperation(Operation operation) override
-      PW_NO_LOCK_SAFETY_ANALYSIS {
-    switch (operation) {
-      case Operation::kLock:
-        return interrupt_spin_lock_.lock();
-
-      case Operation::kUnlock:
-      default:
-        return interrupt_spin_lock_.unlock();
-    }
-  }
-
-  InterruptSpinLock interrupt_spin_lock_;
-};
+    VirtualInterruptSpinLock final
+    : public GenericBasicLockable<InterruptSpinLock> {};
 
 }  // namespace pw::sync
 
diff --git a/pw_sync/public/pw_sync/mutex.h b/pw_sync/public/pw_sync/mutex.h
index 5b42468..18729f5 100644
--- a/pw_sync/public/pw_sync/mutex.h
+++ b/pw_sync/public/pw_sync/mutex.h
@@ -86,31 +86,9 @@
 };
 
 class PW_LOCKABLE("pw::sync::VirtualMutex") VirtualMutex final
-    : public VirtualBasicLockable {
+    : public GenericBasicLockable<Mutex> {
  public:
-  VirtualMutex() = default;
-
-  VirtualMutex(const VirtualMutex&) = delete;
-  VirtualMutex(VirtualMutex&&) = delete;
-  VirtualMutex& operator=(const VirtualMutex&) = delete;
-  VirtualMutex& operator=(VirtualMutex&&) = delete;
-
-  Mutex& mutex() { return mutex_; }
-
- private:
-  void DoLockOperation(Operation operation) override
-      PW_NO_LOCK_SAFETY_ANALYSIS {
-    switch (operation) {
-      case Operation::kLock:
-        return mutex_.lock();
-
-      case Operation::kUnlock:
-      default:
-        return mutex_.unlock();
-    }
-  }
-
-  Mutex mutex_;
+  Mutex& mutex() { return impl(); }
 };
 
 }  // namespace pw::sync
diff --git a/pw_sync/public/pw_sync/timed_mutex.h b/pw_sync/public/pw_sync/timed_mutex.h
index abb48c1..396f9b6 100644
--- a/pw_sync/public/pw_sync/timed_mutex.h
+++ b/pw_sync/public/pw_sync/timed_mutex.h
@@ -71,31 +71,9 @@
 };
 
 class PW_LOCKABLE("pw::sync::VirtualTimedMutex") VirtualTimedMutex final
-    : public VirtualBasicLockable {
+    : public GenericBasicLockable<TimedMutex> {
  public:
-  VirtualTimedMutex() = default;
-
-  VirtualTimedMutex(const VirtualTimedMutex&) = delete;
-  VirtualTimedMutex(VirtualTimedMutex&&) = delete;
-  VirtualTimedMutex& operator=(const VirtualTimedMutex&) = delete;
-  VirtualTimedMutex& operator=(VirtualTimedMutex&&) = delete;
-
-  TimedMutex& timed_mutex() { return timed_mutex_; }
-
- private:
-  void DoLockOperation(Operation operation) override
-      PW_NO_LOCK_SAFETY_ANALYSIS {
-    switch (operation) {
-      case Operation::kLock:
-        return timed_mutex_.lock();
-
-      case Operation::kUnlock:
-      default:
-        return timed_mutex_.unlock();
-    }
-  }
-
-  TimedMutex timed_mutex_;
+  TimedMutex& timed_mutex() { return impl(); }
 };
 
 }  // namespace pw::sync
diff --git a/pw_sync/public/pw_sync/virtual_basic_lockable.h b/pw_sync/public/pw_sync/virtual_basic_lockable.h
index 5010365..c41f39b 100644
--- a/pw_sync/public/pw_sync/virtual_basic_lockable.h
+++ b/pw_sync/public/pw_sync/virtual_basic_lockable.h
@@ -70,4 +70,34 @@
   void DoLockOperation(Operation) override {}
 };
 
+/// Templated base class to facilitate making "Virtual{LockType}" from a
+/// "LockType" class that provides `lock()` and `unlock()` methods.
+/// The resulting classes will derive from `VirtualBasicLockable`.
+///
+/// Example:
+///   class VirtualMutex : public GenericBasicLockable<Mutex> {};
+template <typename LockType>
+class GenericBasicLockable : public VirtualBasicLockable {
+ public:
+  virtual ~GenericBasicLockable() = default;
+
+ protected:
+  LockType& impl() { return impl_; }
+
+ private:
+  void DoLockOperation(Operation operation) override
+      PW_NO_LOCK_SAFETY_ANALYSIS {
+    switch (operation) {
+      case Operation::kLock:
+        return impl_.lock();
+
+      case Operation::kUnlock:
+      default:
+        return impl_.unlock();
+    }
+  }
+
+  LockType impl_;
+};
+
 }  // namespace pw::sync
diff --git a/pw_sync/timed_mutex_facade_test.cc b/pw_sync/timed_mutex_facade_test.cc
index b74c569..26a3b47 100644
--- a/pw_sync/timed_mutex_facade_test.cc
+++ b/pw_sync/timed_mutex_facade_test.cc
@@ -49,7 +49,7 @@
 // TODO(b/235284163): Add real concurrency tests once we have pw::thread.
 
 TEST(TimedMutex, LockUnlock) {
-  pw::sync::TimedMutex mutex;
+  TimedMutex mutex;
   mutex.lock();
   mutex.unlock();
   // TODO(b/235284163): Ensure it fails to lock when already held by someone
@@ -67,7 +67,7 @@
 }
 
 TEST(TimedMutex, TryLockUnlock) {
-  pw::sync::TimedMutex mutex;
+  TimedMutex mutex;
   const bool locked = mutex.try_lock();
   EXPECT_TRUE(locked);
   if (locked) {
@@ -79,7 +79,7 @@
 }
 
 TEST(TimedMutex, TryLockUnlockFor) {
-  pw::sync::TimedMutex mutex;
+  TimedMutex mutex;
 
   SystemClock::time_point before = SystemClock::now();
   const bool locked = mutex.try_lock_for(kRoundedArbitraryDuration);
@@ -98,7 +98,7 @@
 }
 
 TEST(TimedMutex, TryLockUnlockUntil) {
-  pw::sync::TimedMutex mutex;
+  TimedMutex mutex;
 
   const SystemClock::time_point deadline =
       SystemClock::now() + kRoundedArbitraryDuration;
@@ -121,7 +121,7 @@
                                               chrono::SystemClock);
 
 TEST(VirtualTimedMutex, LockUnlock) {
-  pw::sync::VirtualTimedMutex mutex;
+  VirtualTimedMutex mutex;
   mutex.lock();
   // TODO(b/235284163): Ensure it fails to lock when already held by someone
   // else.
@@ -138,18 +138,27 @@
   static_virtual_mutex.unlock();
 }
 
+TEST(VirtualMutex, LockUnlockExternal) {
+  VirtualTimedMutex virtual_timed_mutex;
+  auto& mutex = virtual_timed_mutex.timed_mutex();
+  mutex.lock();
+  // TODO(b/235284163): Ensure it fails to lock when already held.
+  // EXPECT_FALSE(mutex.try_lock());
+  mutex.unlock();
+}
+
 PW_SYNC_ADD_BORROWABLE_TIMED_LOCK_NAMED_TESTS(BorrowableVirtualTimedMutex,
                                               VirtualTimedMutex,
                                               chrono::SystemClock);
 
 TEST(TimedMutex, LockUnlockInC) {
-  pw::sync::TimedMutex mutex;
+  TimedMutex mutex;
   pw_sync_TimedMutex_CallLock(&mutex);
   pw_sync_TimedMutex_CallUnlock(&mutex);
 }
 
 TEST(TimedMutex, TryLockUnlockInC) {
-  pw::sync::TimedMutex mutex;
+  TimedMutex mutex;
   ASSERT_TRUE(pw_sync_TimedMutex_CallTryLock(&mutex));
   // TODO(b/235284163): Ensure it fails to lock when already held by someone
   // else.
@@ -158,7 +167,7 @@
 }
 
 TEST(TimedMutex, TryLockUnlockForInC) {
-  pw::sync::TimedMutex mutex;
+  TimedMutex mutex;
 
   pw_chrono_SystemClock_TimePoint before = pw_chrono_SystemClock_Now();
   ASSERT_TRUE(
@@ -176,7 +185,7 @@
 }
 
 TEST(TimedMutex, TryLockUnlockUntilInC) {
-  pw::sync::TimedMutex mutex;
+  TimedMutex mutex;
   pw_chrono_SystemClock_TimePoint deadline;
   deadline.duration_since_epoch.ticks =
       pw_chrono_SystemClock_Now().duration_since_epoch.ticks +
diff --git a/pw_sys_io/docs.rst b/pw_sys_io/docs.rst
index 33d63e6..4a12573 100644
--- a/pw_sys_io/docs.rst
+++ b/pw_sys_io/docs.rst
@@ -30,9 +30,9 @@
 =====
 This module requires relatively minimal setup:
 
-  1. Choose a ``pw_sys_io`` backend, or write one yourself.
-  2. If using GN build, Specify the ``pw_sys_io_BACKEND`` GN build arg to point
-     the library that provides a ``pw_sys_io`` backend.
+1. Choose a ``pw_sys_io`` backend, or write one yourself.
+2. If using GN build, Specify the ``pw_sys_io_BACKEND`` GN build arg to point
+   the library that provides a ``pw_sys_io`` backend.
 
 Module usage
 ============
@@ -50,6 +50,7 @@
 
 Dependencies
 ============
-  * pw_sys_io_backend
-  * pw_span
-  * pw_status
+- :ref:`module-pw_sys_io`
+- :ref:`module-pw_span`
+- :ref:`module-pw_status`
+
diff --git a/pw_sys_io_ambiq_sdk/BUILD.bazel b/pw_sys_io_ambiq_sdk/BUILD.bazel
new file mode 100644
index 0000000..bd423e1
--- /dev/null
+++ b/pw_sys_io_ambiq_sdk/BUILD.bazel
@@ -0,0 +1,39 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+load(
+    "//pw_build:pigweed.bzl",
+    "pw_cc_library",
+)
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])
+
+constraint_value(
+    name = "backend",
+    constraint_setting = "//pw_sys_io:backend_constraint_setting",
+)
+
+pw_cc_library(
+    name = "pw_sys_ambiq_sdk",
+    srcs = ["sys_io.cc"],
+    hdrs = ["public/pw_sys_io_ambiq_sdk/init.h"],
+    tags = ["manual"],
+    deps = [
+        "//pw_boot_cortex_m:armv7m",
+        "//pw_preprocessor",
+        "//pw_sys_io:facade",
+    ],
+)
diff --git a/pw_sys_io_ambiq_sdk/BUILD.gn b/pw_sys_io_ambiq_sdk/BUILD.gn
new file mode 100644
index 0000000..1f286a7
--- /dev/null
+++ b/pw_sys_io_ambiq_sdk/BUILD.gn
@@ -0,0 +1,44 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+import("$dir_pw_docgen/docs.gni")
+import("$dir_pw_third_party/ambiq/ambiq.gni")
+import("$dir_pw_unit_test/test.gni")
+
+config("default_config") {
+  include_dirs = [ "public" ]
+}
+
+pw_source_set("pw_sys_io_ambiq_sdk") {
+  public_configs = [ ":default_config" ]
+  public = [ "public/pw_sys_io_ambiq_sdk/init.h" ]
+  public_deps = [ "$dir_pw_preprocessor" ]
+
+  deps = [
+    "$dir_pw_sys_io:default_putget_bytes",
+    "$dir_pw_sys_io:facade",
+    "$dir_pw_third_party/ambiq:sdk",
+  ]
+  sources = [ "sys_io.cc" ]
+}
+
+pw_test_group("tests") {
+}
+
+pw_doc_group("docs") {
+  sources = [ "docs.rst" ]
+}
diff --git a/pw_sys_io_ambiq_sdk/docs.rst b/pw_sys_io_ambiq_sdk/docs.rst
new file mode 100644
index 0000000..fef2be4
--- /dev/null
+++ b/pw_sys_io_ambiq_sdk/docs.rst
@@ -0,0 +1,23 @@
+.. _module-pw_sys_io_ambiq_sdk:
+
+===================
+pw_sys_io_ambiq_sdk
+===================
+``pw_sys_io_ambiq_sdk`` implements the ``pw_sys_io`` facade over UART using the
+Ambiq Suite SDK HAL.
+
+The UART baud rate is fixed at 115200 (8N1).
+
+Setup
+=====
+This module requires relatively minimal setup:
+
+1. Write code against the ``pw_sys_io`` facade.
+2. Specify the ``dir_pw_sys_io_backend`` GN global variable to point to this
+   backend.
+3. Call ``pw_sys_io_Init()`` during init so the UART is properly initialized and
+   configured.
+
+The UART peripheral and the GPIO pins are defined in the ``am_bsp.h`` file. Make sure
+that the build argument ``pw_third_party_ambiq_PRODUCT`` is set correctly so that
+the correct bsp header file is included.
diff --git a/pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h b/pw_sys_io_ambiq_sdk/public/pw_sys_io_ambiq_sdk/init.h
similarity index 72%
rename from pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h
rename to pw_sys_io_ambiq_sdk/public/pw_sys_io_ambiq_sdk/init.h
index af31532..d4262c5 100644
--- a/pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h
+++ b/pw_sys_io_ambiq_sdk/public/pw_sys_io_ambiq_sdk/init.h
@@ -13,7 +13,11 @@
 // the License.
 #pragma once
 
-// The HDLC address to which to write Base64-encoded tokenized logs.
-#ifndef PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS
-#define PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS 1
-#endif  // PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS
+#include "pw_preprocessor/util.h"
+
+PW_EXTERN_C_START
+
+// The actual implement of PreMainInit() in sys_io_BACKEND.
+void pw_sys_io_Init();
+
+PW_EXTERN_C_END
diff --git a/pw_sys_io_ambiq_sdk/sys_io.cc b/pw_sys_io_ambiq_sdk/sys_io.cc
new file mode 100644
index 0000000..5e19f1f
--- /dev/null
+++ b/pw_sys_io_ambiq_sdk/sys_io.cc
@@ -0,0 +1,138 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_sys_io/sys_io.h"
+
+#include <cinttypes>
+
+#include "am_bsp.h"
+#include "am_mcu_apollo.h"
+#include "pw_preprocessor/compiler.h"
+
+namespace {
+void* hal_uart_handle{};
+}  // namespace
+
+PW_EXTERN_C void pw_sys_io_Init() {
+  // Use baud rate of 115200 (8N1).
+  static constexpr am_hal_uart_config_t kUartConfig = {
+      .ui32BaudRate = 115200,
+      .eDataBits = AM_HAL_UART_DATA_BITS_8,
+      .eParity = AM_HAL_UART_PARITY_NONE,
+      .eStopBits = AM_HAL_UART_ONE_STOP_BIT,
+      .eFlowControl = AM_HAL_UART_FLOW_CTRL_NONE,
+      .eTXFifoLevel = AM_HAL_UART_FIFO_LEVEL_16,
+      .eRXFifoLevel = AM_HAL_UART_FIFO_LEVEL_16,
+  };
+
+  // Initialize the UART peripheral.
+  am_hal_uart_initialize(AM_BSP_UART_PRINT_INST, &hal_uart_handle);
+
+  // Change the power state of the UART peripheral.
+  am_hal_uart_power_control(hal_uart_handle, AM_HAL_SYSCTRL_WAKE, false);
+
+  // Configure UART (baudrate etc.).
+  am_hal_uart_configure(hal_uart_handle, &kUartConfig);
+
+  // Enable the UART TX and RX GPIO's.
+  am_hal_gpio_pinconfig(AM_BSP_GPIO_COM_UART_TX, g_AM_BSP_GPIO_COM_UART_TX);
+  am_hal_gpio_pinconfig(AM_BSP_GPIO_COM_UART_RX, g_AM_BSP_GPIO_COM_UART_RX);
+}
+
+namespace pw::sys_io {
+
+// Wait for a byte to read on UART0. This blocks until a byte is read. This is
+// extremely inefficient as it requires the target to burn CPU cycles polling to
+// see if a byte is ready yet.
+Status ReadByte(std::byte* dest) {
+  while (true) {
+    if (TryReadByte(dest).ok()) {
+      return OkStatus();
+    }
+  }
+}
+
+Status TryReadByte(std::byte* dest) {
+  am_hal_uart_transfer_t transaction{};
+  uint32_t bytes_read{};
+
+  // Configure UART transaction for the read operation.
+  transaction.eType = AM_HAL_UART_BLOCKING_READ;
+  transaction.pui8Data = reinterpret_cast<uint8_t*>(dest);
+  transaction.ui32NumBytes = 1;
+  transaction.ui32TimeoutMs = AM_HAL_UART_WAIT_FOREVER;
+  transaction.pui32BytesTransferred = &bytes_read;
+
+  // Do read data over UART.
+  if (am_hal_uart_transfer(hal_uart_handle, &transaction) !=
+      AM_HAL_STATUS_SUCCESS) {
+    return Status::ResourceExhausted();
+  }
+
+  if (bytes_read != 1u) {
+    return Status::DataLoss();
+  }
+
+  return OkStatus();
+}
+
+Status WriteByte(std::byte b) {
+  am_hal_uart_transfer_t transaction{};
+  uint32_t chars_written{};
+
+  // Configure UART transaction for the write operation.
+  transaction.eType = AM_HAL_UART_BLOCKING_WRITE;
+  transaction.pui8Data =
+      const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(&b));
+  transaction.ui32NumBytes = 1;
+  transaction.ui32TimeoutMs = AM_HAL_UART_WAIT_FOREVER;
+  transaction.pui32BytesTransferred = &chars_written;
+
+  // Do write data over UART.
+  if (am_hal_uart_transfer(hal_uart_handle, &transaction) !=
+      AM_HAL_STATUS_SUCCESS) {
+    return Status::ResourceExhausted();
+  }
+
+  if (chars_written != 1) {
+    return Status::DataLoss();
+  }
+
+  return OkStatus();
+}
+
+// Writes a string using pw::sys_io, and add newline characters at the end.
+StatusWithSize WriteLine(const std::string_view& s) {
+  StatusWithSize result = WriteBytes(as_bytes(span(s)));
+  if (!result.ok()) {
+    return result;
+  }
+
+  size_t chars_written = result.size();
+  if (chars_written != s.size()) {
+    return StatusWithSize::DataLoss(chars_written);
+  }
+
+  // Write trailing newline.
+  result = WriteBytes(as_bytes(span("\r\n", 2)));
+  chars_written += result.size();
+
+  if (result.size() != 2) {
+    return StatusWithSize::DataLoss(chars_written);
+  }
+
+  return StatusWithSize(chars_written);
+}
+
+}  // namespace pw::sys_io
diff --git a/pw_sys_io_arduino/docs.rst b/pw_sys_io_arduino/docs.rst
index 79c533d..0cc321b 100644
--- a/pw_sys_io_arduino/docs.rst
+++ b/pw_sys_io_arduino/docs.rst
@@ -12,11 +12,10 @@
 
 .. code-block:: cpp
 
-  Serial.begin(115200);
+   Serial.begin(115200);
 
-  // Wait for serial port to be available
-  while (!Serial) {
-  }
+   // Wait for serial port to be available
+   while (!Serial) {}
 
 After ``Serial.begin(115200)`` it will busy wait until a host connects to the
 serial port.
diff --git a/pw_sys_io_baremetal_lm3s6965evb/docs.rst b/pw_sys_io_baremetal_lm3s6965evb/docs.rst
index a865373..fc99395 100644
--- a/pw_sys_io_baremetal_lm3s6965evb/docs.rst
+++ b/pw_sys_io_baremetal_lm3s6965evb/docs.rst
@@ -6,4 +6,4 @@
 
 .. warning::
 
-  This documentation is under construction.
+   This documentation is under construction.
diff --git a/pw_sys_io_baremetal_stm32f429/docs.rst b/pw_sys_io_baremetal_stm32f429/docs.rst
index 1f1fe08..41f2633 100644
--- a/pw_sys_io_baremetal_stm32f429/docs.rst
+++ b/pw_sys_io_baremetal_stm32f429/docs.rst
@@ -3,7 +3,6 @@
 -----------------------------
 pw_sys_io_baremetal_stm32f429
 -----------------------------
-
 ``pw_sys_io_baremetal_stm32f429`` implements the ``pw_sys_io`` facade over
 UART.
 
@@ -21,16 +20,16 @@
 =====
 This module requires relatively minimal setup:
 
-  1. Write code against the ``pw_sys_io`` facade.
-  2. Specify the ``dir_pw_sys_io_backend`` GN global variable to point to this
-     backend.
-  3. Build an executable with a main() function using a toolchain that
-     supports Cortex-M4.
+1. Write code against the ``pw_sys_io`` facade.
+2. Specify the ``dir_pw_sys_io_backend`` GN global variable to point to this
+   backend.
+3. Build an executable with a main() function using a toolchain that
+   supports Cortex-M4.
 
 .. note::
-  This module provides early firmware init and a linker script, so it will
-  conflict with other modules that do any early device init or provide a linker
-  script.
+   This module provides early firmware init and a linker script, so it will
+   conflict with other modules that do any early device init or provide a linker
+   script.
 
 Module usage
 ============
@@ -47,15 +46,15 @@
 
 .. code-block:: text
 
-  --USB Serial--+    +-----STM32F429 MCU-----
-                |    |
-             TX o--->o PA10/USART1_RX
-                |    |
-             RX o<---o PA9/USART1_TX
-                |    |
-  --------------+    +-----------------------
+   --USB Serial--+    +-----STM32F429 MCU-----
+                 |    |
+              TX o--->o PA10/USART1_RX
+                 |    |
+              RX o<---o PA9/USART1_TX
+                 |    |
+   --------------+    +-----------------------
 
 Dependencies
 ============
-  * ``pw_sys_io`` facade
-  * ``pw_preprocessor`` module
+- :ref:`module-pw_sys_io`
+- :ref:`module-pw_preprocessor`
diff --git a/pw_sys_io_emcraft_sf2/docs.rst b/pw_sys_io_emcraft_sf2/docs.rst
index 66ef6c9..948588d 100644
--- a/pw_sys_io_emcraft_sf2/docs.rst
+++ b/pw_sys_io_emcraft_sf2/docs.rst
@@ -19,17 +19,17 @@
 =====
 This module requires relatively minimal setup:
 
-  1. Write code against the ``pw_sys_io`` facade.
-  2. Specify the ``dir_pw_sys_io_backend`` GN global variable to point to this
-     backend.
-  3. pw_sys_io_Init() provided by this module needs to be called in early boot
-     to get pw_sys_io into a working state.
-  4. Build an executable with a main() function using a toolchain that
-     supports Cortex-M3.
+1. Write code against the ``pw_sys_io`` facade.
+2. Specify the ``dir_pw_sys_io_backend`` GN global variable to point to this
+   backend.
+3. pw_sys_io_Init() provided by this module needs to be called in early boot
+   to get pw_sys_io into a working state.
+4. Build an executable with a main() function using a toolchain that
+   supports Cortex-M3.
 
 .. note::
-  This module provides early firmware init, so it will conflict with other
-  modules that do any early device init.
+   This module provides early firmware init, so it will conflict with other
+   modules that do any early device init.
 
 Module usage
 ============
@@ -40,5 +40,5 @@
 
 Dependencies
 ============
-  * ``pw_sys_io`` facade
-  * ``pw_preprocessor`` module
+- :ref:`module-pw_sys_io`
+- :ref:`module-pw_preprocessor`
diff --git a/pw_sys_io_mcuxpresso/docs.rst b/pw_sys_io_mcuxpresso/docs.rst
index a237611..fc30fc1 100644
--- a/pw_sys_io_mcuxpresso/docs.rst
+++ b/pw_sys_io_mcuxpresso/docs.rst
@@ -12,13 +12,13 @@
 =====
 This module requires a little setup:
 
- 1. Use ``pw_build_mcuxpresso`` to create a ``pw_source_set`` for an
-    MCUXpresso SDK.
- 2. Include the debug console component in this SDK definition.
- 3. Specify the ``pw_third_party_mcuxpresso_SDK`` GN global variable to specify
-    the name of this source set.
- 4. Use a target that calls ``pw_sys_io_mcuxpresso_Init`` in
-    ``pw_boot_PreMainInit`` or similar.
+1. Use ``pw_build_mcuxpresso`` to create a ``pw_source_set`` for an
+   MCUXpresso SDK.
+2. Include the debug console component in this SDK definition.
+3. Specify the ``pw_third_party_mcuxpresso_SDK`` GN global variable to specify
+   the name of this source set.
+4. Use a target that calls ``pw_sys_io_mcuxpresso_Init`` in
+   ``pw_boot_PreMainInit`` or similar.
 
 The name of the SDK source set must be set in the
 "pw_third_party_mcuxpresso_SDK" GN arg
@@ -31,7 +31,7 @@
 
 .. c:macro:: DEBUG_CONSOLE_TRANSFER_NON_BLOCKING
 
-  Whether the MCUXpresso debug console supports non-blocking transfers. The
-  default will depend on your SDK configuration.
+   Whether the MCUXpresso debug console supports non-blocking transfers. The
+   default will depend on your SDK configuration.
 
-  Enabling this adds support for ``pw::sys_io::TryReadByte``.
+   Enabling this adds support for ``pw::sys_io::TryReadByte``.
diff --git a/pw_sys_io_stdio/docs.rst b/pw_sys_io_stdio/docs.rst
index 7280656..eaa45af 100644
--- a/pw_sys_io_stdio/docs.rst
+++ b/pw_sys_io_stdio/docs.rst
@@ -8,7 +8,6 @@
 
 Why not just use stdio directly?
 --------------------------------
-
 The nice thing about using ``pw_sys_io`` is that it's rather easy to get a
 board up and running with a target-specific backend. This means when drafting
 out a quick application you can write it against ``pw_sys_io`` and, with some
@@ -21,8 +20,8 @@
 =====
 This module requires relatively minimal setup:
 
-  1. Write code against the ``pw_sys_io`` facade.
-  2. Direct the ``pw_sys_io_BACKEND`` GN build arg to point to this backend.
+1. Write code against the ``pw_sys_io`` facade.
+2. Direct the ``pw_sys_io_BACKEND`` GN build arg to point to this backend.
 
 Module usage
 ============
@@ -31,4 +30,4 @@
 
 Dependencies
 ============
-  * ``pw_sys_io`` facade
+- :ref:`module-pw_sys_io`
diff --git a/pw_sys_io_stm32cube/docs.rst b/pw_sys_io_stm32cube/docs.rst
index e4155d6..a470fbb 100644
--- a/pw_sys_io_stm32cube/docs.rst
+++ b/pw_sys_io_stm32cube/docs.rst
@@ -13,11 +13,11 @@
 =====
 This module requires relatively minimal setup:
 
-  1. Write code against the ``pw_sys_io`` facade.
-  2. Specify the ``dir_pw_sys_io_backend`` GN global variable to point to this
-     backend.
-  3. Call ``pw_sys_io_Init()`` during init so the UART is properly initialized
-     and configured.
+1. Write code against the ``pw_sys_io`` facade.
+2. Specify the ``dir_pw_sys_io_backend`` GN global variable to point to this
+   backend.
+3. Call ``pw_sys_io_Init()`` during init so the UART is properly initialized
+   and configured.
 
 For devices other than the STM32F429I-DISC1, this module will need to be
 configured to use the appropriate GPIO pins and USART peripheral.
@@ -83,10 +83,10 @@
 
 .. code-block:: text
 
-  --USB Serial--+    +-----STM32F429 MCU-----
-                |    |
-             TX o--->o PA10/USART1_RX
-                |    |
-             RX o<---o PA9/USART1_TX
-                |    |
-  --------------+    +-----------------------
+   --USB Serial--+    +-----STM32F429 MCU-----
+                 |    |
+              TX o--->o PA10/USART1_RX
+                 |    |
+              RX o<---o PA9/USART1_TX
+                 |    |
+   --------------+    +-----------------------
diff --git a/pw_system/BUILD.gn b/pw_system/BUILD.gn
index 8cb2d67..72873a9 100644
--- a/pw_system/BUILD.gn
+++ b/pw_system/BUILD.gn
@@ -14,6 +14,7 @@
 
 import("//build_overrides/pigweed.gni")
 
+import("$dir_pigweed/third_party/ambiq/ambiq.gni")
 import("$dir_pigweed/third_party/freertos/freertos.gni")
 import("$dir_pigweed/third_party/nanopb/nanopb.gni")
 import("$dir_pigweed/third_party/pico_sdk/pi_pico.gni")
@@ -38,6 +39,33 @@
   include_dirs = [ "public" ]
 }
 
+# This config moves RPC logging to a separate RPC channel and HDLC
+# address. This does two things:
+#   * The separate RPC channel allows logging traffic to be treated as
+#     if it is being sent to a different client via a separate RPC
+#     channel. This illustrates the ability for an RPC server to
+#     communicate to multiple clients over multiple physical links.
+#   * The separate HDLC address completely isolates typical RPC traffic
+#     from logging traffic by communicating to a different HDLC endpoint
+#     address. This effectively creates two virtual data pipes over the
+#     same physical link.
+#
+# This is mostly to illustrate pw_rpc's capability to route and multiplex
+# traffic.
+config("multi_endpoint_rpc_overrides") {
+  defines = [
+    "PW_SYSTEM_LOGGING_CHANNEL_ID=10000",
+    "PW_SYSTEM_LOGGING_RPC_HDLC_ADDRESS=10000",
+  ]
+}
+
+# The Pigweed config pattern requires a pw_source_set to provide the
+# configuration defines. This provides the flags in
+# multi_endpoint_rpc_overrides.
+pw_source_set("multi_endpoint_rpc_config") {
+  public_configs = [ ":multi_endpoint_rpc_overrides" ]
+}
+
 pw_source_set("config") {
   sources = [ "public/pw_system/config.h" ]
   public_configs = [ ":public_include_path" ]
@@ -255,6 +283,12 @@
       ":system_example($dir_pigweed/targets/rp2040_pw_system:rp2040_pw_system.size_optimized)",
     ]
   }
+  if (dir_pw_third_party_ambiq_SDK != "" && dir_pw_third_party_freertos != "") {
+    deps += [
+      ":system_example($dir_pigweed/targets/apollo4_pw_system:apollo4_pw_system.debug)",
+      ":system_example($dir_pigweed/targets/apollo4_pw_system:apollo4_pw_system.size_optimized)",
+    ]
+  }
 }
 
 pw_doc_group("docs") {
diff --git a/pw_system/docs.rst b/pw_system/docs.rst
index e2d289d..48394e3 100644
--- a/pw_system/docs.rst
+++ b/pw_system/docs.rst
@@ -79,78 +79,78 @@
 
 .. code-block::
 
-  # Declare a toolchain with suggested, compiler, compiler flags, and default
-  # backends.
-  pw_system_target("stm32f429i_disc1_stm32cube_size_optimized") {
-    # These options drive the logic for automatic configuration by this
-    # template.
-    cpu = PW_SYSTEM_CPU.CORTEX_M4F
-    scheduler = PW_SYSTEM_SCHEDULER.FREERTOS
+   # Declare a toolchain with suggested, compiler, compiler flags, and default
+   # backends.
+   pw_system_target("stm32f429i_disc1_stm32cube_size_optimized") {
+     # These options drive the logic for automatic configuration by this
+     # template.
+     cpu = PW_SYSTEM_CPU.CORTEX_M4F
+     scheduler = PW_SYSTEM_SCHEDULER.FREERTOS
 
-    # Optionally, override pw_system's defaults to build with clang.
-    system_toolchain = pw_toolchain_arm_clang
+     # Optionally, override pw_system's defaults to build with clang.
+     system_toolchain = pw_toolchain_arm_clang
 
-    # The pre_init source set provides things like the interrupt vector table,
-    # pre-main init, and provision of FreeRTOS hooks.
-    link_deps = [ "$dir_pigweed/targets/stm32f429i_disc1_stm32cube:pre_init" ]
+     # The pre_init source set provides things like the interrupt vector table,
+     # pre-main init, and provision of FreeRTOS hooks.
+     link_deps = [ "$dir_pigweed/targets/stm32f429i_disc1_stm32cube:pre_init" ]
 
-    # These are hardware-specific options that set up this particular board.
-    # These are declared in ``declare_args()`` blocks throughout Pigweed. Any
-    # build arguments set by the user will be overridden by these settings.
-    build_args = {
-      pw_third_party_freertos_CONFIG = "$dir_pigweed/targets/stm32f429i_disc1_stm32cube:stm32f4xx_freertos_config"
-      pw_third_party_freertos_PORT = "$dir_pw_third_party/freertos:arm_cm4f"
-      pw_sys_io_BACKEND = dir_pw_sys_io_stm32cube
-      dir_pw_third_party_stm32cube = dir_pw_third_party_stm32cube_f4
-      pw_third_party_stm32cube_PRODUCT = "STM32F429xx"
-      pw_third_party_stm32cube_CONFIG =
-          "//targets/stm32f429i_disc1_stm32cube:stm32f4xx_hal_config"
-      pw_third_party_stm32cube_CORE_INIT = ""
-      pw_boot_cortex_m_LINK_CONFIG_DEFINES = [
-        "PW_BOOT_FLASH_BEGIN=0x08000200",
-        "PW_BOOT_FLASH_SIZE=2048K",
-        "PW_BOOT_HEAP_SIZE=7K",
-        "PW_BOOT_MIN_STACK_SIZE=1K",
-        "PW_BOOT_RAM_BEGIN=0x20000000",
-        "PW_BOOT_RAM_SIZE=192K",
-        "PW_BOOT_VECTOR_TABLE_BEGIN=0x08000000",
-        "PW_BOOT_VECTOR_TABLE_SIZE=512",
-      ]
-    }
-  }
+     # These are hardware-specific options that set up this particular board.
+     # These are declared in ``declare_args()`` blocks throughout Pigweed. Any
+     # build arguments set by the user will be overridden by these settings.
+     build_args = {
+       pw_third_party_freertos_CONFIG = "$dir_pigweed/targets/stm32f429i_disc1_stm32cube:stm32f4xx_freertos_config"
+       pw_third_party_freertos_PORT = "$dir_pw_third_party/freertos:arm_cm4f"
+       pw_sys_io_BACKEND = dir_pw_sys_io_stm32cube
+       dir_pw_third_party_stm32cube = dir_pw_third_party_stm32cube_f4
+       pw_third_party_stm32cube_PRODUCT = "STM32F429xx"
+       pw_third_party_stm32cube_CONFIG =
+           "//targets/stm32f429i_disc1_stm32cube:stm32f4xx_hal_config"
+       pw_third_party_stm32cube_CORE_INIT = ""
+       pw_boot_cortex_m_LINK_CONFIG_DEFINES = [
+         "PW_BOOT_FLASH_BEGIN=0x08000200",
+         "PW_BOOT_FLASH_SIZE=2048K",
+         "PW_BOOT_HEAP_SIZE=7K",
+         "PW_BOOT_MIN_STACK_SIZE=1K",
+         "PW_BOOT_RAM_BEGIN=0x20000000",
+         "PW_BOOT_RAM_SIZE=192K",
+         "PW_BOOT_VECTOR_TABLE_BEGIN=0x08000000",
+         "PW_BOOT_VECTOR_TABLE_SIZE=512",
+       ]
+     }
+   }
 
-  # Example for the Emcraft SmartFusion2 system-on-module
-  pw_system_target("emcraft_sf2_som_size_optimized") {
-    cpu = PW_SYSTEM_CPU.CORTEX_M3
-    scheduler = PW_SYSTEM_SCHEDULER.FREERTOS
+   # Example for the Emcraft SmartFusion2 system-on-module
+   pw_system_target("emcraft_sf2_som_size_optimized") {
+     cpu = PW_SYSTEM_CPU.CORTEX_M3
+     scheduler = PW_SYSTEM_SCHEDULER.FREERTOS
 
-    link_deps = [ "$dir_pigweed/targets/emcraft_sf2_som:pre_init" ]
-    build_args = {
-      pw_log_BACKEND = dir_pw_log_basic #dir_pw_log_tokenized
-      pw_log_tokenized_HANDLER_BACKEND = "//pw_system:log"
-      pw_third_party_freertos_CONFIG = "$dir_pigweed/targets/emcraft_sf2_som:sf2_freertos_config"
-      pw_third_party_freertos_PORT = "$dir_pw_third_party/freertos:arm_cm3"
-      pw_sys_io_BACKEND = dir_pw_sys_io_emcraft_sf2
-      dir_pw_third_party_smartfusion_mss = dir_pw_third_party_smartfusion_mss_exported
-      pw_third_party_stm32cube_CONFIG =
-          "//targets/emcraft_sf2_som:sf2_mss_hal_config"
-      pw_third_party_stm32cube_CORE_INIT = ""
-      pw_boot_cortex_m_LINK_CONFIG_DEFINES = [
-        "PW_BOOT_FLASH_BEGIN=0x00000200",
-        "PW_BOOT_FLASH_SIZE=200K",
+     link_deps = [ "$dir_pigweed/targets/emcraft_sf2_som:pre_init" ]
+     build_args = {
+       pw_log_BACKEND = dir_pw_log_basic #dir_pw_log_tokenized
+       pw_log_tokenized_HANDLER_BACKEND = "//pw_system:log"
+       pw_third_party_freertos_CONFIG = "$dir_pigweed/targets/emcraft_sf2_som:sf2_freertos_config"
+       pw_third_party_freertos_PORT = "$dir_pw_third_party/freertos:arm_cm3"
+       pw_sys_io_BACKEND = dir_pw_sys_io_emcraft_sf2
+       dir_pw_third_party_smartfusion_mss = dir_pw_third_party_smartfusion_mss_exported
+       pw_third_party_stm32cube_CONFIG =
+           "//targets/emcraft_sf2_som:sf2_mss_hal_config"
+       pw_third_party_stm32cube_CORE_INIT = ""
+       pw_boot_cortex_m_LINK_CONFIG_DEFINES = [
+         "PW_BOOT_FLASH_BEGIN=0x00000200",
+         "PW_BOOT_FLASH_SIZE=200K",
 
-        # TODO(b/235348465): Currently "pw_tokenizer/detokenize_test" requires at
-        # least 6K bytes in heap when using pw_malloc_freelist. The heap size
-        # required for tests should be investigated.
-        "PW_BOOT_HEAP_SIZE=7K",
-        "PW_BOOT_MIN_STACK_SIZE=1K",
-        "PW_BOOT_RAM_BEGIN=0x20000000",
-        "PW_BOOT_RAM_SIZE=64K",
-        "PW_BOOT_VECTOR_TABLE_BEGIN=0x00000000",
-        "PW_BOOT_VECTOR_TABLE_SIZE=512",
-      ]
-    }
-  }
+         # TODO(b/235348465): Currently "pw_tokenizer/detokenize_test" requires at
+         # least 6K bytes in heap when using pw_malloc_freelist. The heap size
+         # required for tests should be investigated.
+         "PW_BOOT_HEAP_SIZE=7K",
+         "PW_BOOT_MIN_STACK_SIZE=1K",
+         "PW_BOOT_RAM_BEGIN=0x20000000",
+         "PW_BOOT_RAM_SIZE=64K",
+         "PW_BOOT_VECTOR_TABLE_BEGIN=0x00000000",
+         "PW_BOOT_VECTOR_TABLE_SIZE=512",
+       ]
+     }
+   }
 
 -------
 Metrics
@@ -169,3 +169,21 @@
    :maxdepth: 1
 
    cli
+
+-------------------
+Multi-endpoint mode
+-------------------
+
+By default, pw_system serves all of its traffic with the same RPC
+channel ID and HDLC address. An alternative mode assigns a separate
+channel ID and HDLC address to logging traffic, which is useful when
+logging and primary RPC for ``pw_system`` go to separate clients.
+
+To use this mode, add the following to ``gn args out``:
+
+.. code-block::
+
+   pw_system_USE_MULTI_ENDPOINT_CONFIG = true
+
+The settings for the channel ID and address can be found in the target
+``//pw_system:multi_endpoint_rpc_overrides``.
diff --git a/pw_system/hdlc_rpc_server.cc b/pw_system/hdlc_rpc_server.cc
index 5bbcd83..3923d4f 100644
--- a/pw_system/hdlc_rpc_server.cc
+++ b/pw_system/hdlc_rpc_server.cc
@@ -16,17 +16,25 @@
 #include <cstddef>
 #include <cstdint>
 #include <cstdio>
+#include <mutex>
 
 #include "pw_assert/check.h"
 #include "pw_hdlc/encoded_size.h"
 #include "pw_hdlc/rpc_channel.h"
 #include "pw_hdlc/rpc_packets.h"
 #include "pw_log/log.h"
+#include "pw_rpc/channel.h"
 #include "pw_sync/mutex.h"
 #include "pw_system/config.h"
 #include "pw_system/io.h"
 #include "pw_system/rpc_server.h"
 
+#if PW_SYSTEM_DEFAULT_CHANNEL_ID != PW_SYSTEM_LOGGING_CHANNEL_ID && \
+    PW_SYSTEM_DEFAULT_RPC_HDLC_ADDRESS == PW_SYSTEM_LOGGING_RPC_HDLC_ADDRESS
+#error \
+    "Default and logging addresses must be different to support multiple channels."
+#endif
+
 namespace pw::system {
 namespace {
 
@@ -35,10 +43,50 @@
 static_assert(kMaxTransmissionUnit ==
               hdlc::MaxEncodedFrameSize(rpc::cfg::kEncodingBufferSizeBytes));
 
+#if PW_SYSTEM_DEFAULT_CHANNEL_ID == PW_SYSTEM_LOGGING_CHANNEL_ID
 hdlc::FixedMtuChannelOutput<kMaxTransmissionUnit> hdlc_channel_output(
     GetWriter(), PW_SYSTEM_DEFAULT_RPC_HDLC_ADDRESS, "HDLC channel");
 rpc::Channel channels[] = {
     rpc::Channel::Create<kDefaultRpcChannelId>(&hdlc_channel_output)};
+#else
+class SynchronizedChannelOutput : public rpc::ChannelOutput {
+ public:
+  SynchronizedChannelOutput(stream::Writer& writer,
+                            uint64_t address,
+                            const char* channel_name)
+      : rpc::ChannelOutput(channel_name),
+        inner_(writer, address, channel_name) {}
+
+  Status Send(span<const std::byte> buffer) override {
+    std::lock_guard guard(mtx_);
+    auto s = inner_.Send(buffer);
+    return s;
+  }
+
+  size_t MaximumTransmissionUnit() override {
+    std::lock_guard guard(mtx_);
+    auto s = inner_.MaximumTransmissionUnit();
+    return s;
+  }
+
+ private:
+  sync::Mutex mtx_;
+  hdlc::FixedMtuChannelOutput<kMaxTransmissionUnit> inner_ PW_GUARDED_BY(mtx_);
+};
+
+SynchronizedChannelOutput hdlc_channel_output[] = {
+    SynchronizedChannelOutput(GetWriter(),
+                              PW_SYSTEM_DEFAULT_RPC_HDLC_ADDRESS,
+                              "HDLC default channel"),
+    SynchronizedChannelOutput(GetWriter(),
+                              PW_SYSTEM_LOGGING_RPC_HDLC_ADDRESS,
+                              "HDLC logging channel"),
+};
+rpc::Channel channels[] = {
+    rpc::Channel::Create<kDefaultRpcChannelId>(&hdlc_channel_output[0]),
+    rpc::Channel::Create<kLoggingRpcChannelId>(&hdlc_channel_output[1]),
+};
+#endif
 rpc::Server server(channels);
 
 constexpr size_t kDecoderBufferSize =
@@ -69,7 +117,8 @@
         for (std::byte byte : ret_val.value()) {
           if (auto result = decoder.Process(byte); result.ok()) {
             hdlc::Frame& frame = result.value();
-            if (frame.address() == PW_SYSTEM_DEFAULT_RPC_HDLC_ADDRESS) {
+            if (frame.address() == PW_SYSTEM_DEFAULT_RPC_HDLC_ADDRESS ||
+                frame.address() == PW_SYSTEM_LOGGING_RPC_HDLC_ADDRESS) {
               server.ProcessPacket(frame.data());
             }
           }
diff --git a/pw_system/init.cc b/pw_system/init.cc
index 13c4e1b..4196115 100644
--- a/pw_system/init.cc
+++ b/pw_system/init.cc
@@ -42,7 +42,7 @@
 
   // Setup logging.
   const Status status = GetLogThread().OpenUnrequestedLogStream(
-      kDefaultRpcChannelId, GetRpcServer(), GetLogService());
+      kLoggingRpcChannelId, GetRpcServer(), GetLogService());
   if (!status.ok()) {
     PW_LOG_ERROR("Error opening unrequested log streams %d",
                  static_cast<int>(status.code()));
diff --git a/pw_system/log.cc b/pw_system/log.cc
index 652f3e2..3a5f0fd 100644
--- a/pw_system/log.cc
+++ b/pw_system/log.cc
@@ -46,7 +46,7 @@
     PW_GUARDED_BY(drains_mutex);
 
 std::array<RpcLogDrain, 1> drains{{
-    RpcLogDrain(kDefaultRpcChannelId,
+    RpcLogDrain(kLoggingRpcChannelId,
                 log_decode_buffer,
                 drains_mutex,
                 RpcLogDrain::LogDrainErrorHandling::kIgnoreWriterErrors),
diff --git a/pw_system/public/pw_system/config.h b/pw_system/public/pw_system/config.h
index 1dc3c3c..0d04158 100644
--- a/pw_system/public/pw_system/config.h
+++ b/pw_system/public/pw_system/config.h
@@ -44,6 +44,16 @@
 #define PW_SYSTEM_DEFAULT_CHANNEL_ID 1
 #endif  // PW_SYSTEM_DEFAULT_CHANNEL_ID
 
+// PW_SYSTEM_LOGGING_CHANNEL_ID logging RPC channel ID to host. If this is
+// different from PW_SYSTEM_DEFAULT_CHANNEL_ID, then
+// PW_SYSTEM_LOGGING_RPC_HDLC_ADDRESS must also be different from
+// PW_SYSTEM_DEFAULT_RPC_HDLC_ADDRESS.
+//
+// Defaults to PW_SYSTEM_DEFAULT_CHANNEL_ID.
+#ifndef PW_SYSTEM_LOGGING_CHANNEL_ID
+#define PW_SYSTEM_LOGGING_CHANNEL_ID PW_SYSTEM_DEFAULT_CHANNEL_ID
+#endif  // PW_SYSTEM_LOGGING_CHANNEL_ID
+
 // PW_SYSTEM_DEFAULT_RPC_HDLC_ADDRESS RPC HDLC default address.
 //
 // Defaults to 82.
@@ -51,6 +61,13 @@
 #define PW_SYSTEM_DEFAULT_RPC_HDLC_ADDRESS 82
 #endif  // PW_SYSTEM_DEFAULT_RPC_HDLC_ADDRESS
 
+// PW_SYSTEM_LOGGING_RPC_HDLC_ADDRESS RPC HDLC logging address.
+//
+// Defaults to PW_SYSTEM_DEFAULT_RPC_HDLC_ADDRESS.
+#ifndef PW_SYSTEM_LOGGING_RPC_HDLC_ADDRESS
+#define PW_SYSTEM_LOGGING_RPC_HDLC_ADDRESS PW_SYSTEM_DEFAULT_RPC_HDLC_ADDRESS
+#endif  // PW_SYSTEM_LOGGING_RPC_HDLC_ADDRESS
+
 // PW_SYSTEM_ENABLE_THREAD_SNAPSHOT_SERVICE specifies if the thread snapshot
 // RPC service is enabled.
 //
diff --git a/pw_system/public/pw_system/rpc_server.h b/pw_system/public/pw_system/rpc_server.h
index bf05fcc..0337570 100644
--- a/pw_system/public/pw_system/rpc_server.h
+++ b/pw_system/public/pw_system/rpc_server.h
@@ -23,10 +23,13 @@
 namespace pw::system {
 
 // This is the default channel used by the pw_system RPC server. Some other
-// parts of pw_system (e.g. logging) use this channel ID as the default
-// destination for unrequested data streams.
+// parts of pw_system use this channel ID as the default destination for
+// unrequested data streams.
 inline constexpr uint32_t kDefaultRpcChannelId = PW_SYSTEM_DEFAULT_CHANNEL_ID;
 
+// This is the channel ID used for logging.
+inline constexpr uint32_t kLoggingRpcChannelId = PW_SYSTEM_LOGGING_CHANNEL_ID;
+
 rpc::Server& GetRpcServer();
 
 thread::ThreadCore& GetRpcDispatchThread();
diff --git a/pw_system/py/pw_system/device.py b/pw_system/py/pw_system/device.py
index 8d8f1a2..f8cfe31 100644
--- a/pw_system/py/pw_system/device.py
+++ b/pw_system/py/pw_system/device.py
@@ -33,7 +33,7 @@
 from pw_thread_protos import thread_pb2
 from pw_tokenizer import detokenize
 from pw_tokenizer.proto import decode_optionally_tokenized
-from pw_unit_test.rpc import run_tests as pw_unit_test_run_tests
+from pw_unit_test.rpc import run_tests as pw_unit_test_run_tests, TestRecord
 
 # Internal log for troubleshooting this tool (the console).
 _LOG = logging.getLogger('tools')
@@ -144,7 +144,7 @@
         """Returns an object for accessing services on the specified channel."""
         return next(iter(self.client.client.channels())).rpcs
 
-    def run_tests(self, timeout_s: Optional[float] = 5) -> bool:
+    def run_tests(self, timeout_s: Optional[float] = 5) -> TestRecord:
         """Runs the unit tests on this device."""
         return pw_unit_test_run_tests(self.rpcs, timeout_s=timeout_s)
 
diff --git a/pw_system/system_target.gni b/pw_system/system_target.gni
index 4c138e8..d38ccd3 100644
--- a/pw_system/system_target.gni
+++ b/pw_system/system_target.gni
@@ -57,6 +57,18 @@
   NATIVE = "native"
 }
 
+declare_args() {
+  # This argument is intended to be user-facing and should NOT be set by a
+  # toolchain. This switches ALL pw_system_target toolchains to use the
+  # multi_endpoint_rpc_config config to illustrate a multi-endpoint mode that
+  # isolates logging and RPC traffic via HDLC multiplexing.
+  #
+  # If you would like to use this in production, it is strongly recommended that
+  # you instead just add the appropriate defines to your target's toolchain
+  # definition.
+  pw_system_USE_MULTI_ENDPOINT_CONFIG = false
+}
+
 # Defines a target toolchain, automatically setting many required build
 # arguments to simplify instantiation of a target.
 #
@@ -113,6 +125,10 @@
     # TODO(amontanez): This should be set to a "$dir_pw_unit_test:rpc_main"
     # when RPC is working.
     pw_unit_test_MAIN = "$dir_pw_unit_test:logging_main"
+
+    if (pw_system_USE_MULTI_ENDPOINT_CONFIG) {
+      pw_system_CONFIG = "$dir_pw_system:multi_endpoint_rpc_config"
+    }
   }
 
   # Populate architecture-specific build args.
diff --git a/pw_target_runner/docs.rst b/pw_target_runner/docs.rst
index 25b210d..17efcc8 100644
--- a/pw_target_runner/docs.rst
+++ b/pw_target_runner/docs.rst
@@ -51,7 +51,7 @@
 
 **server_config.txt**
 
-.. code:: text
+.. code-block:: text
 
   runner {
     command: "stm32f429i_disc1_unit_test_runner"
@@ -75,7 +75,7 @@
 To start the standalone server, run the ``pw_target_runner_server`` program and
 point it to your config file.
 
-.. code:: text
+.. code-block:: text
 
   $ pw_target_runner_server -config server_config.txt -port 8080
 
@@ -85,7 +85,7 @@
 To request the server to run an executable, run the ``pw_target_runner_client``,
 specifying the path to the executable through a ``-binary`` option.
 
-.. code:: text
+.. code-block:: text
 
   $ pw_target_runner_client -host localhost -port 8080 -binary /path/to/my/test.elf
 
diff --git a/pw_thread/docs.rst b/pw_thread/docs.rst
index 682526a..32438d0 100644
--- a/pw_thread/docs.rst
+++ b/pw_thread/docs.rst
@@ -510,7 +510,7 @@
 
 For example:
 
-.. code::
+.. code-block::
 
    #include "pw_rpc/server.h"
    #include "pw_thread/thread_snapshot_service.h"
diff --git a/pw_thread_freertos/BUILD.bazel b/pw_thread_freertos/BUILD.bazel
index ff5dfef..606dc41 100644
--- a/pw_thread_freertos/BUILD.bazel
+++ b/pw_thread_freertos/BUILD.bazel
@@ -228,6 +228,7 @@
     deps = [
         "//pw_function",
         "//pw_log",
+        "//pw_span",
         "//pw_status",
         "@freertos",
     ],
diff --git a/pw_thread_threadx/docs.rst b/pw_thread_threadx/docs.rst
index 026ed2b..d49c133 100644
--- a/pw_thread_threadx/docs.rst
+++ b/pw_thread_threadx/docs.rst
@@ -6,7 +6,7 @@
 This is a set of backends for pw_thread based on ThreadX.
 
 .. Warning::
-  This module is still under construction, the API is not yet stable.
+   This module is still under construction, the API is not yet stable.
 
 -----------------------
 Thread Creation Backend
@@ -21,48 +21,48 @@
 
 .. code-block:: cpp
 
-  #include "pw_thread/detached_thread.h"
-  #include "pw_thread_threadx/config.h"
-  #include "pw_thread_threadx/context.h"
-  #include "pw_thread_threadx/options.h"
-  #include "tx_api.h"
+   #include "pw_thread/detached_thread.h"
+   #include "pw_thread_threadx/config.h"
+   #include "pw_thread_threadx/context.h"
+   #include "pw_thread_threadx/options.h"
+   #include "tx_api.h"
 
-  constexpr UINT kFooPriority =
-      pw::thread::threadx::config::kDefaultPriority;
-  constexpr ULONG kFooTimeSliceInterval =
-      pw::thread::threadx::config::kDefaultTimeSliceInterval;
-  constexpr size_t kFooStackSizeWords =
-      pw::thread::threadx::config::kDefaultStackSizeWords;
+   constexpr UINT kFooPriority =
+       pw::thread::threadx::config::kDefaultPriority;
+   constexpr ULONG kFooTimeSliceInterval =
+       pw::thread::threadx::config::kDefaultTimeSliceInterval;
+   constexpr size_t kFooStackSizeWords =
+       pw::thread::threadx::config::kDefaultStackSizeWords;
 
-  pw::thread::threadx::ContextWithStack<kFooStackSizeWords>
-      example_thread_context;
-  void StartExampleThread() {
-    pw::thread::DetachedThread(
-        pw::thread::threadx::Options()
-            .set_name("example_thread")
-            .set_priority(kFooPriority)
-            .set_time_slice_interval(kFooTimeSliceInterval)
-            .set_context(example_thread_context),
-        example_thread_function);
-  }
+   pw::thread::threadx::ContextWithStack<kFooStackSizeWords>
+       example_thread_context;
+   void StartExampleThread() {
+     pw::thread::DetachedThread(
+         pw::thread::threadx::Options()
+             .set_name("example_thread")
+             .set_priority(kFooPriority)
+             .set_time_slice_interval(kFooTimeSliceInterval)
+             .set_context(example_thread_context),
+         example_thread_function);
+   }
 
 .. list-table::
 
-  * - :ref:`module-pw_thread` Facade
-    - Backend Target
-    - Description
-  * - ``pw_thread:id``
-    - ``pw_thread_threadx:id``
-    - Thread identification.
-  * - ``pw_thread:yield``
-    - ``pw_thread_threadx:yield``
-    - Thread scheduler yielding.
-  * - ``pw_thread:sleep``
-    - ``pw_thread_threadx:sleep``
-    - Thread scheduler sleeping.
-  * - ``pw_thread:thread``
-    - ``pw_thread_threadx:thread``
-    - Thread creation.
+   * - :ref:`module-pw_thread` Facade
+     - Backend Target
+     - Description
+   * - ``pw_thread:id``
+     - ``pw_thread_threadx:id``
+     - Thread identification.
+   * - ``pw_thread:yield``
+     - ``pw_thread_threadx:yield``
+     - Thread scheduler yielding.
+   * - ``pw_thread:sleep``
+     - ``pw_thread_threadx:sleep``
+     - Thread scheduler sleeping.
+   * - ``pw_thread:thread``
+     - ``pw_thread_threadx:thread``
+     - Thread creation.
 
 Module Configuration Options
 ============================
@@ -73,111 +73,111 @@
 
 .. c:macro:: PW_THREAD_THREADX_CONFIG_JOINING_ENABLED
 
-  Whether thread joining is enabled. By default this is disabled.
+   Whether thread joining is enabled. By default this is disabled.
 
-  We suggest only enabling this when thread joining is required to minimize
-  the RAM and ROM cost of threads.
+   We suggest only enabling this when thread joining is required to minimize
+   the RAM and ROM cost of threads.
 
-  Enabling this grows the RAM footprint of every pw::thread::Thread as it adds
-  a TX_EVENT_FLAGS_GROUP to every thread's pw::thread::threadx::Context. In
-  addition, there is a minute ROM cost to construct and destroy this added
-  object.
+   Enabling this grows the RAM footprint of every pw::thread::Thread as it adds
+   a TX_EVENT_FLAGS_GROUP to every thread's pw::thread::threadx::Context. In
+   addition, there is a minute ROM cost to construct and destroy this added
+   object.
 
-  PW_THREAD_JOINING_ENABLED gets set to this value.
+   PW_THREAD_JOINING_ENABLED gets set to this value.
 
 .. c:macro:: PW_THREAD_THREADX_CONFIG_DEFAULT_STACK_SIZE_WORDS
 
-  The default stack size in words. By default this uses the minimal ThreadX
-  stack size.
+   The default stack size in words. By default this uses the minimal ThreadX
+   stack size.
 
 .. c:macro:: PW_THREAD_THREADX_CONFIG_MAX_THREAD_NAME_LEN
 
-  The maximum length of a thread's name, not including null termination. By
-  default this is arbitrarily set to 15. This results in an array of characters
-  which is this length + 1 bytes in every pw::thread::Thread's context.
+   The maximum length of a thread's name, not including null termination. By
+   default this is arbitrarily set to 15. This results in an array of characters
+   which is this length + 1 bytes in every pw::thread::Thread's context.
 
 .. c:macro:: PW_THREAD_THREADX_CONFIG_DEFAULT_TIME_SLICE_INTERVAL
 
-  The round robin time slice tick interval for threads at the same priority.
-  By default this is disabled as not all ports support this, using a value of 0
-  ticks.
+   The round robin time slice tick interval for threads at the same priority.
+   By default this is disabled as not all ports support this, using a value of 0
+   ticks.
 
 .. c:macro:: PW_THREAD_THREADX_CONFIG_MIN_PRIORITY
 
-  The minimum priority level, this is normally based on the number of priority
-  levels.
+   The minimum priority level, this is normally based on the number of priority
+   levels.
 
 .. c:macro:: PW_THREAD_THREADX_CONFIG_DEFAULT_PRIORITY
 
-  The default priority level. By default this uses the minimal ThreadX
-  priority level, given that 0 is the highest priority.
+   The default priority level. By default this uses the minimal ThreadX
+   priority level, given that 0 is the highest priority.
 
 .. c:macro:: PW_THREAD_THREADX_CONFIG_LOG_LEVEL
 
-  The log level to use for this module. Logs below this level are omitted.
+   The log level to use for this module. Logs below this level are omitted.
 
 ThreadX Thread Options
 ======================
 .. cpp:class:: pw::thread::threadx::Options
 
-  .. cpp:function:: set_name(const char* name)
+   .. cpp:function:: set_name(const char* name)
 
-     Sets the name for the ThreadX thread, note that this will be deep copied
-     into the context and may be truncated based on
-     ``PW_THREAD_THREADX_CONFIG_MAX_THREAD_NAME_LEN``.
+      Sets the name for the ThreadX thread; note that this will be deep copied
+      into the context and may be truncated based on
+      ``PW_THREAD_THREADX_CONFIG_MAX_THREAD_NAME_LEN``.
 
-  .. cpp:function:: set_priority(UINT priority)
+   .. cpp:function:: set_priority(UINT priority)
 
-     Sets the priority for the ThreadX thread from 0 through 31, where a value
-     of 0 represents the highest priority, see ThreadX tx_thread_create for
-     more detail.
+      Sets the priority for the ThreadX thread from 0 through 31, where a value
+      of 0 represents the highest priority, see ThreadX tx_thread_create for
+      more detail.
 
-     **Precondition**: priority <= ``PW_THREAD_THREADX_CONFIG_MIN_PRIORITY``.
+      **Precondition**: priority <= ``PW_THREAD_THREADX_CONFIG_MIN_PRIORITY``.
 
-  .. cpp:function:: set_preemption_threshold(UINT preemption_threshold)
+   .. cpp:function:: set_preemption_threshold(UINT preemption_threshold)
 
-     Optionally sets the preemption threshold for the ThreadX thread from 0
-     through 31.
+      Optionally sets the preemption threshold for the ThreadX thread from 0
+      through 31.
 
-     Only priorities higher than this level (i.e. lower number) are allowed to
-     preempt this thread. In other words this allows the thread to specify the
-     priority ceiling for disabling preemption. Threads that have a higher
-     priority than the ceiling are still allowed to preempt while those with
-     less than the ceiling are not allowed to preempt.
+      Only priorities higher than this level (i.e. lower number) are allowed to
+      preempt this thread. In other words this allows the thread to specify the
+      priority ceiling for disabling preemption. Threads that have a higher
+      priority than the ceiling are still allowed to preempt while those with
+      less than the ceiling are not allowed to preempt.
 
-     Not setting the preemption threshold or explicitly specifying a value
-     equal to the priority disables preemption threshold.
+      Not setting the preemption threshold or explicitly specifying a value
+      equal to the priority disables preemption threshold.
 
-     Time slicing is disabled while the preemption threshold is enabled, i.e.
-     not equal to the priority, even if a time slice interval was specified.
+      Time slicing is disabled while the preemption threshold is enabled, i.e.
+      not equal to the priority, even if a time slice interval was specified.
 
-     The preemption threshold can be adjusted at run time, this only sets the
-     initial threshold.
+      The preemption threshold can be adjusted at run time, this only sets the
+      initial threshold.
 
-     **Precondition**: preemption_threshold <= priority
+      **Precondition**: preemption_threshold <= priority
 
-  .. cpp:function:: set_time_slice_interval(UINT time_slice_interval)
+   .. cpp:function:: set_time_slice_interval(UINT time_slice_interval)
 
-     Sets the number of ticks this thread is allowed to run before other ready
-     threads of the same priority are given a chance to run.
+      Sets the number of ticks this thread is allowed to run before other ready
+      threads of the same priority are given a chance to run.
 
-     Time slicing is disabled while the preemption threshold is enabled, i.e.
-     not equal to the priority, even if a time slice interval was specified.
+      Time slicing is disabled while the preemption threshold is enabled, i.e.
+      not equal to the priority, even if a time slice interval was specified.
 
-     A value of ``TX_NO_TIME_SLICE`` (a value of 0) disables time-slicing of
-     this thread.
+      A value of ``TX_NO_TIME_SLICE`` (a value of 0) disables time-slicing of
+      this thread.
 
-     Using time slicing results in a slight amount of system overhead, threads
-     with a unique priority should consider ``TX_NO_TIME_SLICE``.
+      Using time slicing results in a slight amount of system overhead, threads
+      with a unique priority should consider ``TX_NO_TIME_SLICE``.
 
 
-  .. cpp:function:: set_context(pw::thread::embos::Context& context)
+   .. cpp:function:: set_context(pw::thread::embos::Context& context)
 
-     Set the pre-allocated context (all memory needed to run a thread). Note
-     that this is required for this thread creation backend! The Context can
-     either be constructed with an externally provided ``pw::span<ULONG>``
-     stack or the templated form of ``ContextWihtStack<kStackSizeWords`` can be
-     used.
+      Set the pre-allocated context (all memory needed to run a thread). Note
+      that this is required for this thread creation backend! The Context can
+      either be constructed with an externally provided ``pw::span<ULONG>``
+      stack or the templated form of ``ContextWithStack<kStackSizeWords>`` can be
+      used.
 
 -----------------------------
 Thread Identification Backend
@@ -234,18 +234,18 @@
 running thread must be provided for cases where the running thread is being
 captured. For ARM Cortex-M CPUs, you can do something like this:
 
-.. Code:: cpp
+.. code-block:: cpp
 
-  // Capture PSP.
-  void* stack_ptr = 0;
-  asm volatile("mrs %0, psp\n" : "=r"(stack_ptr));
-  pw::thread::ProcessThreadStackCallback cb =
-      [](pw::thread::proto::Thread::StreamEncoder& encoder,
-         pw::ConstByteSpan stack) -> pw::Status {
-    return encoder.WriteRawStack(stack);
-  };
-  pw::thread::threadx::SnapshotThread(my_thread, stack_ptr,
-                                      snapshot_encoder, cb);
+   // Capture PSP.
+   void* stack_ptr = 0;
+   asm volatile("mrs %0, psp\n" : "=r"(stack_ptr));
+   pw::thread::ProcessThreadStackCallback cb =
+       [](pw::thread::proto::Thread::StreamEncoder& encoder,
+          pw::ConstByteSpan stack) -> pw::Status {
+     return encoder.WriteRawStack(stack);
+   };
+   pw::thread::threadx::SnapshotThread(my_thread, stack_ptr,
+                                       snapshot_encoder, cb);
 
 ``SnapshotThreads()`` wraps the singular thread capture to instead captures
 all created threads to a ``pw::thread::proto::SnapshotThreadInfo`` message.
diff --git a/pw_tls_client/BUILD.bazel b/pw_tls_client/BUILD.bazel
index 42ac123..0aa1fa5 100644
--- a/pw_tls_client/BUILD.bazel
+++ b/pw_tls_client/BUILD.bazel
@@ -37,6 +37,7 @@
         "//pw_result",
         "//pw_status",
         "//pw_stream",
+        "//pw_string",
     ],
 )
 
diff --git a/pw_tls_client/docs.rst b/pw_tls_client/docs.rst
index 78c9cf0..774abdc 100644
--- a/pw_tls_client/docs.rst
+++ b/pw_tls_client/docs.rst
@@ -16,18 +16,18 @@
 connection options. The list of supported configurations currently include:
 
 1. Host name of the target server. This will be used as the Server Name
-Indication(SNI) extension during TLS handshake.
+   Indication (SNI) extension during TLS handshake.
 
 2. User-implemented transport. The underlying transport for the TLS
-communication. It is an object that implements the interface of
-``pw::stream::ReaderWriter``.
+   communication. It is an object that implements the interface of
+   ``pw::stream::ReaderWriter``.
 
 The module will also provide mechanisms/APIs for users to specify sources of
 trust anchors, time and entropy. These are under construction.
 
 .. warning::
-  This module is under construction, not ready for use, and the documentation
-  is incomplete.
+   This module is under construction, not ready for use, and the documentation
+   is incomplete.
 
 Prerequisites
 =============
@@ -50,7 +50,7 @@
 
 .. code-block:: sh
 
-  pw package install chromium_verifier
+   pw package install chromium_verifier
 
 Then follow instruction for setting ``dir_pw_third_party_chromium_verifier`` to
 the path of the downloaded repo.
@@ -90,7 +90,7 @@
 
 .. code-block:: sh
 
-  pw package install crlset --force
+   pw package install crlset --force
 
 The `--force` option forces CRLSet to be always re-downloaded so that it is
 up-to-date. Project that are concerned about up-to-date CRLSet should always
@@ -103,14 +103,14 @@
 =====
 This module requires the following setup:
 
-  1. Choose a ``pw_tls_client`` backend, or write one yourself.
-  2. If using GN build, Specify the ``pw_tls_client_BACKEND`` GN build arg to
-     point the library that provides a ``pw_tls_client`` backend. To use the
-     MbedTLS backend, set variable ``pw_tls_client_BACKEND`` to
-     ``//pw_tls_client_mbedtls``. To use the BoringSSL backend, set it to
-     ``//pw_tls_client_boringssl``.
-  3. Provide a `pw_tls_client:entropy` backend. If using GN build, specify the
-     backend with variable ``pw_tls_client_ENTROPY_BACKEND``.
+1. Choose a ``pw_tls_client`` backend, or write one yourself.
+2. If using GN build, Specify the ``pw_tls_client_BACKEND`` GN build arg to
+   point the library that provides a ``pw_tls_client`` backend. To use the
+   MbedTLS backend, set variable ``pw_tls_client_BACKEND`` to
+   ``//pw_tls_client_mbedtls``. To use the BoringSSL backend, set it to
+   ``//pw_tls_client_boringssl``.
+3. Provide a ``pw_tls_client:entropy`` backend. If using GN build, specify the
+   backend with variable ``pw_tls_client_ENTROPY_BACKEND``.
 
 Module usage
 ============
@@ -122,111 +122,109 @@
 
 .. code-block:: cpp
 
-  // Host domain name
-  constexpr char kHost[] = "www.google.com";
+   // Host domain name
+   constexpr char kHost[] = "www.google.com";
 
-  constexpr int kPort = 443;
+   constexpr int kPort = 443;
 
-  // Server Name Indication.
-  constexpr const char* kServerNameIndication = kHost;
+   // Server Name Indication.
+   constexpr const char* kServerNameIndication = kHost;
 
-  // An example message to send.
-  constexpr char kHTTPRequest[] = "GET / HTTP/1.1\r\n\r\n";
+   // An example message to send.
+   constexpr char kHTTPRequest[] = "GET / HTTP/1.1\r\n\r\n";
 
-  // pw::stream::SocketStream doesn't accept host domain name as input. Thus we
-  // introduce this helper function for getting the IP address
-  pw::Status GetIPAddrFromHostName(std::string_view host, pw::span<char> ip) {
-    char null_terminated_host_name[256] = {0};
-    auto host_copy_status = pw::string::Copy(host, null_terminated_host_name);
-    if (!host_copy_status.ok()) {
-      return host_copy_status.status();
-    }
+   // pw::stream::SocketStream doesn't accept host domain name as input. Thus we
+   // introduce this helper function for getting the IP address
+   pw::Status GetIPAddrFromHostName(std::string_view host, pw::span<char> ip) {
+     char null_terminated_host_name[256] = {0};
+     auto host_copy_status = pw::string::Copy(host, null_terminated_host_name);
+     if (!host_copy_status.ok()) {
+       return host_copy_status.status();
+     }
 
-    struct hostent* ent = gethostbyname(null_terminated_host_name);
-    if (ent == NULL) {
-      return PW_STATUS_INTERNAL;
-    }
+     struct hostent* ent = gethostbyname(null_terminated_host_name);
+     if (ent == NULL) {
+       return PW_STATUS_INTERNAL;
+     }
 
-    in_addr** addr_list = reinterpret_cast<in_addr**>(ent->h_addr_list);
-    if (addr_list[0] == nullptr) {
-      return PW_STATUS_INTERNAL;
-    }
+     in_addr** addr_list = reinterpret_cast<in_addr**>(ent->h_addr_list);
+     if (addr_list[0] == nullptr) {
+       return PW_STATUS_INTERNAL;
+     }
 
-    auto ip_copy_status = pw::string::Copy(inet_ntoa(*addr_list[0]), ip);
-    if (!ip_copy_status.ok()) {
-      return ip_copy_status.status();
-    }
+     auto ip_copy_status = pw::string::Copy(inet_ntoa(*addr_list[0]), ip);
+     if (!ip_copy_status.ok()) {
+       return ip_copy_status.status();
+     }
 
-    return pw::OkStatus();
-  }
+     return pw::OkStatus();
+   }
 
-  int main() {
-    // Get the IP address of the target host.
-    char ip_address[64] = {0};
-    auto get_ip_status = GetIPAddrFromHostName(kHost, ip_address);
-    if (!get_ip_status.ok()) {
-      return 1;
-    }
+   int main() {
+     // Get the IP address of the target host.
+     char ip_address[64] = {0};
+     auto get_ip_status = GetIPAddrFromHostName(kHost, ip_address);
+     if (!get_ip_status.ok()) {
+       return 1;
+     }
 
-    // Use a socket stream as the transport.
-    pw::stream::SocketStream socket_stream;
+     // Use a socket stream as the transport.
+     pw::stream::SocketStream socket_stream;
 
-    // Connect the socket to the remote host.
-    auto socket_connect_status = socket_stream.Connect(ip_address, kPort);
-    if (!socket_connect_status.ok()) {
-      return 1;
-    }
+     // Connect the socket to the remote host.
+     auto socket_connect_status = socket_stream.Connect(ip_address, kPort);
+     if (!socket_connect_status.ok()) {
+       return 1;
+     }
 
-    // Create a TLS session. Register the transport.
-    auto options = pw::tls_client::SessionOptions()
-            .set_server_name(kServerNameIndication)
-            .set_transport(socket_stream);
-    auto tls_conn = pw::tls_client::Session::Create(options);
-    if (!tls_conn.ok()) {
-      // Handle errors.
-      return 1;
-    }
+     // Create a TLS session. Register the transport.
+     auto options = pw::tls_client::SessionOptions()
+             .set_server_name(kServerNameIndication)
+             .set_transport(socket_stream);
+     auto tls_conn = pw::tls_client::Session::Create(options);
+     if (!tls_conn.ok()) {
+       // Handle errors.
+       return 1;
+     }
 
-    auto open_status = tls_conn.value()->Open();
-    if (!open_status.ok()) {
-      // Inspect/handle error with open_status.code() and
-      // tls_conn.value()->GetLastTLSStatus().
-      return 1;
-    }
+     auto open_status = tls_conn.value()->Open();
+     if (!open_status.ok()) {
+       // Inspect/handle error with open_status.code() and
+       // tls_conn.value()->GetLastTLSStatus().
+       return 1;
+     }
 
-    auto write_status = tls_conn.value()->Write(pw::as_bytes(pw::span{kHTTPRequest}));
-    if (!write_status.ok()) {
-      // Inspect/handle error with write_status.code() and
-      // tls_conn.value()->GetLastTLSStatus().
-      return 0;
-    }
+     auto write_status = tls_conn.value()->Write(pw::as_bytes(pw::span{kHTTPRequest}));
+     if (!write_status.ok()) {
+       // Inspect/handle error with write_status.code() and
+       // tls_conn.value()->GetLastTLSStatus().
+       return 1;
+     }
 
-    // Listen for incoming data.
-    std::array<std::byte, 4096> buffer;
-    while (true) {
-      auto res = tls_conn.value()->Read(buffer);
-      if (!res.ok()) {
-        // Inspect/handle error with res.status().code() and
-        // tls_conn.value()->GetLastTLSStatus().
-        return 1;
-      }
+     // Listen for incoming data.
+     std::array<std::byte, 4096> buffer;
+     while (true) {
+       auto res = tls_conn.value()->Read(buffer);
+       if (!res.ok()) {
+         // Inspect/handle error with res.status().code() and
+         // tls_conn.value()->GetLastTLSStatus().
+         return 1;
+       }
 
-      // Process data in |buffer|. res.value() gives the span of read bytes.
-      // The following simply print to console.
-      if (res.value().size()) {
-        auto print_status = pw::sys_io::WriteBytes(res.value());
-        if (!print_status.ok()) {
-          return 1;
-        }
-      }
+       // Process data in |buffer|. res.value() gives the span of read bytes.
+       // The following simply print to console.
+       if (res.value().size()) {
+         auto print_status = pw::sys_io::WriteBytes(res.value());
+         if (!print_status.ok()) {
+           return 1;
+         }
+       }
 
-    }
-  }
+     }
+   }
 
 A list of other demos will be provided in ``//pw_tls_client/examples/``
 
-Warning
-============
-
-Open()/Read() APIs are synchronous for now. Support for
-non-blocking/asynchronous usage will be added in the future.
+.. warning::
+   Open()/Read() APIs are synchronous for now. Support for
+   non-blocking/asynchronous usage will be added in the future.
diff --git a/pw_tokenizer/BUILD.bazel b/pw_tokenizer/BUILD.bazel
index 30cfe20..32322d5 100644
--- a/pw_tokenizer/BUILD.bazel
+++ b/pw_tokenizer/BUILD.bazel
@@ -18,8 +18,8 @@
     "pw_cc_library",
     "pw_cc_test",
 )
-load("//pw_fuzzer:fuzzer.bzl", "pw_cc_fuzz_test")
 load("//pw_build/bazel_internal:py_proto_library.bzl", "py_proto_library")
+load("//pw_fuzzer:fuzzer.bzl", "pw_cc_fuzz_test")
 
 package(default_visibility = ["//visibility:public"])
 
@@ -94,8 +94,11 @@
     ],
     includes = ["public"],
     deps = [
+        ":base64",
         "//pw_bytes",
+        "//pw_log_tokenized:headers",
         "//pw_span",
+        "//pw_string",
         "//pw_varint",
     ],
 )
diff --git a/pw_tokenizer/BUILD.gn b/pw_tokenizer/BUILD.gn
index ccb127d..a6e1163 100644
--- a/pw_tokenizer/BUILD.gn
+++ b/pw_tokenizer/BUILD.gn
@@ -77,8 +77,8 @@
     dir_pw_polyfill,
     dir_pw_preprocessor,
     dir_pw_span,
+    dir_pw_varint,
   ]
-  deps = [ dir_pw_varint ]
   public = [
     "public/pw_tokenizer/encode_args.h",
     "public/pw_tokenizer/hash.h",
@@ -119,8 +119,12 @@
     dir_pw_span,
   ]
   deps = [
+    ":base64",
     "$dir_pw_bytes:bit",
+    "$dir_pw_log_tokenized:config",
+    dir_pw_base64,
     dir_pw_bytes,
+    dir_pw_string,
     dir_pw_varint,
   ]
   public = [
@@ -163,20 +167,21 @@
     ":argument_types_test",
     ":base64_test",
     ":decode_test",
-    ":detokenize_fuzzer_test",
     ":detokenize_test",
     ":encode_args_test",
     ":hash_test",
     ":simple_tokenize_test",
-    ":token_database_fuzzer_test",
     ":token_database_test",
     ":tokenize_test",
   ]
-  group_deps = [ "$dir_pw_preprocessor:tests" ]
+  group_deps = [
+    ":fuzzers",
+    "$dir_pw_preprocessor:tests",
+  ]
 }
 
-group("fuzzers") {
-  deps = [
+pw_fuzzer_group("fuzzers") {
+  fuzzers = [
     ":detokenize_fuzzer",
     ":token_database_fuzzer",
   ]
@@ -318,10 +323,11 @@
 pw_doc_group("docs") {
   sources = [
     "api.rst",
-    "cli.rst",
-    "design.rst",
+    "detokenization.rst",
     "docs.rst",
-    "guides.rst",
+    "get_started.rst",
+    "token_databases.rst",
+    "tokenization.rst",
   ]
   inputs = [ "py/pw_tokenizer/encode.py" ]
   report_deps = [ ":tokenizer_size_report" ]
diff --git a/pw_tokenizer/CMakeLists.txt b/pw_tokenizer/CMakeLists.txt
index ad4bd18..a8f6bf1 100644
--- a/pw_tokenizer/CMakeLists.txt
+++ b/pw_tokenizer/CMakeLists.txt
@@ -38,6 +38,7 @@
     pw_span
     pw_preprocessor
     pw_tokenizer.config
+    pw_varint
   SOURCES
     encode_args.cc
     hash.cc
@@ -50,8 +51,6 @@
     public/pw_tokenizer/internal/pw_tokenizer_65599_fixed_length_96_hash_macro.h
     public/pw_tokenizer/internal/tokenize_string.h
     tokenize.cc
-  PRIVATE_DEPS
-    pw_varint
 )
 
 if(Zephyr_FOUND AND CONFIG_PIGWEED_TOKENIZER)
@@ -92,8 +91,11 @@
   PUBLIC_INCLUDES
     public
   PUBLIC_DEPS
+    pw_log_tokenized.config
     pw_span
+    pw_string
     pw_tokenizer
+    pw_tokenizer.base64
   SOURCES
     decode.cc
     detokenize.cc
diff --git a/pw_tokenizer/api.rst b/pw_tokenizer/api.rst
index a7d94af..e8693d9 100644
--- a/pw_tokenizer/api.rst
+++ b/pw_tokenizer/api.rst
@@ -1,39 +1,93 @@
+:tocdepth: 2
+
 .. _module-pw_tokenizer-api:
 
-=============
-API reference
-=============
+==========================
+pw_tokenizer API reference
+==========================
 .. pigweed-module-subpage::
    :name: pw_tokenizer
    :tagline: Compress strings to shrink logs by +75%
 
--------
-C++ / C
--------
-.. doxygenfunction:: pw::tokenizer::EncodeArgs
-.. doxygenclass:: pw::tokenizer::EncodedMessage
-   :members:
-.. doxygenfunction:: pw::tokenizer::MinEncodingBufferSizeBytes
-.. doxygendefine:: PW_TOKENIZE_FORMAT_STRING
-.. doxygendefine:: PW_TOKENIZE_STRING
-.. doxygendefine:: PW_TOKENIZE_STRING_DOMAIN
-.. doxygendefine:: PW_TOKENIZE_STRING_DOMAIN_EXPR
-.. doxygendefine:: PW_TOKENIZE_STRING_EXPR
-.. doxygendefine:: PW_TOKENIZE_STRING_MASK
-.. doxygendefine:: PW_TOKENIZE_STRING_MASK_EXPR
-.. doxygendefine:: PW_TOKENIZE_TO_BUFFER
-.. doxygendefine:: PW_TOKENIZE_TO_BUFFER_DOMAIN
-.. doxygendefine:: PW_TOKENIZE_TO_BUFFER_MASK
-.. doxygendefine:: PW_TOKENIZER_ARG_TYPES
-.. doxygenfunction:: pw_tokenizer_EncodeArgs
-.. doxygentypedef:: pw_tokenizer_Token
+.. _module-pw_tokenizer-api-configuration:
 
-------
-Python
-------
-.. autofunction:: pw_tokenizer.encode.encode_token_and_args
-   :noindex:
-.. automodule:: pw_tokenizer.proto
-   :members:
-.. autofunction:: pw_tokenizer.tokens.pw_tokenizer_65599_hash
-   :noindex:
+-------------
+Configuration
+-------------
+.. tab-set::
+
+   .. tab-item:: C++ / C
+      :sync: cpp
+
+      .. doxygenfile:: pw_tokenizer/config.h
+         :sections: define
+
+------------
+Tokenization
+------------
+.. tab-set::
+
+   .. tab-item:: C++ / C
+      :sync: cpp
+
+      .. doxygenfunction:: pw::tokenizer::EncodeArgs
+      .. doxygenclass:: pw::tokenizer::EncodedMessage
+         :members:
+      .. doxygenfunction:: pw::tokenizer::MinEncodingBufferSizeBytes
+      .. doxygendefine:: PW_TOKENIZE_FORMAT_STRING
+      .. doxygendefine:: PW_TOKENIZE_FORMAT_STRING_ANY_ARG_COUNT
+      .. doxygendefine:: PW_TOKENIZE_STRING
+      .. doxygendefine:: PW_TOKENIZE_STRING_DOMAIN
+      .. doxygendefine:: PW_TOKENIZE_STRING_DOMAIN_EXPR
+      .. doxygendefine:: PW_TOKENIZE_STRING_EXPR
+      .. doxygendefine:: PW_TOKENIZE_STRING_MASK
+      .. doxygendefine:: PW_TOKENIZE_STRING_MASK_EXPR
+      .. doxygendefine:: PW_TOKENIZE_TO_BUFFER
+      .. doxygendefine:: PW_TOKENIZE_TO_BUFFER_DOMAIN
+      .. doxygendefine:: PW_TOKENIZE_TO_BUFFER_MASK
+      .. doxygendefine:: PW_TOKENIZER_REPLACE_FORMAT_STRING
+      .. doxygendefine:: PW_TOKENIZER_ARG_TYPES
+      .. doxygenfunction:: pw_tokenizer_EncodeArgs
+      .. doxygenfunction:: pw_tokenizer_EncodeInt
+      .. doxygenfunction:: pw_tokenizer_EncodeInt64
+      .. doxygentypedef:: pw_tokenizer_Token
+
+   .. tab-item:: Python
+      :sync: py
+
+      .. autofunction:: pw_tokenizer.encode.encode_token_and_args
+      .. autofunction:: pw_tokenizer.tokens.pw_tokenizer_65599_hash
+
+   .. tab-item:: Rust
+      :sync: rs
+
+      See `Crate pw_tokenizer </rustdoc/pw_tokenizer/>`_.
+
+.. _module-pw_tokenizer-api-token-databases:
+
+---------------
+Token databases
+---------------
+.. tab-set::
+
+   .. tab-item:: C++ / C
+      :sync: cpp
+
+      .. doxygenclass:: pw::tokenizer::TokenDatabase
+         :members:
+
+.. _module-pw_tokenizer-api-detokenization:
+
+--------------
+Detokenization
+--------------
+.. tab-set::
+
+   .. tab-item:: Python
+      :sync: py
+
+      .. automodule:: pw_tokenizer.detokenize
+         :members:
+
+      .. automodule:: pw_tokenizer.proto
+         :members:
diff --git a/pw_tokenizer/base64_test.cc b/pw_tokenizer/base64_test.cc
index fee5a67..babc875 100644
--- a/pw_tokenizer/base64_test.cc
+++ b/pw_tokenizer/base64_test.cc
@@ -97,7 +97,7 @@
 
 TEST_F(PrefixedBase64, Encode_InlineString) {
   for (auto& [binary, base64] : kTestData) {
-    EXPECT_EQ(base64, PrefixedBase64Encode(binary));
+    EXPECT_EQ(base64, PrefixedBase64Encode<64>(binary));
   }
 }
 
diff --git a/pw_tokenizer/cli.rst b/pw_tokenizer/cli.rst
deleted file mode 100644
index 2e08ad3..0000000
--- a/pw_tokenizer/cli.rst
+++ /dev/null
@@ -1,57 +0,0 @@
-.. _module-pw_tokenizer-cli:
-
-=============
-CLI reference
-=============
-.. pigweed-module-subpage::
-   :name: pw_tokenizer
-   :tagline: Compress strings to shrink logs by +75%
-
-.. _module-pw_tokenizer-cli-encoding:
-
-pw_tokenizer.encode: Encoding command line utility
-==================================================
-The ``pw_tokenizer.encode`` command line tool can be used to encode
-format strings and optional arguments.
-
-.. code-block:: bash
-
-  python -m pw_tokenizer.encode [-h] FORMAT_STRING [ARG ...]
-
-Example:
-
-.. code-block:: text
-
-  $ python -m pw_tokenizer.encode "There's... %d many of %s!" 2 them
-        Raw input: "There's... %d many of %s!" % (2, 'them')
-  Formatted input: There's... 2 many of them!
-            Token: 0xb6ef8b2d
-          Encoded: b'-\x8b\xef\xb6\x04\x04them' (2d 8b ef b6 04 04 74 68 65 6d) [10 bytes]
-  Prefixed Base64: $LYvvtgQEdGhlbQ==
-
-See ``--help`` for full usage details.
-
-.. _module-pw_tokenizer-cli-detokenizing:
-
-Detokenizing command line utilties
-==================================
-``pw_tokenizer`` provides two standalone command line utilities for detokenizing
-Base64-encoded tokenized strings.
-
-* ``detokenize.py`` -- Detokenizes Base64-encoded strings in files or from
-  stdin.
-* ``serial_detokenizer.py`` -- Detokenizes Base64-encoded strings from a
-  connected serial device.
-
-If the ``pw_tokenizer`` Python package is installed, these tools may be executed
-as runnable modules. For example:
-
-.. code-block::
-
-   # Detokenize Base64-encoded strings in a file
-   python -m pw_tokenizer.detokenize -i input_file.txt
-
-   # Detokenize Base64-encoded strings in output from a serial device
-   python -m pw_tokenizer.serial_detokenizer --device /dev/ttyACM0
-
-See the ``--help`` options for these tools for full usage information.
diff --git a/pw_tokenizer/design.rst b/pw_tokenizer/design.rst
deleted file mode 100644
index 3a7e3be..0000000
--- a/pw_tokenizer/design.rst
+++ /dev/null
@@ -1,765 +0,0 @@
-.. _module-pw_tokenizer-design:
-
-======
-Design
-======
-.. pigweed-module-subpage::
-   :name: pw_tokenizer
-   :tagline: Compress strings to shrink logs by +75%
-
---------
-Encoding
---------
-The token is a 32-bit hash calculated during compilation. The string is encoded
-little-endian with the token followed by arguments, if any. For example, the
-31-byte string ``You can go about your business.`` hashes to 0xdac9a244.
-This is encoded as 4 bytes: ``44 a2 c9 da``.
-
-Arguments are encoded as follows:
-
-* **Integers**  (1--10 bytes) --
-  `ZagZag and varint encoded <https://developers.google.com/protocol-buffers/docs/encoding#signed-integers>`_,
-  similarly to Protocol Buffers. Smaller values take fewer bytes.
-* **Floating point numbers** (4 bytes) -- Single precision floating point.
-* **Strings** (1--128 bytes) -- Length byte followed by the string contents.
-  The top bit of the length whether the string was truncated or not. The
-  remaining 7 bits encode the string length, with a maximum of 127 bytes.
-
-.. TODO(hepler): insert diagram here!
-
-.. tip::
-   ``%s`` arguments can quickly fill a tokenization buffer. Keep ``%s``
-   arguments short or avoid encoding them as strings (e.g. encode an enum as an
-   integer instead of a string). See also
-   :ref:`module-pw_tokenizer-tokenized-strings-as-args`.
-
-------------------------------------------------------
-Token generation: fixed length hashing at compile time
-------------------------------------------------------
-String tokens are generated using a modified version of the x65599 hash used by
-the SDBM project. All hashing is done at compile time.
-
-In C code, strings are hashed with a preprocessor macro. For compatibility with
-macros, the hash must be limited to a fixed maximum number of characters. This
-value is set by ``PW_TOKENIZER_CFG_C_HASH_LENGTH``. Increasing
-``PW_TOKENIZER_CFG_C_HASH_LENGTH`` increases the compilation time for C due to
-the complexity of the hashing macros.
-
-C++ macros use a constexpr function instead of a macro. This function works with
-any length of string and has lower compilation time impact than the C macros.
-For consistency, C++ tokenization uses the same hash algorithm, but the
-calculated values will differ between C and C++ for strings longer than
-``PW_TOKENIZER_CFG_C_HASH_LENGTH`` characters.
-
-.. _module-pw_tokenizer-proto:
-
-------------------------------------
-Tokenized fields in protocol buffers
-------------------------------------
-Text may be represented in a few different ways:
-
-- Plain ASCII or UTF-8 text (``This is plain text``)
-- Base64-encoded tokenized message (``$ibafcA==``)
-- Binary-encoded tokenized message (``89 b6 9f 70``)
-- Little-endian 32-bit integer token (``0x709fb689``)
-
-``pw_tokenizer`` provides tools for working with protobuf fields that may
-contain tokenized text.
-
-See :ref:`module-pw_tokenizer-protobuf-tokenization-python` for guidance
-on tokenizing protobufs in Python.
-
-Tokenized field protobuf option
-===============================
-``pw_tokenizer`` provides the ``pw.tokenizer.format`` protobuf field option.
-This option may be applied to a protobuf field to indicate that it may contain a
-tokenized string. A string that is optionally tokenized is represented with a
-single ``bytes`` field annotated with ``(pw.tokenizer.format) =
-TOKENIZATION_OPTIONAL``.
-
-For example, the following protobuf has one field that may contain a tokenized
-string.
-
-.. code-block:: protobuf
-
-  message MessageWithOptionallyTokenizedField {
-    bytes just_bytes = 1;
-    bytes maybe_tokenized = 2 [(pw.tokenizer.format) = TOKENIZATION_OPTIONAL];
-    string just_text = 3;
-  }
-
-Decoding optionally tokenized strings
-=====================================
-The encoding used for an optionally tokenized field is not recorded in the
-protobuf. Despite this, the text can reliably be decoded. This is accomplished
-by attempting to decode the field as binary or Base64 tokenized data before
-treating it like plain text.
-
-The following diagram describes the decoding process for optionally tokenized
-fields in detail.
-
-.. mermaid::
-
-  flowchart TD
-     start([Received bytes]) --> binary
-
-     binary[Decode as<br>binary tokenized] --> binary_ok
-     binary_ok{Detokenizes<br>successfully?} -->|no| utf8
-     binary_ok -->|yes| done_binary([Display decoded binary])
-
-     utf8[Decode as UTF-8] --> utf8_ok
-     utf8_ok{Valid UTF-8?} -->|no| base64_encode
-     utf8_ok -->|yes| base64
-
-     base64_encode[Encode as<br>tokenized Base64] --> display
-     display([Display encoded Base64])
-
-     base64[Decode as<br>Base64 tokenized] --> base64_ok
-
-     base64_ok{Fully<br>or partially<br>detokenized?} -->|no| is_plain_text
-     base64_ok -->|yes| base64_results
-
-     is_plain_text{Text is<br>printable?} -->|no| base64_encode
-     is_plain_text-->|yes| plain_text
-
-     base64_results([Display decoded Base64])
-     plain_text([Display text])
-
-Potential decoding problems
----------------------------
-The decoding process for optionally tokenized fields will yield correct results
-in almost every situation. In rare circumstances, it is possible for it to fail,
-but these can be avoided with a low-overhead mitigation if desired.
-
-There are two ways in which the decoding process may fail.
-
-Accidentally interpreting plain text as tokenized binary
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-If a plain-text string happens to decode as a binary tokenized message, the
-incorrect message could be displayed. This is very unlikely to occur. While many
-tokens will incidentally end up being valid UTF-8 strings, it is highly unlikely
-that a device will happen to log one of these strings as plain text. The
-overwhelming majority of these strings will be nonsense.
-
-If an implementation wishes to guard against this extremely improbable
-situation, it is possible to prevent it. This situation is prevented by
-appending 0xFF (or another byte never valid in UTF-8) to binary tokenized data
-that happens to be valid UTF-8 (or all binary tokenized messages, if desired).
-When decoding, if there is an extra 0xFF byte, it is discarded.
-
-Displaying undecoded binary as plain text instead of Base64
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-If a message fails to decode as binary tokenized and it is not valid UTF-8, it
-is displayed as tokenized Base64. This makes it easily recognizable as a
-tokenized message and makes it simple to decode later from the text output (for
-example, with an updated token database).
-
-A binary message for which the token is not known may coincidentally be valid
-UTF-8 or ASCII. 6.25% of 4-byte sequences are composed only of ASCII characters.
-When decoding with an out-of-date token database, it is possible that some
-binary tokenized messages will be displayed as plain text rather than tokenized
-Base64.
-
-This situation is likely to occur, but should be infrequent. Even if it does
-happen, it is not a serious issue. A very small number of strings will be
-displayed incorrectly, but these strings cannot be decoded anyway. One nonsense
-string (e.g. ``a-D1``) would be displayed instead of another (``$YS1EMQ==``).
-Updating the token database would resolve the issue, though the non-Base64 logs
-would be difficult decode later from a log file.
-
-This situation can be avoided with the same approach described in
-`Accidentally interpreting plain text as tokenized binary`_. Appending
-an invalid UTF-8 character prevents the undecoded binary message from being
-interpreted as plain text.
-
-.. _module-pw_tokenizer-base64-format:
-
--------------
-Base64 format
--------------
-The tokenizer encodes messages to a compact binary representation. Applications
-may desire a textual representation of tokenized strings. This makes it easy to
-use tokenized messages alongside plain text messages, but comes at a small
-efficiency cost: encoded Base64 messages occupy about 4/3 (133%) as much memory
-as binary messages.
-
-The Base64 format is comprised of a ``$`` character followed by the
-Base64-encoded contents of the tokenized message. For example, consider
-tokenizing the string ``This is an example: %d!`` with the argument -1. The
-string's token is 0x4b016e66.
-
-.. code-block:: text
-
-   Source code: PW_LOG("This is an example: %d!", -1);
-
-    Plain text: This is an example: -1! [23 bytes]
-
-        Binary: 66 6e 01 4b 01          [ 5 bytes]
-
-        Base64: $Zm4BSwE=               [ 9 bytes]
-
-See :ref:`module-pw_tokenizer-base64-encoding` and :ref:`module-pw_tokenizer-base64-decoding`
-for guidance on encoding and decoding Base64.
-
-.. _module-pw_tokenizer-token-databases:
-
----------------
-Token databases
----------------
-Token databases store a mapping of tokens to the strings they represent. An ELF
-file can be used as a token database, but it only contains the strings for its
-exact build. A token database file aggregates tokens from multiple ELF files, so
-that a single database can decode tokenized strings from any known ELF.
-
-Token databases contain the token, removal date (if any), and string for each
-tokenized string.
-
-For help with using token databases, see
-:ref:`module-pw_tokenizer-managing-token-databases`.
-
-Token database formats
-======================
-Three token database formats are supported: CSV, binary, and directory. Tokens
-may also be read from ELF files or ``.a`` archives, but cannot be written to
-these formats.
-
-CSV database format
--------------------
-The CSV database format has three columns: the token in hexadecimal, the removal
-date (if any) in year-month-day format, and the string literal, surrounded by
-quotes. Quote characters within the string are represented as two quote
-characters.
-
-This example database contains six strings, three of which have removal dates.
-
-.. code-block::
-
-   141c35d5,          ,"The answer: ""%s"""
-   2e668cd6,2019-12-25,"Jello, world!"
-   7b940e2a,          ,"Hello %s! %hd %e"
-   851beeb6,          ,"%u %d"
-   881436a0,2020-01-01,"The answer is: %s"
-   e13b0f94,2020-04-01,"%llu"
-
-Binary database format
-----------------------
-The binary database format is comprised of a 16-byte header followed by a series
-of 8-byte entries. Each entry stores the token and the removal date, which is
-0xFFFFFFFF if there is none. The string literals are stored next in the same
-order as the entries. Strings are stored with null terminators. See
-`token_database.h <https://pigweed.googlesource.com/pigweed/pigweed/+/HEAD/pw_tokenizer/public/pw_tokenizer/token_database.h>`_
-for full details.
-
-The binary form of the CSV database is shown below. It contains the same
-information, but in a more compact and easily processed form. It takes 141 B
-compared with the CSV database's 211 B.
-
-.. code-block:: text
-
-   [header]
-   0x00: 454b4f54 0000534e  TOKENS..
-   0x08: 00000006 00000000  ........
-
-   [entries]
-   0x10: 141c35d5 ffffffff  .5......
-   0x18: 2e668cd6 07e30c19  ..f.....
-   0x20: 7b940e2a ffffffff  *..{....
-   0x28: 851beeb6 ffffffff  ........
-   0x30: 881436a0 07e40101  .6......
-   0x38: e13b0f94 07e40401  ..;.....
-
-   [string table]
-   0x40: 54 68 65 20 61 6e 73 77 65 72 3a 20 22 25 73 22  The answer: "%s"
-   0x50: 00 4a 65 6c 6c 6f 2c 20 77 6f 72 6c 64 21 00 48  .Jello, world!.H
-   0x60: 65 6c 6c 6f 20 25 73 21 20 25 68 64 20 25 65 00  ello %s! %hd %e.
-   0x70: 25 75 20 25 64 00 54 68 65 20 61 6e 73 77 65 72  %u %d.The answer
-   0x80: 20 69 73 3a 20 25 73 00 25 6c 6c 75 00            is: %s.%llu.
-
-.. _module-pw_tokenizer-directory-database-format:
-
-Directory database format
--------------------------
-pw_tokenizer can consume directories of CSV databases. A directory database
-will be searched recursively for files with a `.pw_tokenizer.csv` suffix, all
-of which will be used for subsequent detokenization lookups.
-
-An example directory database might look something like this:
-
-.. code-block:: text
-
-   token_database
-   ├── chuck_e_cheese.pw_tokenizer.csv
-   ├── fungi_ble.pw_tokenizer.csv
-   └── some_more
-       └── arcade.pw_tokenizer.csv
-
-This format is optimized for storage in a Git repository alongside source code.
-The token database commands randomly generate unique file names for the CSVs in
-the database to prevent merge conflicts. Running ``mark_removed`` or ``purge``
-commands in the database CLI consolidates the files to a single CSV.
-
-The database command line tool supports a ``--discard-temporary
-<upstream_commit>`` option for ``add``. In this mode, the tool attempts to
-discard temporary tokens. It identifies the latest CSV not present in the
-provided ``<upstream_commit>``, and tokens present that CSV that are not in the
-newly added tokens are discarded. This helps keep temporary tokens (e.g from
-debug logs) out of the database.
-
-JSON support
-============
-While pw_tokenizer doesn't specify a JSON database format, a token database can
-be created from a JSON formatted array of strings. This is useful for side-band
-token database generation for strings that are not embedded as parsable tokens
-in compiled binaries. See :ref:`module-pw_tokenizer-database-creation` for
-instructions on generating a token database from a JSON file.
-
-.. _module-pw_tokenizer-collisions:
-
-----------------
-Token collisions
-----------------
-Tokens are calculated with a hash function. It is possible for different
-strings to hash to the same token. When this happens, multiple strings will have
-the same token in the database, and it may not be possible to unambiguously
-decode a token.
-
-The detokenization tools attempt to resolve collisions automatically. Collisions
-are resolved based on two things:
-
-- whether the tokenized data matches the strings arguments' (if any), and
-- if / when the string was marked as having been removed from the database.
-
-See :ref:`module-pw_tokenizer-collisions-guide` for guidance on how to fix
-collisions.
-
-Probability of collisions
-=========================
-Hashes of any size have a collision risk. The probability of one at least
-one collision occurring for a given number of strings is unintuitively high
-(this is known as the `birthday problem
-<https://en.wikipedia.org/wiki/Birthday_problem>`_). If fewer than 32 bits are
-used for tokens, the probability of collisions increases substantially.
-
-This table shows the approximate number of strings that can be hashed to have a
-1% or 50% probability of at least one collision (assuming a uniform, random
-hash).
-
-+-------+---------------------------------------+
-| Token | Collision probability by string count |
-| bits  +--------------------+------------------+
-|       |         50%        |          1%      |
-+=======+====================+==================+
-|   32  |       77000        |        9300      |
-+-------+--------------------+------------------+
-|   31  |       54000        |        6600      |
-+-------+--------------------+------------------+
-|   24  |        4800        |         580      |
-+-------+--------------------+------------------+
-|   16  |         300        |          36      |
-+-------+--------------------+------------------+
-|    8  |          19        |           3      |
-+-------+--------------------+------------------+
-
-Keep this table in mind when masking tokens (see
-:ref:`module-pw_tokenizer-masks`). 16 bits might be acceptable when
-tokenizing a small set of strings, such as module names, but won't be suitable
-for large sets of strings, like log messages.
-
-.. _module-pw_tokenizer-detokenization:
-
---------------
-Detokenization
---------------
-Detokenization is the process of expanding a token to the string it represents
-and decoding its arguments. ``pw_tokenizer`` provides Python, C++ and
-TypeScript detokenization libraries.
-
-**Example: decoding tokenized logs**
-
-A project might tokenize its log messages with the
-:ref:`module-pw_tokenizer-base64-format`. Consider the following log file, which
-has four tokenized logs and one plain text log:
-
-.. code-block:: text
-
-   20200229 14:38:58 INF $HL2VHA==
-   20200229 14:39:00 DBG $5IhTKg==
-   20200229 14:39:20 DBG Crunching numbers to calculate probability of success
-   20200229 14:39:21 INF $EgFj8lVVAUI=
-   20200229 14:39:23 ERR $DFRDNwlOT1RfUkVBRFk=
-
-The project's log strings are stored in a database like the following:
-
-.. code-block::
-
-   1c95bd1c,          ,"Initiating retrieval process for recovery object"
-   2a5388e4,          ,"Determining optimal approach and coordinating vectors"
-   3743540c,          ,"Recovery object retrieval failed with status %s"
-   f2630112,          ,"Calculated acceptable probability of success (%.2f%%)"
-
-Using the detokenizing tools with the database, the logs can be decoded:
-
-.. code-block:: text
-
-   20200229 14:38:58 INF Initiating retrieval process for recovery object
-   20200229 14:39:00 DBG Determining optimal algorithm and coordinating approach vectors
-   20200229 14:39:20 DBG Crunching numbers to calculate probability of success
-   20200229 14:39:21 INF Calculated acceptable probability of success (32.33%)
-   20200229 14:39:23 ERR Recovery object retrieval failed with status NOT_READY
-
-.. note::
-
-   This example uses the :ref:`module-pw_tokenizer-base64-format`, which
-   occupies about 4/3 (133%) as much space as the default binary format when
-   encoded. For projects that wish to interleave tokenized with plain text,
-   using Base64 is a worthwhile tradeoff.
-
-See :ref:`module-pw_tokenizer-detokenization-guides` for detailed instructions
-on how to do detokenization in different programming languages.
-
-.. _module-pw_tokenizer-python-detokenization-c99-printf-notes:
-
-Python detokenization: C99 ``printf`` compatibility notes
-=========================================================
-This implementation is designed to align with the
-`C99 specification, section 7.19.6
-<https://www.dii.uchile.cl/~daespino/files/Iso_C_1999_definition.pdf>`_.
-Notably, this specification is slightly different than what is implemented
-in most compilers due to each compiler choosing to interpret undefined
-behavior in slightly different ways. Treat the following description as the
-source of truth.
-
-This implementation supports:
-
-- Overall Format: ``%[flags][width][.precision][length][specifier]``
-- Flags (Zero or More)
-   - ``-``: Left-justify within the given field width; Right justification is
-     the default (see Width modifier).
-   - ``+``: Forces to preceed the result with a plus or minus sign (``+`` or
-     ``-``) even for positive numbers. By default, only negative numbers are
-     preceded with a ``-`` sign.
-   - (space): If no sign is going to be written, a blank space is inserted
-     before the value.
-   - ``#``: Specifies an alternative print syntax should be used.
-      - Used with ``o``, ``x`` or ``X`` specifiers the value is preceeded with
-        ``0``, ``0x`` or ``0X``, respectively, for values different than zero.
-      - Used with ``a``, ``A``, ``e``, ``E``, ``f``, ``F``, ``g``, or ``G`` it
-        forces the written output to contain a decimal point even if no more
-        digits follow. By default, if no digits follow, no decimal point is
-        written.
-   - ``0``: Left-pads the number with zeroes (``0``) instead of spaces when
-     padding is specified (see width sub-specifier).
-- Width (Optional)
-   - ``(number)``: Minimum number of characters to be printed. If the value to
-     be printed is shorter than this number, the result is padded with blank
-     spaces or ``0`` if the ``0`` flag is present. The value is not truncated
-     even if the result is larger. If the value is negative and the ``0`` flag
-     is present, the ``0``\s are padded after the ``-`` symbol.
-   - ``*``: The width is not specified in the format string, but as an
-     additional integer value argument preceding the argument that has to be
-     formatted.
-- Precision (Optional)
-   - ``.(number)``
-      - For ``d``, ``i``, ``o``, ``u``, ``x``, ``X``, specifies the minimum
-        number of digits to be written. If the value to be written is shorter
-        than this number, the result is padded with leading zeros. The value is
-        not truncated even if the result is longer.
-
-        - A precision of ``0`` means that no character is written for the value
-          ``0``.
-
-      - For ``a``, ``A``, ``e``, ``E``, ``f``, and ``F``, specifies the number
-        of digits to be printed after the decimal point. By default, this is
-        ``6``.
-
-      - For ``g`` and ``G``, specifies the maximum number of significant digits
-        to be printed.
-
-      - For ``s``, specifies the maximum number of characters to be printed. By
-        default all characters are printed until the ending null character is
-        encountered.
-
-      - If the period is specified without an explicit value for precision,
-        ``0`` is assumed.
-   - ``.*``: The precision is not specified in the format string, but as an
-     additional integer value argument preceding the argument that has to be
-     formatted.
-- Length (Optional)
-   - ``hh``: Usable with ``d``, ``i``, ``o``, ``u``, ``x``, or ``X`` specifiers
-     to convey the argument will be a ``signed char`` or ``unsigned char``.
-     However, this is largely ignored in the implementation due to it not being
-     necessary for Python or argument decoding (since the argument is always
-     encoded at least as a 32-bit integer).
-   - ``h``: Usable with ``d``, ``i``, ``o``, ``u``, ``x``, or ``X`` specifiers
-     to convey the argument will be a ``signed short int`` or
-     ``unsigned short int``. However, this is largely ignored in the
-     implementation due to it not being necessary for Python or argument
-     decoding (since the argument is always encoded at least as a 32-bit
-     integer).
-   - ``l``: Usable with ``d``, ``i``, ``o``, ``u``, ``x``, or ``X`` specifiers
-     to convey the argument will be a ``signed long int`` or
-     ``unsigned long int``. Also is usable with ``c`` and ``s`` to specify that
-     the arguments will be encoded with ``wchar_t`` values (which isn't
-     different from normal ``char`` values). However, this is largely ignored in
-     the implementation due to it not being necessary for Python or argument
-     decoding (since the argument is always encoded at least as a 32-bit
-     integer).
-   - ``ll``: Usable with ``d``, ``i``, ``o``, ``u``, ``x``, or ``X`` specifiers
-     to convey the argument will be a ``signed long long int`` or
-     ``unsigned long long int``. This is required to properly decode the
-     argument as a 64-bit integer.
-   - ``L``: Usable with ``a``, ``A``, ``e``, ``E``, ``f``, ``F``, ``g``, or
-     ``G`` conversion specifiers applies to a long double argument. However,
-     this is ignored in the implementation due to floating point value encoded
-     that is unaffected by bit width.
-   - ``j``: Usable with ``d``, ``i``, ``o``, ``u``, ``x``, or ``X`` specifiers
-     to convey the argument will be a ``intmax_t`` or ``uintmax_t``.
-   - ``z``: Usable with ``d``, ``i``, ``o``, ``u``, ``x``, or ``X`` specifiers
-     to convey the argument will be a ``size_t``. This will force the argument
-     to be decoded as an unsigned integer.
-   - ``t``: Usable with ``d``, ``i``, ``o``, ``u``, ``x``, or ``X`` specifiers
-     to convey the argument will be a ``ptrdiff_t``.
-   - If a length modifier is provided for an incorrect specifier, it is ignored.
-- Specifier (Required)
-   - ``d`` / ``i``: Used for signed decimal integers.
-
-   - ``u``: Used for unsigned decimal integers.
-
-   - ``o``: Used for unsigned decimal integers and specifies formatting should
-     be as an octal number.
-
-   - ``x``: Used for unsigned decimal integers and specifies formatting should
-     be as a hexadecimal number using all lowercase letters.
-
-   - ``X``: Used for unsigned decimal integers and specifies formatting should
-     be as a hexadecimal number using all uppercase letters.
-
-   - ``f``: Used for floating-point values and specifies to use lowercase,
-     decimal floating point formatting.
-
-     - Default precision is ``6`` decimal places unless explicitly specified.
-
-   - ``F``: Used for floating-point values and specifies to use uppercase,
-     decimal floating point formatting.
-
-     - Default precision is ``6`` decimal places unless explicitly specified.
-
-   - ``e``: Used for floating-point values and specifies to use lowercase,
-     exponential (scientific) formatting.
-
-     - Default precision is ``6`` decimal places unless explicitly specified.
-
-   - ``E``: Used for floating-point values and specifies to use uppercase,
-     exponential (scientific) formatting.
-
-     - Default precision is ``6`` decimal places unless explicitly specified.
-
-   - ``g``: Used for floating-point values and specified to use ``f`` or ``e``
-     formatting depending on which would be the shortest representation.
-
-     - Precision specifies the number of significant digits, not just digits
-       after the decimal place.
-
-     - If the precision is specified as ``0``, it is interpreted to mean ``1``.
-
-     - ``e`` formatting is used if the the exponent would be less than ``-4`` or
-       is greater than or equal to the precision.
-
-     - Trailing zeros are removed unless the ``#`` flag is set.
-
-     - A decimal point only appears if it is followed by a digit.
-
-     - ``NaN`` or infinities always follow ``f`` formatting.
-
-   - ``G``: Used for floating-point values and specified to use ``f`` or ``e``
-     formatting depending on which would be the shortest representation.
-
-     - Precision specifies the number of significant digits, not just digits
-       after the decimal place.
-
-     - If the precision is specified as ``0``, it is interpreted to mean ``1``.
-
-     - ``E`` formatting is used if the the exponent would be less than ``-4`` or
-       is greater than or equal to the precision.
-
-     - Trailing zeros are removed unless the ``#`` flag is set.
-
-     - A decimal point only appears if it is followed by a digit.
-
-     - ``NaN`` or infinities always follow ``F`` formatting.
-
-   - ``c``: Used for formatting a ``char`` value.
-
-   - ``s``: Used for formatting a string of ``char`` values.
-
-     - If width is specified, the null terminator character is included as a
-       character for width count.
-
-     - If precision is specified, no more ``char``\s than that value will be
-       written from the string (padding is used to fill additional width).
-
-   - ``p``: Used for formatting a pointer address.
-
-   - ``%``: Prints a single ``%``. Only valid as ``%%`` (supports no flags,
-     width, precision, or length modifiers).
-
-Underspecified details:
-
-- If both ``+`` and (space) flags appear, the (space) is ignored.
-- The ``+`` and (space) flags will error if used with ``c`` or ``s``.
-- The ``#`` flag will error if used with ``d``, ``i``, ``u``, ``c``, ``s``, or
-  ``p``.
-- The ``0`` flag will error if used with ``c``, ``s``, or ``p``.
-- Both ``+`` and (space) can work with the unsigned integer specifiers ``u``,
-  ``o``, ``x``, and ``X``.
-- If a length modifier is provided for an incorrect specifier, it is ignored.
-- The ``z`` length modifier will decode arugments as signed as long as ``d`` or
-  ``i`` is used.
-- ``p`` is implementation defined.
-
-  - For this implementation, it will print with a ``0x`` prefix and then the
-    pointer value was printed using ``%08X``.
-
-  - ``p`` supports the ``+``, ``-``, and (space) flags, but not the ``#`` or
-    ``0`` flags.
-
-  - None of the length modifiers are usable with ``p``.
-
-  - This implementation will try to adhere to user-specified width (assuming the
-    width provided is larger than the guaranteed minimum of ``10``).
-
-  - Specifying precision for ``p`` is considered an error.
-- Only ``%%`` is allowed with no other modifiers. Things like ``%+%`` will fail
-  to decode. Some C stdlib implementations support any modifiers being
-  present between ``%``, but ignore any for the output.
-- If a width is specified with the ``0`` flag for a negative value, the padded
-  ``0``\s will appear after the ``-`` symbol.
-- A precision of ``0`` for ``d``, ``i``, ``u``, ``o``, ``x``, or ``X`` means
-  that no character is written for the value ``0``.
-- Precision cannot be specified for ``c``.
-- Using ``*`` or fixed precision with the ``s`` specifier still requires the
-  string argument to be null-terminated. This is due to argument encoding
-  happening on the C/C++-side while the precision value is not read or
-  otherwise used until decoding happens in this Python code.
-
-Non-conformant details:
-
-- ``n`` specifier: We do not support the ``n`` specifier since it is impossible
-  for us to retroactively tell the original program how many characters have
-  been printed since this decoding happens a great deal of time after the
-  device sent it, usually on a separate processing device entirely.
-
----------------------------
-Limitations and future work
----------------------------
-
-GCC bug: tokenization in template functions
-===========================================
-GCC incorrectly ignores the section attribute for template `functions
-<https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70435>`_ and `variables
-<https://gcc.gnu.org/bugzilla/show_bug.cgi?id=88061>`_. For example, the
-following won't work when compiling with GCC and tokenized logging:
-
-.. code-block:: cpp
-
-   template <...>
-   void DoThings() {
-     int value = GetValue();
-     // This log won't work with tokenized logs due to the templated context.
-     PW_LOG_INFO("Got value: %d", value);
-     ...
-   }
-
-The bug causes tokenized strings in template functions to be emitted into
-``.rodata`` instead of the special tokenized string section. This causes two
-problems:
-
-1. Tokenized strings will not be discovered by the token database tools.
-2. Tokenized strings may not be removed from the final binary.
-
-There are two workarounds.
-
-#. **Use Clang.** Clang puts the string data in the requested section, as
-   expected. No extra steps are required.
-
-#. **Move tokenization calls to a non-templated context.** Creating a separate
-   non-templated function and invoking it from the template resolves the issue.
-   This enables tokenizing in most cases encountered in practice with
-   templates.
-
-   .. code-block:: cpp
-
-      // In .h file:
-      void LogThings(value);
-
-      template <...>
-      void DoThings() {
-        int value = GetValue();
-        // This log will work: calls non-templated helper.
-        LogThings(value);
-        ...
-      }
-
-      // In .cc file:
-      void LogThings(int value) {
-        // Tokenized logging works as expected in this non-templated context.
-        PW_LOG_INFO("Got value %d", value);
-      }
-
-There is a third option, which isn't implemented yet, which is to compile the
-binary twice: once to extract the tokens, and once for the production binary
-(without tokens). If this is interesting to you please get in touch.
-
-64-bit tokenization
-===================
-The Python and C++ detokenizing libraries currently assume that strings were
-tokenized on a system with 32-bit ``long``, ``size_t``, ``intptr_t``, and
-``ptrdiff_t``. Decoding may not work correctly for these types if a 64-bit
-device performed the tokenization.
-
-Supporting detokenization of strings tokenized on 64-bit targets would be
-simple. This could be done by adding an option to switch the 32-bit types to
-64-bit. The tokenizer stores the sizes of these types in the
-``.pw_tokenizer.info`` ELF section, so the sizes of these types can be verified
-by checking the ELF file, if necessary.
-
-Tokenization in headers
-=======================
-Tokenizing code in header files (inline functions or templates) may trigger
-warnings such as ``-Wlto-type-mismatch`` under certain conditions. That
-is because tokenization requires declaring a character array for each tokenized
-string. If the tokenized string includes macros that change value, the size of
-this character array changes, which means the same static variable is defined
-with different sizes. It should be safe to suppress these warnings, but, when
-possible, code that tokenizes strings with macros that can change value should
-be moved to source files rather than headers.
-
-.. _module-pw_tokenizer-tokenized-strings-as-args:
-
-Tokenized strings as ``%s`` arguments
-=====================================
-Encoding ``%s`` string arguments is inefficient, since ``%s`` strings are
-encoded 1:1, with no tokenization. It would be better to send a tokenized string
-literal as an integer instead of a string argument, but this is not yet
-supported.
-
-A string token could be sent by marking an integer % argument in a way
-recognized by the detokenization tools. The detokenizer would expand the
-argument to the string represented by the integer.
-
-.. code-block:: cpp
-
-   #define PW_TOKEN_ARG PRIx32 "<PW_TOKEN]"
-
-   constexpr uint32_t answer_token = PW_TOKENIZE_STRING("Uh, who is there");
-
-   PW_TOKENIZE_STRING("Knock knock: %" PW_TOKEN_ARG "?", answer_token);
-
-Strings with arguments could be encoded to a buffer, but since printf strings
-are null-terminated, a binary encoding would not work. These strings can be
-prefixed Base64-encoded and sent as ``%s`` instead. See
-:ref:`module-pw_tokenizer-base64-format`.
-
-Another possibility: encode strings with arguments to a ``uint64_t`` and send
-them as an integer. This would be efficient and simple, but only support a small
-number of arguments.
diff --git a/pw_tokenizer/detokenization.rst b/pw_tokenizer/detokenization.rst
new file mode 100644
index 0000000..7fbefec
--- /dev/null
+++ b/pw_tokenizer/detokenization.rst
@@ -0,0 +1,583 @@
+:tocdepth: 3
+
+.. _module-pw_tokenizer-detokenization:
+
+==============
+Detokenization
+==============
+.. pigweed-module-subpage::
+   :name: pw_tokenizer
+   :tagline: Compress strings to shrink logs by +75%
+
+Detokenization is the process of expanding a token to the string it represents
+and decoding its arguments. ``pw_tokenizer`` provides Python, C++ and
+TypeScript detokenization libraries.
+
+--------------------------------
+Example: decoding tokenized logs
+--------------------------------
+A project might tokenize its log messages with the
+:ref:`module-pw_tokenizer-base64-format`. Consider the following log file, which
+has four tokenized logs and one plain text log:
+
+.. code-block:: text
+
+   20200229 14:38:58 INF $HL2VHA==
+   20200229 14:39:00 DBG $5IhTKg==
+   20200229 14:39:20 DBG Crunching numbers to calculate probability of success
+   20200229 14:39:21 INF $EgFj8lVVAUI=
+   20200229 14:39:23 ERR $DFRDNwlOT1RfUkVBRFk=
+
+The project's log strings are stored in a database like the following:
+
+.. code-block::
+
+   1c95bd1c,          ,"Initiating retrieval process for recovery object"
+   2a5388e4,          ,"Determining optimal approach and coordinating vectors"
+   3743540c,          ,"Recovery object retrieval failed with status %s"
+   f2630112,          ,"Calculated acceptable probability of success (%.2f%%)"
+
+Using the detokenizing tools with the database, the logs can be decoded:
+
+.. code-block:: text
+
+   20200229 14:38:58 INF Initiating retrieval process for recovery object
+   20200229 14:39:00 DBG Determining optimal algorithm and coordinating approach vectors
+   20200229 14:39:20 DBG Crunching numbers to calculate probability of success
+   20200229 14:39:21 INF Calculated acceptable probability of success (32.33%)
+   20200229 14:39:23 ERR Recovery object retrieval failed with status NOT_READY
+
+.. note::
+
+   This example uses the :ref:`module-pw_tokenizer-base64-format`, which
+   occupies about 4/3 (133%) as much space as the default binary format when
+   encoded. For projects that wish to interleave tokenized with plain text,
+   using Base64 is a worthwhile tradeoff.
+
+------------------------
+Detokenization in Python
+------------------------
+To detokenize in Python, import ``Detokenizer`` from the ``pw_tokenizer``
+package, and instantiate it with paths to token databases or ELF files.
+
+.. code-block:: python
+
+   import pw_tokenizer
+
+   detokenizer = pw_tokenizer.Detokenizer('path/to/database.csv', 'other/path.elf')
+
+   def process_log_message(log_message):
+       result = detokenizer.detokenize(log_message.payload)
+       self._log(str(result))
+
+The ``pw_tokenizer`` package also provides the ``AutoUpdatingDetokenizer``
+class, which can be used in place of the standard ``Detokenizer``. This class
+monitors database files for changes and automatically reloads them when they
+change. This is helpful for long-running tools that use detokenization. The
+class also supports token domains for the given database files in the
+``<path>#<domain>`` format.
+
+For messages that are optionally tokenized and may be encoded as binary,
+Base64, or plaintext UTF-8, use
+:func:`pw_tokenizer.proto.decode_optionally_tokenized`. This will attempt to
+determine the correct method to detokenize and always provide a printable
+string.
+
+.. _module-pw_tokenizer-base64-decoding:
+
+Decoding Base64
+===============
+The Python ``Detokenizer`` class supports decoding and detokenizing prefixed
+Base64 messages with ``detokenize_base64`` and related methods.
+
+.. tip::
+   The Python detokenization tools support recursive detokenization for prefixed
+   Base64 text. Tokenized strings found in detokenized text are detokenized, so
+   prefixed Base64 messages can be passed as ``%s`` arguments.
+
+   For example, the tokenized string for "Wow!" is ``$RhYjmQ==``. This could be
+   passed as an argument to the printf-style string ``Nested message: %s``, which
+   encodes to ``$pEVTYQkkUmhZam1RPT0=``. The detokenizer would decode the message
+   as follows:
+
+   ::
+
+     "$pEVTYQkkUmhZam1RPT0=" → "Nested message: $RhYjmQ==" → "Nested message: Wow!"
+
+Base64 decoding is supported in C++ or C with the
+``pw::tokenizer::PrefixedBase64Decode`` or ``pw_tokenizer_PrefixedBase64Decode``
+functions.
+
+Investigating undecoded Base64 messages
+---------------------------------------
+Tokenized messages cannot be decoded if the token is not recognized. The Python
+package includes the ``parse_message`` tool, which parses tokenized Base64
+messages without looking up the token in a database. This tool attempts to guess
+the types of the arguments and displays potential ways to decode them.
+
+This tool can be used to extract argument information from an otherwise unusable
+message. It could help identify which statement in the code produced the
+message. This tool is not particularly helpful for tokenized messages without
+arguments, since all it can do is show the value of the unknown token.
+
+The tool is executed by passing Base64 tokenized messages, with or without the
+``$`` prefix, to ``pw_tokenizer.parse_message``. Pass ``-h`` or ``--help`` to
+see full usage information.
+
+Example
+^^^^^^^
+.. code-block::
+
+   $ python -m pw_tokenizer.parse_message '$329JMwA=' koSl524TRkFJTEVEX1BSRUNPTkRJVElPTgJPSw== --specs %s %d
+
+   INF Decoding arguments for '$329JMwA='
+   INF Binary: b'\xdfoI3\x00' [df 6f 49 33 00] (5 bytes)
+   INF Token:  0x33496fdf
+   INF Args:   b'\x00' [00] (1 bytes)
+   INF Decoding with up to 8 %s or %d arguments
+   INF   Attempt 1: [%s]
+   INF   Attempt 2: [%d] 0
+
+   INF Decoding arguments for '$koSl524TRkFJTEVEX1BSRUNPTkRJVElPTgJPSw=='
+   INF Binary: b'\x92\x84\xa5\xe7n\x13FAILED_PRECONDITION\x02OK' [92 84 a5 e7 6e 13 46 41 49 4c 45 44 5f 50 52 45 43 4f 4e 44 49 54 49 4f 4e 02 4f 4b] (28 bytes)
+   INF Token:  0xe7a58492
+   INF Args:   b'n\x13FAILED_PRECONDITION\x02OK' [6e 13 46 41 49 4c 45 44 5f 50 52 45 43 4f 4e 44 49 54 49 4f 4e 02 4f 4b] (24 bytes)
+   INF Decoding with up to 8 %s or %d arguments
+   INF   Attempt 1: [%d %s %d %d %d] 55 FAILED_PRECONDITION 1 -40 -38
+   INF   Attempt 2: [%d %s %s] 55 FAILED_PRECONDITION OK
+
+
+.. _module-pw_tokenizer-protobuf-tokenization-python:
+
+Detokenizing protobufs
+======================
+The :py:mod:`pw_tokenizer.proto` Python module defines functions that may be
+used to detokenize protobuf objects in Python. The function
+:py:func:`pw_tokenizer.proto.detokenize_fields` detokenizes all fields
+annotated as tokenized, replacing them with their detokenized version. For
+example:
+
+.. code-block:: python
+
+   my_detokenizer = pw_tokenizer.Detokenizer(some_database)
+
+   my_message = SomeMessage(tokenized_field=b'$YS1EMQ==')
+   pw_tokenizer.proto.detokenize_fields(my_detokenizer, my_message)
+
+   assert my_message.tokenized_field == b'The detokenized string! Cool!'
+
+Decoding optionally tokenized strings
+-------------------------------------
+The encoding used for an optionally tokenized field is not recorded in the
+protobuf. Despite this, the text can reliably be decoded. This is accomplished
+by attempting to decode the field as binary or Base64 tokenized data before
+treating it like plain text.
+
+The following diagram describes the decoding process for optionally tokenized
+fields in detail.
+
+.. mermaid::
+
+  flowchart TD
+     start([Received bytes]) --> binary
+
+     binary[Decode as<br>binary tokenized] --> binary_ok
+     binary_ok{Detokenizes<br>successfully?} -->|no| utf8
+     binary_ok -->|yes| done_binary([Display decoded binary])
+
+     utf8[Decode as UTF-8] --> utf8_ok
+     utf8_ok{Valid UTF-8?} -->|no| base64_encode
+     utf8_ok -->|yes| base64
+
+     base64_encode[Encode as<br>tokenized Base64] --> display
+     display([Display encoded Base64])
+
+     base64[Decode as<br>Base64 tokenized] --> base64_ok
+
+     base64_ok{Fully<br>or partially<br>detokenized?} -->|no| is_plain_text
+     base64_ok -->|yes| base64_results
+
+     is_plain_text{Text is<br>printable?} -->|no| base64_encode
+     is_plain_text -->|yes| plain_text
+
+     base64_results([Display decoded Base64])
+     plain_text([Display text])
+
+Potential decoding problems
+---------------------------
+The decoding process for optionally tokenized fields will yield correct results
+in almost every situation. In rare circumstances, it is possible for it to fail,
+but these can be avoided with a low-overhead mitigation if desired.
+
+There are two ways in which the decoding process may fail.
+
+Accidentally interpreting plain text as tokenized binary
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+If a plain-text string happens to decode as a binary tokenized message, the
+incorrect message could be displayed. This is very unlikely to occur. While many
+tokens will incidentally end up being valid UTF-8 strings, it is highly unlikely
+that a device will happen to log one of these strings as plain text. The
+overwhelming majority of these strings will be nonsense.
+
+If an implementation wishes to guard against this extremely improbable
+situation, it is possible to prevent it. This situation is prevented by
+appending 0xFF (or another byte never valid in UTF-8) to binary tokenized data
+that happens to be valid UTF-8 (or all binary tokenized messages, if desired).
+When decoding, if there is an extra 0xFF byte, it is discarded.
+
+Displaying undecoded binary as plain text instead of Base64
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+If a message fails to decode as binary tokenized and it is not valid UTF-8, it
+is displayed as tokenized Base64. This makes it easily recognizable as a
+tokenized message and makes it simple to decode later from the text output (for
+example, with an updated token database).
+
+A binary message for which the token is not known may coincidentally be valid
+UTF-8 or ASCII. 6.25% of 4-byte sequences are composed only of ASCII characters.
+When decoding with an out-of-date token database, it is possible that some
+binary tokenized messages will be displayed as plain text rather than tokenized
+Base64.
+
+This situation is likely to occur, but should be infrequent. Even if it does
+happen, it is not a serious issue. A very small number of strings will be
+displayed incorrectly, but these strings cannot be decoded anyway. One nonsense
+string (e.g. ``a-D1``) would be displayed instead of another (``$YS1EMQ==``).
+Updating the token database would resolve the issue, though the non-Base64 logs
+would be difficult to decode later from a log file.
+
+This situation can be avoided with the same approach described in
+`Accidentally interpreting plain text as tokenized binary`_. Appending
+an invalid UTF-8 character prevents the undecoded binary message from being
+interpreted as plain text.
+
+---------------------
+Detokenization in C++
+---------------------
+The C++ detokenization libraries can be used in C++ or any language that can
+call into C++ with a C-linkage wrapper, such as Java or Rust. A reference
+Java Native Interface (JNI) implementation is provided.
+
+The C++ detokenization library uses binary-format token databases (created with
+``database.py create --type binary``). Read a binary format database from a
+file or include it in the source code. Pass the database array to
+``TokenDatabase::Create``, and construct a detokenizer.
+
+.. code-block:: cpp
+
+   Detokenizer detokenizer(TokenDatabase::Create(token_database_array));
+
+   std::string ProcessLog(span<uint8_t> log_data) {
+     return detokenizer.Detokenize(log_data).BestString();
+   }
+
+The ``TokenDatabase`` class verifies that its data is valid before using it. If
+it is invalid, the ``TokenDatabase::Create`` returns an empty database for which
+``ok()`` returns false. If the token database is included in the source code,
+this check can be done at compile time.
+
+.. code-block:: cpp
+
+   // This line fails to compile with a static_assert if the database is invalid.
+   constexpr TokenDatabase kDefaultDatabase =  TokenDatabase::Create<kData>();
+
+   Detokenizer OpenDatabase(std::string_view path) {
+     std::vector<uint8_t> data = ReadWholeFile(path);
+
+     TokenDatabase database = TokenDatabase::Create(data);
+
+     // This checks if the file contained a valid database. It is safe to use a
+     // TokenDatabase that failed to load (it will be empty), but it may be
+     // desirable to provide a default database or otherwise handle the error.
+     if (database.ok()) {
+       return Detokenizer(database);
+     }
+     return Detokenizer(kDefaultDatabase);
+   }
+
+----------------------------
+Detokenization in TypeScript
+----------------------------
+To detokenize in TypeScript, import ``Detokenizer`` from the ``pigweedjs``
+package, and instantiate it with a CSV token database.
+
+.. code-block:: typescript
+
+   import { pw_tokenizer, pw_hdlc } from 'pigweedjs';
+   const { Detokenizer } = pw_tokenizer;
+   const { Frame } = pw_hdlc;
+
+   const detokenizer = new Detokenizer(String(tokenCsv));
+
+   function processLog(frame: Frame){
+     const result = detokenizer.detokenize(frame);
+     console.log(result);
+   }
+
+For messages that are encoded in Base64, use ``Detokenizer::detokenizeBase64``.
+``detokenizeBase64`` will also attempt to detokenize nested Base64 tokens. There
+is also ``detokenizeUint8Array`` that works just like ``detokenize`` but expects
+``Uint8Array`` instead of a ``Frame`` argument.
+
+
+
+.. _module-pw_tokenizer-cli-detokenizing:
+
+---------------------
+Detokenizing CLI tool
+---------------------
+``pw_tokenizer`` provides two standalone command line utilities for detokenizing
+Base64-encoded tokenized strings.
+
+* ``detokenize.py`` -- Detokenizes Base64-encoded strings in files or from
+  stdin.
+* ``serial_detokenizer.py`` -- Detokenizes Base64-encoded strings from a
+  connected serial device.
+
+If the ``pw_tokenizer`` Python package is installed, these tools may be executed
+as runnable modules. For example:
+
+.. code-block::
+
+   # Detokenize Base64-encoded strings in a file
+   python -m pw_tokenizer.detokenize -i input_file.txt
+
+   # Detokenize Base64-encoded strings in output from a serial device
+   python -m pw_tokenizer.serial_detokenizer --device /dev/ttyACM0
+
+See the ``--help`` options for these tools for full usage information.
+
+--------
+Appendix
+--------
+
+.. _module-pw_tokenizer-python-detokenization-c99-printf-notes:
+
+Python detokenization: C99 ``printf`` compatibility notes
+=========================================================
+This implementation is designed to align with the
+`C99 specification, section 7.19.6
+<https://www.dii.uchile.cl/~daespino/files/Iso_C_1999_definition.pdf>`_.
+Notably, this specification is slightly different than what is implemented
+in most compilers due to each compiler choosing to interpret undefined
+behavior in slightly different ways. Treat the following description as the
+source of truth.
+
+This implementation supports:
+
+- Overall Format: ``%[flags][width][.precision][length][specifier]``
+- Flags (Zero or More)
+   - ``-``: Left-justify within the given field width; Right justification is
+     the default (see Width modifier).
+   - ``+``: Forces to precede the result with a plus or minus sign (``+`` or
+     ``-``) even for positive numbers. By default, only negative numbers are
+     preceded with a ``-`` sign.
+   - (space): If no sign is going to be written, a blank space is inserted
+     before the value.
+   - ``#``: Specifies an alternative print syntax should be used.
+      - Used with ``o``, ``x`` or ``X`` specifiers the value is preceded with
+        ``0``, ``0x`` or ``0X``, respectively, for values different than zero.
+      - Used with ``a``, ``A``, ``e``, ``E``, ``f``, ``F``, ``g``, or ``G`` it
+        forces the written output to contain a decimal point even if no more
+        digits follow. By default, if no digits follow, no decimal point is
+        written.
+   - ``0``: Left-pads the number with zeroes (``0``) instead of spaces when
+     padding is specified (see width sub-specifier).
+- Width (Optional)
+   - ``(number)``: Minimum number of characters to be printed. If the value to
+     be printed is shorter than this number, the result is padded with blank
+     spaces or ``0`` if the ``0`` flag is present. The value is not truncated
+     even if the result is larger. If the value is negative and the ``0`` flag
+     is present, the ``0``\s are padded after the ``-`` symbol.
+   - ``*``: The width is not specified in the format string, but as an
+     additional integer value argument preceding the argument that has to be
+     formatted.
+- Precision (Optional)
+   - ``.(number)``
+      - For ``d``, ``i``, ``o``, ``u``, ``x``, ``X``, specifies the minimum
+        number of digits to be written. If the value to be written is shorter
+        than this number, the result is padded with leading zeros. The value is
+        not truncated even if the result is longer.
+
+        - A precision of ``0`` means that no character is written for the value
+          ``0``.
+
+      - For ``a``, ``A``, ``e``, ``E``, ``f``, and ``F``, specifies the number
+        of digits to be printed after the decimal point. By default, this is
+        ``6``.
+
+      - For ``g`` and ``G``, specifies the maximum number of significant digits
+        to be printed.
+
+      - For ``s``, specifies the maximum number of characters to be printed. By
+        default all characters are printed until the ending null character is
+        encountered.
+
+      - If the period is specified without an explicit value for precision,
+        ``0`` is assumed.
+   - ``.*``: The precision is not specified in the format string, but as an
+     additional integer value argument preceding the argument that has to be
+     formatted.
+- Length (Optional)
+   - ``hh``: Usable with ``d``, ``i``, ``o``, ``u``, ``x``, or ``X`` specifiers
+     to convey the argument will be a ``signed char`` or ``unsigned char``.
+     However, this is largely ignored in the implementation due to it not being
+     necessary for Python or argument decoding (since the argument is always
+     encoded at least as a 32-bit integer).
+   - ``h``: Usable with ``d``, ``i``, ``o``, ``u``, ``x``, or ``X`` specifiers
+     to convey the argument will be a ``signed short int`` or
+     ``unsigned short int``. However, this is largely ignored in the
+     implementation due to it not being necessary for Python or argument
+     decoding (since the argument is always encoded at least as a 32-bit
+     integer).
+   - ``l``: Usable with ``d``, ``i``, ``o``, ``u``, ``x``, or ``X`` specifiers
+     to convey the argument will be a ``signed long int`` or
+     ``unsigned long int``. Also is usable with ``c`` and ``s`` to specify that
+     the arguments will be encoded with ``wchar_t`` values (which isn't
+     different from normal ``char`` values). However, this is largely ignored in
+     the implementation due to it not being necessary for Python or argument
+     decoding (since the argument is always encoded at least as a 32-bit
+     integer).
+   - ``ll``: Usable with ``d``, ``i``, ``o``, ``u``, ``x``, or ``X`` specifiers
+     to convey the argument will be a ``signed long long int`` or
+     ``unsigned long long int``. This is required to properly decode the
+     argument as a 64-bit integer.
+   - ``L``: Usable with ``a``, ``A``, ``e``, ``E``, ``f``, ``F``, ``g``, or
+     ``G`` conversion specifiers and applies to a long double argument. However,
+     this is ignored in the implementation because the floating point value
+     encoding is unaffected by bit width.
+   - ``j``: Usable with ``d``, ``i``, ``o``, ``u``, ``x``, or ``X`` specifiers
+     to convey the argument will be a ``intmax_t`` or ``uintmax_t``.
+   - ``z``: Usable with ``d``, ``i``, ``o``, ``u``, ``x``, or ``X`` specifiers
+     to convey the argument will be a ``size_t``. This will force the argument
+     to be decoded as an unsigned integer.
+   - ``t``: Usable with ``d``, ``i``, ``o``, ``u``, ``x``, or ``X`` specifiers
+     to convey the argument will be a ``ptrdiff_t``.
+   - If a length modifier is provided for an incorrect specifier, it is ignored.
+- Specifier (Required)
+   - ``d`` / ``i``: Used for signed decimal integers.
+
+   - ``u``: Used for unsigned decimal integers.
+
+   - ``o``: Used for unsigned decimal integers and specifies formatting should
+     be as an octal number.
+
+   - ``x``: Used for unsigned decimal integers and specifies formatting should
+     be as a hexadecimal number using all lowercase letters.
+
+   - ``X``: Used for unsigned decimal integers and specifies formatting should
+     be as a hexadecimal number using all uppercase letters.
+
+   - ``f``: Used for floating-point values and specifies to use lowercase,
+     decimal floating point formatting.
+
+     - Default precision is ``6`` decimal places unless explicitly specified.
+
+   - ``F``: Used for floating-point values and specifies to use uppercase,
+     decimal floating point formatting.
+
+     - Default precision is ``6`` decimal places unless explicitly specified.
+
+   - ``e``: Used for floating-point values and specifies to use lowercase,
+     exponential (scientific) formatting.
+
+     - Default precision is ``6`` decimal places unless explicitly specified.
+
+   - ``E``: Used for floating-point values and specifies to use uppercase,
+     exponential (scientific) formatting.
+
+     - Default precision is ``6`` decimal places unless explicitly specified.
+
+   - ``g``: Used for floating-point values and specifies to use ``f`` or ``e``
+     formatting depending on which would be the shortest representation.
+
+     - Precision specifies the number of significant digits, not just digits
+       after the decimal place.
+
+     - If the precision is specified as ``0``, it is interpreted to mean ``1``.
+
+     - ``e`` formatting is used if the exponent would be less than ``-4`` or
+       is greater than or equal to the precision.
+
+     - Trailing zeros are removed unless the ``#`` flag is set.
+
+     - A decimal point only appears if it is followed by a digit.
+
+     - ``NaN`` or infinities always follow ``f`` formatting.
+
+   - ``G``: Used for floating-point values and specifies to use ``F`` or ``E``
+     formatting depending on which would be the shortest representation.
+
+     - Precision specifies the number of significant digits, not just digits
+       after the decimal place.
+
+     - If the precision is specified as ``0``, it is interpreted to mean ``1``.
+
+     - ``E`` formatting is used if the exponent would be less than ``-4`` or
+       is greater than or equal to the precision.
+
+     - Trailing zeros are removed unless the ``#`` flag is set.
+
+     - A decimal point only appears if it is followed by a digit.
+
+     - ``NaN`` or infinities always follow ``F`` formatting.
+
+   - ``c``: Used for formatting a ``char`` value.
+
+   - ``s``: Used for formatting a string of ``char`` values.
+
+     - If width is specified, the null terminator character is included as a
+       character for width count.
+
+     - If precision is specified, no more ``char``\s than that value will be
+       written from the string (padding is used to fill additional width).
+
+   - ``p``: Used for formatting a pointer address.
+
+   - ``%``: Prints a single ``%``. Only valid as ``%%`` (supports no flags,
+     width, precision, or length modifiers).
+
+Underspecified details:
+
+- If both ``+`` and (space) flags appear, the (space) is ignored.
+- The ``+`` and (space) flags will error if used with ``c`` or ``s``.
+- The ``#`` flag will error if used with ``d``, ``i``, ``u``, ``c``, ``s``, or
+  ``p``.
+- The ``0`` flag will error if used with ``c``, ``s``, or ``p``.
+- Both ``+`` and (space) can work with the unsigned integer specifiers ``u``,
+  ``o``, ``x``, and ``X``.
+- If a length modifier is provided for an incorrect specifier, it is ignored.
+- The ``z`` length modifier will decode arguments as signed as long as ``d`` or
+  ``i`` is used.
+- ``p`` is implementation defined.
+
+  - For this implementation, it will print with a ``0x`` prefix and then the
+    pointer value is printed using ``%08X``.
+
+  - ``p`` supports the ``+``, ``-``, and (space) flags, but not the ``#`` or
+    ``0`` flags.
+
+  - None of the length modifiers are usable with ``p``.
+
+  - This implementation will try to adhere to user-specified width (assuming the
+    width provided is larger than the guaranteed minimum of ``10``).
+
+  - Specifying precision for ``p`` is considered an error.
+- Only ``%%`` is allowed with no other modifiers. Things like ``%+%`` will fail
+  to decode. Some C stdlib implementations accept modifiers between the two
+  ``%`` characters, but ignore them in the output.
+- If a width is specified with the ``0`` flag for a negative value, the padded
+  ``0``\s will appear after the ``-`` symbol.
+- A precision of ``0`` for ``d``, ``i``, ``u``, ``o``, ``x``, or ``X`` means
+  that no character is written for the value ``0``.
+- Precision cannot be specified for ``c``.
+- Using ``*`` or fixed precision with the ``s`` specifier still requires the
+  string argument to be null-terminated. This is due to argument encoding
+  happening on the C/C++-side while the precision value is not read or
+  otherwise used until decoding happens in this Python code.
+
+Non-conformant details:
+
+- ``n`` specifier: We do not support the ``n`` specifier since it is impossible
+  for us to retroactively tell the original program how many characters have
+  been printed since this decoding happens a great deal of time after the
+  device sent it, usually on a separate processing device entirely.
diff --git a/pw_tokenizer/detokenize.cc b/pw_tokenizer/detokenize.cc
index 5e3262f..95fdd09 100644
--- a/pw_tokenizer/detokenize.cc
+++ b/pw_tokenizer/detokenize.cc
@@ -19,6 +19,10 @@
 
 #include "pw_bytes/bit.h"
 #include "pw_bytes/endian.h"
+#include "pw_log_tokenized/config.h"
+#include "pw_string/string.h"
+#include "pw_string/string_builder.h"
+#include "pw_tokenizer/base64.h"
 #include "pw_tokenizer/internal/decode.h"
 
 namespace pw::tokenizer {
@@ -126,4 +130,40 @@
                                      : encoded.subspan(sizeof(token)));
 }
 
+DetokenizedString Detokenizer::DetokenizeBase64Message(
+    const std::string_view& encoded) const {
+  std::array<std::byte, log_tokenized::kEncodingBufferSizeBytes> token;
+  size_t token_size = PrefixedBase64Decode(encoded, token);
+
+  return Detokenize(token.data(), token_size);
+}
+
+std::string Detokenizer::DetokenizeBase64(
+    const std::string_view& encoded) const {
+  std::string b64_buffer_;
+  std::string message;
+  tokenizer::DetokenizedString temp;
+
+  for (const auto& x : encoded) {
+    if (base64::IsValidChar(x)) {
+      b64_buffer_.push_back(x);
+    } else {
+      temp = DetokenizeBase64Message(b64_buffer_);
+      message += temp.BestString();
+      b64_buffer_.clear();
+      // Store prefix of next base64 message.
+      if (x == kBase64Prefix) {
+        b64_buffer_.push_back(x);
+      }
+    }
+  }
+
+  if (!b64_buffer_.empty()) {
+    temp = DetokenizeBase64Message(b64_buffer_);
+    message += temp.BestString();
+  }
+
+  return message;
+}
+
 }  // namespace pw::tokenizer
diff --git a/pw_tokenizer/detokenize_test.cc b/pw_tokenizer/detokenize_test.cc
index bb45f71..496ece4 100644
--- a/pw_tokenizer/detokenize_test.cc
+++ b/pw_tokenizer/detokenize_test.cc
@@ -26,6 +26,13 @@
 // Use a shorter name for the error string macro.
 #define ERR PW_TOKENIZER_ARG_DECODING_ERROR
 
+using Case = std::pair<std::string_view, std::string_view>;
+
+template <typename... Args>
+auto TestCases(Args... args) {
+  return std::array<Case, sizeof...(Args)>{args...};
+}
+
 // Use alignas to ensure that the data is properly aligned to be read from a
 // token database entry struct. This avoids unaligned memory reads.
 constexpr char kBasicData[] =
@@ -102,6 +109,20 @@
             ERR("unknown token fedcba98"));
 }
 
+TEST_F(Detokenize, Base64) {
+  for (auto [data, expected] : TestCases(
+           Case{"$AQAAAA=="sv, "One"},
+           Case{"$BQAAAA=="sv, "TWO"},
+           Case{"$/wAAAA=="sv, "333"},
+           Case{"$/+7u3Q=="sv, "FOUR"},
+           Case{"$/+7u3Q==$AQAAAA==$AQAAAA=="sv, "FOUROneOne"},
+           Case{"$AQAAAA==$BQAAAA==$/wAAAA==$/+7u3Q=="sv, "OneTWO333FOUR"},
+           Case{"$AQAAAA==\r\n$BQAAAA==\r\n$/wAAAA==\r\n$/+7u3Q==\r\n"sv,
+                "OneTWO333FOUR"})) {
+    EXPECT_EQ(detok_.DetokenizeBase64(data), expected);
+  }
+}
+
 constexpr char kDataWithArguments[] =
     "TOKENS\0\0"
     "\x09\x00\x00\x00"
@@ -126,14 +147,6 @@
     "%llu!";   // FF
 
 constexpr TokenDatabase kWithArgs = TokenDatabase::Create<kDataWithArguments>();
-
-using Case = std::pair<std::string_view, std::string_view>;
-
-template <typename... Args>
-auto TestCases(Args... args) {
-  return std::array<Case, sizeof...(Args)>{args...};
-}
-
 class DetokenizeWithArgs : public ::testing::Test {
  protected:
   DetokenizeWithArgs() : detok_(kWithArgs) {}
diff --git a/pw_tokenizer/docs.rst b/pw_tokenizer/docs.rst
index bc66c8a..2b90827 100644
--- a/pw_tokenizer/docs.rst
+++ b/pw_tokenizer/docs.rst
@@ -7,7 +7,7 @@
    :name: pw_tokenizer
    :tagline: Compress strings to shrink logs by +75%
    :status: stable
-   :languages: C11, C++14, Python, TypeScript
+   :languages: C11, C++14, Python, Rust, TypeScript
    :code-size-impact: 50% reduction in binary log size
 
 Logging is critical, but developers are often forced to choose between
@@ -28,38 +28,46 @@
 * **Reduce CPU usage** by replacing snprintf calls with simple tokenization code.
 * **Remove potentially sensitive log, assert, and other strings** from binaries.
 
-.. grid:: 2
+.. grid:: 1
 
-   .. grid-item-card:: :octicon:`zap` Get started & guides
-      :link: module-pw_tokenizer-guides
+   .. grid-item-card:: :octicon:`rocket` Get started
+      :link: module-pw_tokenizer-get-started
       :link-type: ref
       :class-item: sales-pitch-cta-primary
 
-      Learn how to integrate pw_tokenizer into your project and implement
-      common use cases.
+      Integrate pw_tokenizer into your project.
+
+.. grid:: 2
+
+   .. grid-item-card:: :octicon:`code-square` Tokenization
+      :link: module-pw_tokenizer-tokenization
+      :link-type: ref
+      :class-item: sales-pitch-cta-secondary
+
+      Convert strings and arguments to tokens.
+
+   .. grid-item-card:: :octicon:`code-square` Token databases
+      :link: module-pw_tokenizer-token-databases
+      :link-type: ref
+      :class-item: sales-pitch-cta-secondary
+
+      Store a mapping of tokens to the strings and arguments they represent.
+
+.. grid:: 2
+
+   .. grid-item-card:: :octicon:`code-square` Detokenization
+      :link: module-pw_tokenizer-detokenization
+      :link-type: ref
+      :class-item: sales-pitch-cta-secondary
+
+      Expand tokens back to the strings and arguments they represent.
 
    .. grid-item-card:: :octicon:`info` API reference
       :link: module-pw_tokenizer-api
       :link-type: ref
       :class-item: sales-pitch-cta-secondary
 
-      Get detailed reference information about the pw_tokenizer API.
-
-.. grid:: 2
-
-   .. grid-item-card:: :octicon:`info` CLI reference
-      :link: module-pw_tokenizer-cli
-      :link-type: ref
-      :class-item: sales-pitch-cta-secondary
-
-      Get usage information about pw_tokenizer command line utilities.
-
-   .. grid-item-card:: :octicon:`table` Design
-      :link: module-pw_tokenizer-design
-      :link-type: ref
-      :class-item: sales-pitch-cta-secondary
-
-      Read up on how pw_tokenizer is designed.
+      Detailed reference information about the pw_tokenizer API.
 
 
 .. _module-pw_tokenizer-tokenized-logging-example:
@@ -102,14 +110,15 @@
   ``"Battery Voltage: %d mV"``. The last 2 bytes are the value of ``voltage``
   converted to a varint using :ref:`module-pw_varint`.
 * The logs are converted back to the original, human-readable message
-  via the :ref:`Detokenization API <module-pw_tokenizer-detokenization-guides>`
-  and a :ref:`token database <module-pw_tokenizer-managing-token-databases>`.
+  via the :ref:`Detokenization API <module-pw_tokenizer-detokenization>`
+  and a :ref:`token database <module-pw_tokenizer-token-databases>`.
 
 .. toctree::
    :hidden:
    :maxdepth: 1
 
-   guides
-   api
-   cli
-   design
+   Get started <get_started>
+   tokenization
+   token_databases
+   detokenization
+   API reference <api>
diff --git a/pw_tokenizer/encode_args.cc b/pw_tokenizer/encode_args.cc
index 444afb7..9939c1c 100644
--- a/pw_tokenizer/encode_args.cc
+++ b/pw_tokenizer/encode_args.cc
@@ -20,6 +20,10 @@
 #include "pw_preprocessor/compiler.h"
 #include "pw_varint/varint.h"
 
+static_assert((PW_TOKENIZER_CFG_ARG_TYPES_SIZE_BYTES == 4) ||
+                  (PW_TOKENIZER_CFG_ARG_TYPES_SIZE_BYTES == 8),
+              "PW_TOKENIZER_CFG_ARG_TYPES_SIZE_BYTES must be 4 or 8");
+
 namespace pw {
 namespace tokenizer {
 namespace {
@@ -33,11 +37,12 @@
 };
 
 size_t EncodeInt(int value, const span<std::byte>& output) {
-  return varint::Encode(value, as_writable_bytes(output));
+  // Use the 64-bit function to avoid instantiating both 32-bit and 64-bit.
+  return pw_tokenizer_EncodeInt64(value, output.data(), output.size());
 }
 
 size_t EncodeInt64(int64_t value, const span<std::byte>& output) {
-  return varint::Encode(value, as_writable_bytes(output));
+  return pw_tokenizer_EncodeInt64(value, output.data(), output.size());
 }
 
 size_t EncodeFloat(float value, const span<std::byte>& output) {
diff --git a/pw_tokenizer/encode_args_test.cc b/pw_tokenizer/encode_args_test.cc
index 131f27b..3947ac3 100644
--- a/pw_tokenizer/encode_args_test.cc
+++ b/pw_tokenizer/encode_args_test.cc
@@ -38,5 +38,17 @@
     MinEncodingBufferSizeBytes<const char*, long long, int, short>() ==
     4 + 1 + 10 + 5 + 3);
 
+TEST(TokenizerCEncodingFunctions, EncodeInt) {
+  uint8_t buffer[5] = {};
+  EXPECT_EQ(pw_tokenizer_EncodeInt(-1, buffer, sizeof(buffer)), 1u);
+  EXPECT_EQ(buffer[0], 1);  // -1 encodes to 1 with ZigZag
+}
+
+TEST(TokenizerCEncodingFunctions, EncodeInt64) {
+  uint8_t buffer[5] = {};
+  EXPECT_EQ(pw_tokenizer_EncodeInt64(1, buffer, sizeof(buffer)), 1u);
+  EXPECT_EQ(buffer[0], 2);  // 1 encodes to 2 with ZigZag
+}
+
 }  // namespace tokenizer
 }  // namespace pw
diff --git a/pw_tokenizer/get_started.rst b/pw_tokenizer/get_started.rst
new file mode 100644
index 0000000..bc6e14e
--- /dev/null
+++ b/pw_tokenizer/get_started.rst
@@ -0,0 +1,83 @@
+.. _module-pw_tokenizer-get-started:
+
+=============================
+Get started with pw_tokenizer
+=============================
+.. pigweed-module-subpage::
+   :name: pw_tokenizer
+   :tagline: Compress strings to shrink logs by +75%
+
+.. _module-pw_tokenizer-get-started-overview:
+
+--------
+Overview
+--------
+There are two sides to ``pw_tokenizer``, which we call tokenization and
+detokenization.
+
+* **Tokenization** converts string literals in the source code to binary tokens
+  at compile time. If the string has printf-style arguments, these are encoded
+  to compact binary form at runtime.
+* **Detokenization** converts tokenized strings back to the original
+  human-readable strings.
+
+Here's an overview of what happens when ``pw_tokenizer`` is used:
+
+1. During compilation, the ``pw_tokenizer`` module hashes string literals to
+   generate stable 32-bit tokens.
+2. The tokenization macro removes these strings by declaring them in an ELF
+   section that is excluded from the final binary.
+3. After compilation, strings are extracted from the ELF to build a database of
+   tokenized strings for use by the detokenizer. The ELF file may also be used
+   directly.
+4. During operation, the device encodes the string token and its arguments, if
+   any.
+5. The encoded tokenized strings are sent off-device or stored.
+6. Off-device, the detokenizer tools use the token database to decode the
+   strings to human-readable form.
+
+Integrating with Bazel / GN / CMake projects
+============================================
+Integrating ``pw_tokenizer`` requires a few steps beyond building the code. This
+section describes one way ``pw_tokenizer`` might be integrated with a project.
+These steps can be adapted as needed.
+
+#. Add ``pw_tokenizer`` to your build. Build files for GN, CMake, and Bazel are
+   provided. For Make or other build systems, add the files specified in the
+   BUILD.gn's ``pw_tokenizer`` target to the build.
+#. Use the tokenization macros in your code. See
+   :ref:`module-pw_tokenizer-tokenization`.
+#. Add the contents of ``pw_tokenizer_linker_sections.ld`` to your project's
+   linker script. In GN and CMake, this step is done automatically.
+#. Compile your code to produce an ELF file.
+#. Run ``database.py create`` on the ELF file to generate a CSV token
+   database. See :ref:`module-pw_tokenizer-managing-token-databases`.
+#. Commit the token database to your repository. See notes in
+   :ref:`module-pw_tokenizer-database-management`.
+#. Integrate a ``database.py add`` command to your build to automatically update
+   the committed token database. In GN, use the ``pw_tokenizer_database``
+   template to do this. See :ref:`module-pw_tokenizer-update-token-database`.
+#. Integrate ``detokenize.py`` or the C++ detokenization library with your tools
+   to decode tokenized logs. See :ref:`module-pw_tokenizer-detokenization`.
+
+Using with Zephyr
+=================
+When building ``pw_tokenizer`` with Zephyr, 3 Kconfigs can be used currently:
+
+* ``CONFIG_PIGWEED_TOKENIZER`` will automatically link ``pw_tokenizer`` as well
+  as any dependencies.
+* ``CONFIG_PIGWEED_TOKENIZER_BASE64`` will automatically link
+  ``pw_tokenizer.base64`` as well as any dependencies.
+* ``CONFIG_PIGWEED_DETOKENIZER`` will automatically link
+  ``pw_tokenizer.decoder`` as well as any dependencies.
+
+Once enabled, the tokenizer headers can be included like any Zephyr headers:
+
+.. code-block:: cpp
+
+   #include <pw_tokenizer/tokenize.h>
+
+.. note::
+  Zephyr handles the additional linker sections via
+  ``pw_tokenizer_zephyr.ld`` which is added to the end of the linker file
+  via a call to ``zephyr_linker_sources(SECTIONS ...)``.
diff --git a/pw_tokenizer/guides.rst b/pw_tokenizer/guides.rst
deleted file mode 100644
index 297d022..0000000
--- a/pw_tokenizer/guides.rst
+++ /dev/null
@@ -1,915 +0,0 @@
-.. _module-pw_tokenizer-guides:
-
-======
-Guides
-======
-.. pigweed-module-subpage::
-   :name: pw_tokenizer
-   :tagline: Compress strings to shrink logs by +75%
-
-.. _module-pw_tokenizer-get-started:
-
----------------
-Getting started
----------------
-There are two sides to ``pw_tokenizer``, which we call tokenization and
-detokenization.
-
-* **Tokenization** converts string literals in the source code to binary tokens
-  at compile time. If the string has printf-style arguments, these are encoded
-  to compact binary form at runtime.
-* **Detokenization** converts tokenized strings back to the original
-  human-readable strings.
-
-Here's an overview of what happens when ``pw_tokenizer`` is used:
-
-1. During compilation, the ``pw_tokenizer`` module hashes string literals to
-   generate stable 32-bit tokens.
-2. The tokenization macro removes these strings by declaring them in an ELF
-   section that is excluded from the final binary.
-3. After compilation, strings are extracted from the ELF to build a database of
-   tokenized strings for use by the detokenizer. The ELF file may also be used
-   directly.
-4. During operation, the device encodes the string token and its arguments, if
-   any.
-5. The encoded tokenized strings are sent off-device or stored.
-6. Off-device, the detokenizer tools use the token database to decode the
-   strings to human-readable form.
-
-Integrating with Bazel / GN / CMake projects
-============================================
-Integrating ``pw_tokenizer`` requires a few steps beyond building the code. This
-section describes one way ``pw_tokenizer`` might be integrated with a project.
-These steps can be adapted as needed.
-
-#. Add ``pw_tokenizer`` to your build. Build files for GN, CMake, and Bazel are
-   provided. For Make or other build systems, add the files specified in the
-   BUILD.gn's ``pw_tokenizer`` target to the build.
-#. Use the tokenization macros in your code. See
-   :ref:`module-pw_tokenizer-tokenization-guides`.
-#. Add the contents of ``pw_tokenizer_linker_sections.ld`` to your project's
-   linker script. In GN and CMake, this step is done automatically.
-#. Compile your code to produce an ELF file.
-#. Run ``database.py create`` on the ELF file to generate a CSV token
-   database. See :ref:`module-pw_tokenizer-managing-token-databases`.
-#. Commit the token database to your repository. See notes in
-   :ref:`module-pw_tokenizer-database-management`.
-#. Integrate a ``database.py add`` command to your build to automatically update
-   the committed token database. In GN, use the ``pw_tokenizer_database``
-   template to do this. See :ref:`module-pw_tokenizer-update-token-database`.
-#. Integrate ``detokenize.py`` or the C++ detokenization library with your tools
-   to decode tokenized logs. See :ref:`module-pw_tokenizer-detokenization`.
-
-Using with Zephyr
-=================
-When building ``pw_tokenizer`` with Zephyr, 3 Kconfigs can be used currently:
-
-* ``CONFIG_PIGWEED_TOKENIZER`` will automatically link ``pw_tokenizer`` as well
-  as any dependencies.
-* ``CONFIG_PIGWEED_TOKENIZER_BASE64`` will automatically link
-  ``pw_tokenizer.base64`` as well as any dependencies.
-* ``CONFIG_PIGWEED_DETOKENIZER`` will automatically link
-  ``pw_tokenizer.decoder`` as well as any dependencies.
-
-Once enabled, the tokenizer headers can be included like any Zephyr headers:
-
-.. code-block:: cpp
-
-   #include <pw_tokenizer/tokenize.h>
-
-.. note::
-  Zephyr handles the additional linker sections via
-  ``pw_tokenizer_zephyr.ld`` which is added to the end of the linker file
-  via a call to ``zephyr_linker_sources(SECTIONS ...)``.
-
-.. _module-pw_tokenizer-tokenization-guides:
-
-------------
-Tokenization
-------------
-Tokenization converts a string literal to a token. If it's a printf-style
-string, its arguments are encoded along with it. The results of tokenization can
-be sent off device or stored in place of a full string.
-
-.. Note: pw_tokenizer_Token is a C typedef so you would expect to reference it
-.. as :c:type:`pw_tokenizer_Token`. That doesn't work because it's defined in
-.. a header file that mixes C and C++.
-
-* :cpp:type:`pw_tokenizer_Token`
-
-To tokenize a string, include ``pw_tokenizer/tokenize.h`` and invoke one of the
-``PW_TOKENIZE_*`` macros.
-
-Tokenize string literals outside of expressions
-===============================================
-``pw_tokenizer`` provides macros for tokenizing string literals with no
-arguments:
-
-* :c:macro:`PW_TOKENIZE_STRING`
-* :c:macro:`PW_TOKENIZE_STRING_DOMAIN`
-* :c:macro:`PW_TOKENIZE_STRING_MASK`
-
-The tokenization macros above cannot be used inside other expressions.
-
-.. admonition:: **Yes**: Assign :c:macro:`PW_TOKENIZE_STRING` to a ``constexpr`` variable.
-  :class: checkmark
-
-  .. code:: cpp
-
-    constexpr uint32_t kGlobalToken = PW_TOKENIZE_STRING("Wowee Zowee!");
-
-    void Function() {
-      constexpr uint32_t local_token = PW_TOKENIZE_STRING("Wowee Zowee?");
-    }
-
-.. admonition:: **No**: Use :c:macro:`PW_TOKENIZE_STRING` in another expression.
-  :class: error
-
-  .. code:: cpp
-
-   void BadExample() {
-     ProcessToken(PW_TOKENIZE_STRING("This won't compile!"));
-   }
-
-  Use :c:macro:`PW_TOKENIZE_STRING_EXPR` instead.
-
-Tokenize inside expressions
-===========================
-An alternate set of macros are provided for use inside expressions. These make
-use of lambda functions, so while they can be used inside expressions, they
-require C++ and cannot be assigned to constexpr variables or be used with
-special function variables like ``__func__``.
-
-* :c:macro:`PW_TOKENIZE_STRING_EXPR`
-* :c:macro:`PW_TOKENIZE_STRING_DOMAIN_EXPR`
-* :c:macro:`PW_TOKENIZE_STRING_MASK_EXPR`
-
-.. admonition:: When to use these macros
-
-  Use :c:macro:`PW_TOKENIZE_STRING` and related macros to tokenize string
-  literals that do not need %-style arguments encoded.
-
-.. admonition:: **Yes**: Use :c:macro:`PW_TOKENIZE_STRING_EXPR` within other expressions.
-  :class: checkmark
-
-  .. code:: cpp
-
-    void GoodExample() {
-      ProcessToken(PW_TOKENIZE_STRING_EXPR("This will compile!"));
-    }
-
-.. admonition:: **No**: Assign :c:macro:`PW_TOKENIZE_STRING_EXPR` to a ``constexpr`` variable.
-  :class: error
-
-  .. code:: cpp
-
-     constexpr uint32_t wont_work = PW_TOKENIZE_STRING_EXPR("This won't compile!"));
-
-  Instead, use :c:macro:`PW_TOKENIZE_STRING` to assign to a ``constexpr`` variable.
-
-.. admonition:: **No**: Tokenize ``__func__`` in :c:macro:`PW_TOKENIZE_STRING_EXPR`.
-  :class: error
-
-  .. code:: cpp
-
-    void BadExample() {
-      // This compiles, but __func__ will not be the outer function's name, and
-      // there may be compiler warnings.
-      constexpr uint32_t wont_work = PW_TOKENIZE_STRING_EXPR(__func__);
-    }
-
-  Instead, use :c:macro:`PW_TOKENIZE_STRING` to tokenize ``__func__`` or similar macros.
-
-Tokenize a message with arguments to a buffer
-=============================================
-* :c:macro:`PW_TOKENIZE_TO_BUFFER`
-* :c:macro:`PW_TOKENIZE_TO_BUFFER_DOMAIN`
-* :c:macro:`PW_TOKENIZE_TO_BUFFER_MASK`
-
-.. admonition:: Why use this macro
-
-   - Encode a tokenized message for consumption within a function.
-   - Encode a tokenized message into an existing buffer.
-
-   Avoid using ``PW_TOKENIZE_TO_BUFFER`` in widely expanded macros, such as a
-   logging macro, because it will result in larger code size than passing the
-   tokenized data to a function.
-
-.. _module-pw_tokenizer-custom-macro:
-
-Tokenize a message with arguments in a custom macro
-===================================================
-Projects can leverage the tokenization machinery in whichever way best suits
-their needs. The most efficient way to use ``pw_tokenizer`` is to pass tokenized
-data to a global handler function. A project's custom tokenization macro can
-handle tokenized data in a function of their choosing.
-
-``pw_tokenizer`` provides two low-level macros for projects to use
-to create custom tokenization macros:
-
-* :c:macro:`PW_TOKENIZE_FORMAT_STRING`
-* :c:macro:`PW_TOKENIZER_ARG_TYPES`
-
-.. caution::
-
-   Note the spelling difference! The first macro begins with ``PW_TOKENIZE_``
-   (no ``R``) whereas the second begins with ``PW_TOKENIZER`` (``R`` included).
-
-The outputs of these macros are typically passed to an encoding function. That
-function encodes the token, argument types, and argument data to a buffer using
-helpers provided by ``pw_tokenizer/encode_args.h``:
-
-.. Note: pw_tokenizer_EncodeArgs is a C function so you would expect to
-.. reference it as :c:func:`pw_tokenizer_EncodeArgs`. That doesn't work because
-.. it's defined in a header file that mixes C and C++.
-
-* :cpp:func:`pw::tokenizer::EncodeArgs`
-* :cpp:class:`pw::tokenizer::EncodedMessage`
-* :cpp:func:`pw_tokenizer_EncodeArgs`
-
-Tokenizing function names
-=========================
-The string literal tokenization functions support tokenizing string literals or
-constexpr character arrays (``constexpr const char[]``). In GCC and Clang, the
-special ``__func__`` variable and ``__PRETTY_FUNCTION__`` extension are declared
-as ``static constexpr char[]`` in C++ instead of the standard ``static const
-char[]``. This means that ``__func__`` and ``__PRETTY_FUNCTION__`` can be
-tokenized while compiling C++ with GCC or Clang.
-
-.. code-block:: cpp
-
-   // Tokenize the special function name variables.
-   constexpr uint32_t function = PW_TOKENIZE_STRING(__func__);
-   constexpr uint32_t pretty_function = PW_TOKENIZE_STRING(__PRETTY_FUNCTION__);
-
-Note that ``__func__`` and ``__PRETTY_FUNCTION__`` are not string literals.
-They are defined as static character arrays, so they cannot be implicitly
-concatentated with string literals. For example, ``printf(__func__ ": %d",
-123);`` will not compile.
-
-Calculate minimum required buffer size
-======================================
-* :cpp:func:`pw::tokenizer::MinEncodingBufferSizeBytes`
-
-Tokenize a message with arguments in a custom macro
-===================================================
-Projects can leverage the tokenization machinery in whichever way best suits
-their needs. The most efficient way to use ``pw_tokenizer`` is to pass tokenized
-data to a global handler function. A project's custom tokenization macro can
-handle tokenized data in a function of their choosing.
-
-``pw_tokenizer`` provides two low-level macros for projects to use
-to create custom tokenization macros:
-
-* :c:macro:`PW_TOKENIZE_FORMAT_STRING`
-* :c:macro:`PW_TOKENIZER_ARG_TYPES`
-
-.. caution::
-
-   Note the spelling difference! The first macro begins with ``PW_TOKENIZE_``
-   (no ``R``) whereas the second begins with ``PW_TOKENIZER`` (``R`` included).
-
-The outputs of these macros are typically passed to an encoding function. That
-function encodes the token, argument types, and argument data to a buffer using
-helpers provided by ``pw_tokenizer/encode_args.h``:
-
-* :cpp:func:`pw::tokenizer::EncodeArgs`
-* :cpp:class:`pw::tokenizer::EncodedMessage`
-* :cpp:func:`pw_tokenizer_EncodeArgs`
-
-Example
--------
-
-The following example implements a custom tokenization macro similar to
-:ref:`module-pw_log_tokenized`.
-
-.. code-block:: cpp
-
-   #include "pw_tokenizer/tokenize.h"
-
-   #ifndef __cplusplus
-   extern "C" {
-   #endif
-
-   void EncodeTokenizedMessage(uint32_t metadata,
-                               pw_tokenizer_Token token,
-                               pw_tokenizer_ArgTypes types,
-                               ...);
-
-   #ifndef __cplusplus
-   }  // extern "C"
-   #endif
-
-   #define PW_LOG_TOKENIZED_ENCODE_MESSAGE(metadata, format, ...)         \
-     do {                                                                 \
-       PW_TOKENIZE_FORMAT_STRING(                                         \
-           PW_TOKENIZER_DEFAULT_DOMAIN, UINT32_MAX, format, __VA_ARGS__); \
-       EncodeTokenizedMessage(payload,                                    \
-                              _pw_tokenizer_token,                        \
-                              PW_TOKENIZER_ARG_TYPES(__VA_ARGS__)         \
-                                  PW_COMMA_ARGS(__VA_ARGS__));            \
-     } while (0)
-
-In this example, the ``EncodeTokenizedMessage`` function would handle encoding
-and processing the message. Encoding is done by the
-:cpp:class:`pw::tokenizer::EncodedMessage` class or
-:cpp:func:`pw::tokenizer::EncodeArgs` function from
-``pw_tokenizer/encode_args.h``. The encoded message can then be transmitted or
-stored as needed.
-
-.. code-block:: cpp
-
-   #include "pw_log_tokenized/log_tokenized.h"
-   #include "pw_tokenizer/encode_args.h"
-
-   void HandleTokenizedMessage(pw::log_tokenized::Metadata metadata,
-                               pw::span<std::byte> message);
-
-   extern "C" void EncodeTokenizedMessage(const uint32_t metadata,
-                                          const pw_tokenizer_Token token,
-                                          const pw_tokenizer_ArgTypes types,
-                                          ...) {
-     va_list args;
-     va_start(args, types);
-     pw::tokenizer::EncodedMessage<> encoded_message(token, types, args);
-     va_end(args);
-
-     HandleTokenizedMessage(metadata, encoded_message);
-   }
-
-.. admonition:: Why use a custom macro
-
-   - Optimal code size. Invoking a free function with the tokenized data results
-     in the smallest possible call site.
-   - Pass additional arguments, such as metadata, with the tokenized message.
-   - Integrate ``pw_tokenizer`` with other systems.
-
-.. _module-pw_tokenizer-masks:
-
-Smaller tokens with masking
-===========================
-``pw_tokenizer`` uses 32-bit tokens. On 32-bit or 64-bit architectures, using
-fewer than 32 bits does not improve runtime or code size efficiency. However,
-when tokens are packed into data structures or stored in arrays, the size of the
-token directly affects memory usage. In those cases, every bit counts, and it
-may be desireable to use fewer bits for the token.
-
-``pw_tokenizer`` allows users to provide a mask to apply to the token. This
-masked token is used in both the token database and the code. The masked token
-is not a masked version of the full 32-bit token, the masked token is the token.
-This makes it trivial to decode tokens that use fewer than 32 bits.
-
-Masking functionality is provided through the ``*_MASK`` versions of the macros:
-
-* :c:macro:`PW_TOKENIZE_STRING_MASK`
-* :c:macro:`PW_TOKENIZE_STRING_MASK_EXPR`
-* :c:macro:`PW_TOKENIZE_TO_BUFFER_MASK`
-
-For example, the following generates 16-bit tokens and packs them into an
-existing value.
-
-.. code-block:: cpp
-
-   constexpr uint32_t token = PW_TOKENIZE_STRING_MASK("domain", 0xFFFF, "Pigweed!");
-   uint32_t packed_word = (other_bits << 16) | token;
-
-Tokens are hashes, so tokens of any size have a collision risk. The fewer bits
-used for tokens, the more likely two strings are to hash to the same token. See
-:ref:`module-pw_tokenizer-collisions`.
-
-Masked tokens without arguments may be encoded in fewer bytes. For example, the
-16-bit token ``0x1234`` may be encoded as two little-endian bytes (``34 12``)
-rather than four (``34 12 00 00``). The detokenizer tools zero-pad data smaller
-than four bytes. Tokens with arguments must always be encoded as four bytes.
-
-.. _module-pw_tokenizer-base64-encoding:
-
-Encoding Base64
-===============
-See :ref:`module-pw_tokenizer-base64-format` for a conceptual overview of
-Base64.
-
-To encode with the Base64 format, add a call to
-``pw::tokenizer::PrefixedBase64Encode`` or ``pw_tokenizer_PrefixedBase64Encode``
-in the tokenizer handler function. For example,
-
-.. code-block:: cpp
-
-   void TokenizedMessageHandler(const uint8_t encoded_message[],
-                                size_t size_bytes) {
-     pw::InlineBasicString base64 = pw::tokenizer::PrefixedBase64Encode(
-         pw::span(encoded_message, size_bytes));
-
-     TransmitLogMessage(base64.data(), base64.size());
-   }
-
-Tokenization in Python
-======================
-The Python ``pw_tokenizer.encode`` module has limited support for encoding
-tokenized messages with the ``encode_token_and_args`` function.
-
-.. autofunction:: pw_tokenizer.encode.encode_token_and_args
-
-This function requires a string's token is already calculated. Typically these
-tokens are provided by a database, but they can be manually created using the
-tokenizer hash.
-
-.. autofunction:: pw_tokenizer.tokens.pw_tokenizer_65599_hash
-
-This is particularly useful for offline token database generation in cases where
-tokenized strings in a binary cannot be embedded as parsable pw_tokenizer
-entries.
-
-.. note::
-   In C, the hash length of a string has a fixed limit controlled by
-   ``PW_TOKENIZER_CFG_C_HASH_LENGTH``. To match tokens produced by C (as opposed
-   to C++) code, ``pw_tokenizer_65599_hash()`` should be called with a matching
-   hash length limit. When creating an offline database, it's a good idea to
-   generate tokens for both, and merge the databases.
-
-.. _module-pw_tokenizer-protobuf-tokenization-python:
-
-Protobuf detokenization library
--------------------------------
-The :py:mod:`pw_tokenizer.proto` Python module defines functions that may be
-used to detokenize protobuf objects in Python. The function
-:py:func:`pw_tokenizer.proto.detokenize_fields` detokenizes all fields
-annotated as tokenized, replacing them with their detokenized version. For
-example:
-
-.. code-block:: python
-
-   my_detokenizer = pw_tokenizer.Detokenizer(some_database)
-
-   my_message = SomeMessage(tokenized_field=b'$YS1EMQ==')
-   pw_tokenizer.proto.detokenize_fields(my_detokenizer, my_message)
-
-   assert my_message.tokenized_field == b'The detokenized string! Cool!'
-
-.. _module-pw_tokenizer-managing-token-databases:
-
----------------
-Token databases
----------------
-Background: :ref:`module-pw_tokenizer-token-databases`
-
-Token databases are managed with the ``database.py`` script. This script can be
-used to extract tokens from compilation artifacts and manage database files.
-Invoke ``database.py`` with ``-h`` for full usage information.
-
-An example ELF file with tokenized logs is provided at
-``pw_tokenizer/py/example_binary_with_tokenized_strings.elf``. You can use that
-file to experiment with the ``database.py`` commands.
-
-.. _module-pw_tokenizer-database-creation:
-
-Create a database
-=================
-The ``create`` command makes a new token database from ELF files (.elf, .o, .so,
-etc.), archives (.a), existing token databases (CSV or binary), or a JSON file
-containing an array of strings.
-
-.. code-block:: sh
-
-   ./database.py create --database DATABASE_NAME ELF_OR_DATABASE_FILE...
-
-Two database output formats are supported: CSV and binary. Provide
-``--type binary`` to ``create`` to generate a binary database instead of the
-default CSV. CSV databases are great for checking into a source control or for
-human review. Binary databases are more compact and simpler to parse. The C++
-detokenizer library only supports binary databases currently.
-
-.. _module-pw_tokenizer-update-token-database:
-
-Update a database
-=================
-As new tokenized strings are added, update the database with the ``add``
-command.
-
-.. code-block:: sh
-
-   ./database.py add --database DATABASE_NAME ELF_OR_DATABASE_FILE...
-
-This command adds new tokens from ELF files or other databases to the database.
-Adding tokens already present in the database updates the date removed, if any,
-to the latest.
-
-A CSV token database can be checked into a source repository and updated as code
-changes are made. The build system can invoke ``database.py`` to update the
-database after each build.
-
-GN integration
-==============
-Token databases may be updated or created as part of a GN build. The
-``pw_tokenizer_database`` template provided by
-``$dir_pw_tokenizer/database.gni`` automatically updates an in-source tokenized
-strings database or creates a new database with artifacts from one or more GN
-targets or other database files.
-
-To create a new database, set the ``create`` variable to the desired database
-type (``"csv"`` or ``"binary"``). The database will be created in the output
-directory. To update an existing database, provide the path to the database with
-the ``database`` variable.
-
-.. code-block::
-
-   import("//build_overrides/pigweed.gni")
-
-   import("$dir_pw_tokenizer/database.gni")
-
-   pw_tokenizer_database("my_database") {
-     database = "database_in_the_source_tree.csv"
-     targets = [ "//firmware/image:foo(//targets/my_board:some_toolchain)" ]
-     input_databases = [ "other_database.csv" ]
-   }
-
-Instead of specifying GN targets, paths or globs to output files may be provided
-with the ``paths`` option.
-
-.. code-block::
-
-   pw_tokenizer_database("my_database") {
-     database = "database_in_the_source_tree.csv"
-     deps = [ ":apps" ]
-     optional_paths = [ "$root_build_dir/**/*.elf" ]
-   }
-
-.. note::
-
-   The ``paths`` and ``optional_targets`` arguments do not add anything to
-   ``deps``, so there is no guarantee that the referenced artifacts will exist
-   when the database is updated. Provide ``targets`` or ``deps`` or build other
-   GN targets first if this is a concern.
-
-CMake integration
-=================
-Token databases may be updated or created as part of a CMake build. The
-``pw_tokenizer_database`` template provided by
-``$dir_pw_tokenizer/database.cmake`` automatically updates an in-source tokenized
-strings database or creates a new database with artifacts from a CMake target.
-
-To create a new database, set the ``CREATE`` variable to the desired database
-type (``"csv"`` or ``"binary"``). The database will be created in the output
-directory.
-
-.. code-block::
-
-   include("$dir_pw_tokenizer/database.cmake")
-
-   pw_tokenizer_database("my_database") {
-     CREATE binary
-     TARGET my_target.ext
-     DEPS ${deps_list}
-   }
-
-To update an existing database, provide the path to the database with
-the ``database`` variable.
-
-.. code-block::
-
-   pw_tokenizer_database("my_database") {
-     DATABASE database_in_the_source_tree.csv
-     TARGET my_target.ext
-     DEPS ${deps_list}
-   }
-
-.. _module-pw_tokenizer-detokenization-guides:
-
---------------
-Detokenization
---------------
-See :ref:`module-pw_tokenizer-detokenization` for a conceptual overview
-of detokenization.
-
-Detokenizing command line utilities
-===================================
-See :ref:`module-pw_tokenizer-cli-detokenizing`.
-
-Python
-======
-To detokenize in Python, import ``Detokenizer`` from the ``pw_tokenizer``
-package, and instantiate it with paths to token databases or ELF files.
-
-.. code-block:: python
-
-   import pw_tokenizer
-
-   detokenizer = pw_tokenizer.Detokenizer('path/to/database.csv', 'other/path.elf')
-
-   def process_log_message(log_message):
-       result = detokenizer.detokenize(log_message.payload)
-       self._log(str(result))
-
-The ``pw_tokenizer`` package also provides the ``AutoUpdatingDetokenizer``
-class, which can be used in place of the standard ``Detokenizer``. This class
-monitors database files for changes and automatically reloads them when they
-change. This is helpful for long-running tools that use detokenization. The
-class also supports token domains for the given database files in the
-``<path>#<domain>`` format.
-
-For messages that are optionally tokenized and may be encoded as binary,
-Base64, or plaintext UTF-8, use
-:func:`pw_tokenizer.proto.decode_optionally_tokenized`. This will attempt to
-determine the correct method to detokenize and always provide a printable
-string. For more information on this feature, see
-:ref:`module-pw_tokenizer-proto`.
-
-C++
-===
-The C++ detokenization libraries can be used in C++ or any language that can
-call into C++ with a C-linkage wrapper, such as Java or Rust. A reference
-Java Native Interface (JNI) implementation is provided.
-
-The C++ detokenization library uses binary-format token databases (created with
-``database.py create --type binary``). Read a binary format database from a
-file or include it in the source code. Pass the database array to
-``TokenDatabase::Create``, and construct a detokenizer.
-
-.. code-block:: cpp
-
-   Detokenizer detokenizer(TokenDatabase::Create(token_database_array));
-
-   std::string ProcessLog(span<uint8_t> log_data) {
-     return detokenizer.Detokenize(log_data).BestString();
-   }
-
-The ``TokenDatabase`` class verifies that its data is valid before using it. If
-it is invalid, the ``TokenDatabase::Create`` returns an empty database for which
-``ok()`` returns false. If the token database is included in the source code,
-this check can be done at compile time.
-
-.. code-block:: cpp
-
-   // This line fails to compile with a static_assert if the database is invalid.
-   constexpr TokenDatabase kDefaultDatabase =  TokenDatabase::Create<kData>();
-
-   Detokenizer OpenDatabase(std::string_view path) {
-     std::vector<uint8_t> data = ReadWholeFile(path);
-
-     TokenDatabase database = TokenDatabase::Create(data);
-
-     // This checks if the file contained a valid database. It is safe to use a
-     // TokenDatabase that failed to load (it will be empty), but it may be
-     // desirable to provide a default database or otherwise handle the error.
-     if (database.ok()) {
-       return Detokenizer(database);
-     }
-     return Detokenizer(kDefaultDatabase);
-   }
-
-TypeScript
-==========
-To detokenize in TypeScript, import ``Detokenizer`` from the ``pigweedjs``
-package, and instantiate it with a CSV token database.
-
-.. code-block:: typescript
-
-   import { pw_tokenizer, pw_hdlc } from 'pigweedjs';
-   const { Detokenizer } = pw_tokenizer;
-   const { Frame } = pw_hdlc;
-
-   const detokenizer = new Detokenizer(String(tokenCsv));
-
-   function processLog(frame: Frame){
-     const result = detokenizer.detokenize(frame);
-     console.log(result);
-   }
-
-For messages that are encoded in Base64, use ``Detokenizer::detokenizeBase64``.
-`detokenizeBase64` will also attempt to detokenize nested Base64 tokens. There
-is also `detokenizeUint8Array` that works just like `detokenize` but expects
-`Uint8Array` instead of a `Frame` argument.
-
-Protocol buffers
-================
-``pw_tokenizer`` provides utilities for handling tokenized fields in protobufs.
-See :ref:`module-pw_tokenizer-proto` for details.
-
-.. _module-pw_tokenizer-base64-decoding:
-
-Decoding Base64
-===============
-The Python ``Detokenizer`` class supports decoding and detokenizing prefixed
-Base64 messages with ``detokenize_base64`` and related methods.
-
-.. tip::
-   The Python detokenization tools support recursive detokenization for prefixed
-   Base64 text. Tokenized strings found in detokenized text are detokenized, so
-   prefixed Base64 messages can be passed as ``%s`` arguments.
-
-   For example, the tokenized string for "Wow!" is ``$RhYjmQ==``. This could be
-   passed as an argument to the printf-style string ``Nested message: %s``, which
-   encodes to ``$pEVTYQkkUmhZam1RPT0=``. The detokenizer would decode the message
-   as follows:
-
-   ::
-
-     "$pEVTYQkkUmhZam1RPT0=" → "Nested message: $RhYjmQ==" → "Nested message: Wow!"
-
-Base64 decoding is supported in C++ or C with the
-``pw::tokenizer::PrefixedBase64Decode`` or ``pw_tokenizer_PrefixedBase64Decode``
-functions.
-
-Investigating undecoded Base64 messages
----------------------------------------
-Tokenized messages cannot be decoded if the token is not recognized. The Python
-package includes the ``parse_message`` tool, which parses tokenized Base64
-messages without looking up the token in a database. This tool attempts to guess
-the types of the arguments and displays potential ways to decode them.
-
-This tool can be used to extract argument information from an otherwise unusable
-message. It could help identify which statement in the code produced the
-message. This tool is not particularly helpful for tokenized messages without
-arguments, since all it can do is show the value of the unknown token.
-
-The tool is executed by passing Base64 tokenized messages, with or without the
-``$`` prefix, to ``pw_tokenizer.parse_message``. Pass ``-h`` or ``--help`` to
-see full usage information.
-
-Example
-^^^^^^^
-.. code-block::
-
-   $ python -m pw_tokenizer.parse_message '$329JMwA=' koSl524TRkFJTEVEX1BSRUNPTkRJVElPTgJPSw== --specs %s %d
-
-   INF Decoding arguments for '$329JMwA='
-   INF Binary: b'\xdfoI3\x00' [df 6f 49 33 00] (5 bytes)
-   INF Token:  0x33496fdf
-   INF Args:   b'\x00' [00] (1 bytes)
-   INF Decoding with up to 8 %s or %d arguments
-   INF   Attempt 1: [%s]
-   INF   Attempt 2: [%d] 0
-
-   INF Decoding arguments for '$koSl524TRkFJTEVEX1BSRUNPTkRJVElPTgJPSw=='
-   INF Binary: b'\x92\x84\xa5\xe7n\x13FAILED_PRECONDITION\x02OK' [92 84 a5 e7 6e 13 46 41 49 4c 45 44 5f 50 52 45 43 4f 4e 44 49 54 49 4f 4e 02 4f 4b] (28 bytes)
-   INF Token:  0xe7a58492
-   INF Args:   b'n\x13FAILED_PRECONDITION\x02OK' [6e 13 46 41 49 4c 45 44 5f 50 52 45 43 4f 4e 44 49 54 49 4f 4e 02 4f 4b] (24 bytes)
-   INF Decoding with up to 8 %s or %d arguments
-   INF   Attempt 1: [%d %s %d %d %d] 55 FAILED_PRECONDITION 1 -40 -38
-   INF   Attempt 2: [%d %s %s] 55 FAILED_PRECONDITION OK
-
-.. _module-pw_tokenizer-collisions-guide:
-
-----------------
-Token collisions
-----------------
-See :ref:`module-pw_tokenizer-collisions` for a conceptual overview of token
-collisions.
-
-Collisions may occur occasionally. Run the command
-``python -m pw_tokenizer.database report <database>`` to see information about a
-token database, including any collisions.
-
-If there are collisions, take the following steps to resolve them.
-
-- Change one of the colliding strings slightly to give it a new token.
-- In C (not C++), artificial collisions may occur if strings longer than
-  ``PW_TOKENIZER_CFG_C_HASH_LENGTH`` are hashed. If this is happening, consider
-  setting ``PW_TOKENIZER_CFG_C_HASH_LENGTH`` to a larger value.  See
-  ``pw_tokenizer/public/pw_tokenizer/config.h``.
-- Run the ``mark_removed`` command with the latest version of the build
-  artifacts to mark missing strings as removed. This deprioritizes them in
-  collision resolution.
-
-  .. code-block:: sh
-
-     python -m pw_tokenizer.database mark_removed --database <database> <ELF files>
-
-  The ``purge`` command may be used to delete these tokens from the database.
-
-.. _module-pw_tokenizer-domains:
-
---------------------
-Tokenization domains
---------------------
-``pw_tokenizer`` supports having multiple tokenization domains. Domains are a
-string label associated with each tokenized string. This allows projects to keep
-tokens from different sources separate. Potential use cases include the
-following:
-
-* Keep large sets of tokenized strings separate to avoid collisions.
-* Create a separate database for a small number of strings that use truncated
-  tokens, for example only 10 or 16 bits instead of the full 32 bits.
-
-If no domain is specified, the domain is empty (``""``). For many projects, this
-default domain is sufficient, so no additional configuration is required.
-
-.. code-block:: cpp
-
-   // Tokenizes this string to the default ("") domain.
-   PW_TOKENIZE_STRING("Hello, world!");
-
-   // Tokenizes this string to the "my_custom_domain" domain.
-   PW_TOKENIZE_STRING_DOMAIN("my_custom_domain", "Hello, world!");
-
-The database and detokenization command line tools default to reading from the
-default domain. The domain may be specified for ELF files by appending
-``#DOMAIN_NAME`` to the file path. Use ``#.*`` to read from all domains. For
-example, the following reads strings in ``some_domain`` from ``my_image.elf``.
-
-.. code-block:: sh
-
-   ./database.py create --database my_db.csv path/to/my_image.elf#some_domain
-
-See :ref:`module-pw_tokenizer-managing-token-databases` for information about
-the ``database.py`` command line tool.
-
-----------
-Case study
-----------
-.. note:: This section discusses the implementation, results, and lessons
-   learned from a real-world deployment of ``pw_tokenizer``.
-
-The tokenizer module was developed to bring tokenized logging to an
-in-development product. The product already had an established text-based
-logging system. Deploying tokenization was straightforward and had substantial
-benefits.
-
-Results
-=======
-* Log contents shrunk by over 50%, even with Base64 encoding.
-
-  * Significant size savings for encoded logs, even using the less-efficient
-    Base64 encoding required for compatibility with the existing log system.
-  * Freed valuable communication bandwidth.
-  * Allowed storing many more logs in crash dumps.
-
-* Substantial flash savings.
-
-  * Reduced the size firmware images by up to 18%.
-
-* Simpler logging code.
-
-  * Removed CPU-heavy ``snprintf`` calls.
-  * Removed complex code for forwarding log arguments to a low-priority task.
-
-This section describes the tokenizer deployment process and highlights key
-insights.
-
-Firmware deployment
-===================
-* In the project's logging macro, calls to the underlying logging function were
-  replaced with a tokenized log macro invocation.
-* The log level was passed as the payload argument to facilitate runtime log
-  level control.
-* For this project, it was necessary to encode the log messages as text. In
-  the handler function the log messages were encoded in the $-prefixed
-  :ref:`module-pw_tokenizer-base64-format`, then dispatched as normal log messages.
-* Asserts were tokenized a callback-based API that has been removed (a
-  :ref:`custom macro <module-pw_tokenizer-custom-macro>` is a better
-  alternative).
-
-.. attention::
-  Do not encode line numbers in tokenized strings. This results in a huge
-  number of lines being added to the database, since every time code moves,
-  new strings are tokenized. If :ref:`module-pw_log_tokenized` is used, line
-  numbers are encoded in the log metadata. Line numbers may also be included by
-  by adding ``"%d"`` to the format string and passing ``__LINE__``.
-
-.. _module-pw_tokenizer-database-management:
-
-Database management
-===================
-* The token database was stored as a CSV file in the project's Git repo.
-* The token database was automatically updated as part of the build, and
-  developers were expected to check in the database changes alongside their code
-  changes.
-* A presubmit check verified that all strings added by a change were added to
-  the token database.
-* The token database included logs and asserts for all firmware images in the
-  project.
-* No strings were purged from the token database.
-
-.. tip::
-   Merge conflicts may be a frequent occurrence with an in-source CSV database.
-   Use the :ref:`module-pw_tokenizer-directory-database-format` instead.
-
-Decoding tooling deployment
-===========================
-* The Python detokenizer in ``pw_tokenizer`` was deployed to two places:
-
-  * Product-specific Python command line tools, using
-    ``pw_tokenizer.Detokenizer``.
-  * Standalone script for decoding prefixed Base64 tokens in files or
-    live output (e.g. from ``adb``), using ``detokenize.py``'s command line
-    interface.
-
-* The C++ detokenizer library was deployed to two Android apps with a Java
-  Native Interface (JNI) layer.
-
-  * The binary token database was included as a raw resource in the APK.
-  * In one app, the built-in token database could be overridden by copying a
-    file to the phone.
-
-.. tip::
-   Make the tokenized logging tools simple to use for your project.
-
-   * Provide simple wrapper shell scripts that fill in arguments for the
-     project. For example, point ``detokenize.py`` to the project's token
-     databases.
-   * Use ``pw_tokenizer.AutoUpdatingDetokenizer`` to decode in
-     continuously-running tools, so that users don't have to restart the tool
-     when the token database updates.
-   * Integrate detokenization everywhere it is needed. Integrating the tools
-     takes just a few lines of code, and token databases can be embedded in APKs
-     or binaries.
diff --git a/pw_tokenizer/public/pw_tokenizer/base64.h b/pw_tokenizer/public/pw_tokenizer/base64.h
index 36acaf2..5cca014 100644
--- a/pw_tokenizer/public/pw_tokenizer/base64.h
+++ b/pw_tokenizer/public/pw_tokenizer/base64.h
@@ -91,11 +91,6 @@
   return Base64EncodedStringSize(message_size) + sizeof('\0');
 }
 
-// The minimum buffer size that can hold a tokenized message that is
-// PW_TOKENIZER_CFG_ENCODING_BUFFER_SIZE_BYTES long encoded as prefixed Base64.
-inline constexpr size_t kDefaultBase64EncodedBufferSize =
-    Base64EncodedBufferSize(PW_TOKENIZER_CFG_ENCODING_BUFFER_SIZE_BYTES);
-
 // Encodes a binary tokenized message as prefixed Base64 with a null terminator.
 // Returns the encoded string length (excluding the null terminator). Returns 0
 // if the buffer is too small. Always null terminates if the output buffer is
@@ -128,8 +123,7 @@
 // Encodes a binary tokenized message as prefixed Base64 to a pw::InlineString.
 // The pw::InlineString is sized to fit messages up to
 // kMaxBinaryMessageSizeBytes long. Asserts if the message is larger.
-template <size_t kMaxBinaryMessageSizeBytes =
-              PW_TOKENIZER_CFG_ENCODING_BUFFER_SIZE_BYTES>
+template <size_t kMaxBinaryMessageSizeBytes>
 auto PrefixedBase64Encode(span<const std::byte> binary_message) {
   static_assert(kMaxBinaryMessageSizeBytes >= 1, "Messages cannot be empty");
   InlineString<Base64EncodedStringSize(kMaxBinaryMessageSizeBytes)> string(
@@ -138,8 +132,7 @@
   return string;
 }
 
-template <size_t kMaxBinaryMessageSizeBytes =
-              PW_TOKENIZER_CFG_ENCODING_BUFFER_SIZE_BYTES>
+template <size_t kMaxBinaryMessageSizeBytes>
 auto PrefixedBase64Encode(span<const uint8_t> binary_message) {
   return PrefixedBase64Encode<kMaxBinaryMessageSizeBytes>(
       as_bytes(binary_message));
diff --git a/pw_tokenizer/public/pw_tokenizer/config.h b/pw_tokenizer/public/pw_tokenizer/config.h
index d48ce53..2376854 100644
--- a/pw_tokenizer/public/pw_tokenizer/config.h
+++ b/pw_tokenizer/public/pw_tokenizer/config.h
@@ -15,54 +15,46 @@
 // Configuration macros for the tokenizer module.
 #pragma once
 
-#include <assert.h>
-
-// For a tokenized string that has arguments, the types of the arguments are
-// encoded in either a 4-byte (uint32_t) or a 8-byte (uint64_t) value. The 4 or
-// 6 least-significant bits, respectively, store the number of arguments, while
-// the remaining bits encode the argument types. Argument types are encoded
-// two-bits per argument, in little-endian order. Up to 14 arguments in 4 bytes
-// or 29 arguments in 8 bytes are supported.
+/// For a tokenized string with arguments, the types of the arguments are
+/// encoded in either 4 bytes (`uint32_t`) or 8 bytes (`uint64_t`). 4 bytes
+/// supports up to 14 tokenized string arguments; 8 bytes supports up to 29
+/// arguments. Using 8 bytes increases code size for 32-bit machines.
+///
+/// Argument types are encoded two bits per argument, in little-endian order.
+/// The 4 or 6 least-significant bits, respectively, store the number of
+/// arguments, while the remaining bits encode the argument types.
 #ifndef PW_TOKENIZER_CFG_ARG_TYPES_SIZE_BYTES
 #define PW_TOKENIZER_CFG_ARG_TYPES_SIZE_BYTES 4
 #endif  // PW_TOKENIZER_CFG_ARG_TYPES_SIZE_BYTES
 
-static_assert(PW_TOKENIZER_CFG_ARG_TYPES_SIZE_BYTES == 4 ||
-                  PW_TOKENIZER_CFG_ARG_TYPES_SIZE_BYTES == 8,
-              "PW_TOKENIZER_CFG_ARG_TYPES_SIZE_BYTES must be 4 or 8");
-
-// Maximum number of characters to hash in C. In C code, strings shorter than
-// this length are treated as if they were zero-padded up to the length. Strings
-// that are the same length and share a common prefix longer than this value
-// hash to the same value. Increasing PW_TOKENIZER_CFG_C_HASH_LENGTH increases
-// the compilation time for C due to the complexity of the hashing macros.
-//
-// PW_TOKENIZER_CFG_C_HASH_LENGTH has no effect on C++ code. In C++, hashing is
-// done with a constexpr function instead of a macro. There are no string length
-// limitations and compilation times are unaffected by this macro.
-//
-// Only hash lengths for which there is a corresponding macro header
-// (pw_tokenizer/internal/mash_macro_#.h) are supported. Additional macros may
-// be generated with the generate_hash_macro.py function. New macro headers must
-// then be added to pw_tokenizer/internal/hash.h.
-//
-// This MUST match the value of DEFAULT_C_HASH_LENGTH in
-// pw_tokenizer/py/pw_tokenizer/tokens.py.
+/// Maximum number of characters to hash in C. In C code, strings shorter than
+/// this length are treated as if they were zero-padded up to the length.
+/// Strings that are the same length and share a common prefix longer than this
+/// value hash to the same value. Increasing `PW_TOKENIZER_CFG_C_HASH_LENGTH`
+/// increases the compilation time for C due to the complexity of the hashing
+/// macros.
+///
+/// `PW_TOKENIZER_CFG_C_HASH_LENGTH` has no effect on C++ code. In C++, hashing
+/// is done with a `constexpr` function instead of a macro. There are no string
+/// length limitations and compilation times are unaffected by this macro.
+///
+/// Only hash lengths for which there is a corresponding macro header
+/// (`pw_tokenizer/internal/pw_tokenizer_65599_fixed_length_#_hash_macro.h`) are
+/// supported. Additional macros may be generated with the
+/// `generate_hash_macro.py` function. New macro headers must then be added to
+/// `pw_tokenizer/internal/tokenize_string.h`.
+///
+/// This MUST match the value of `DEFAULT_C_HASH_LENGTH` in
+/// `pw_tokenizer/py/pw_tokenizer/tokens.py`.
 #ifndef PW_TOKENIZER_CFG_C_HASH_LENGTH
 #define PW_TOKENIZER_CFG_C_HASH_LENGTH 128
 #endif  // PW_TOKENIZER_CFG_C_HASH_LENGTH
 
-// The size of the stack-allocated argument encoding buffer to use by default.
-// This only affects tokenization macros that use the
-// pw::tokenizer::EncodedMessage class. A buffer of this size is allocated and
-// used for the 4-byte token and for encoding all arguments. It must be at least
-// large enough for the token (4 bytes).
-//
-// This buffer does not need to be large to accommodate a good number of
-// tokenized string arguments. Integer arguments are usually encoded smaller
-// than their native size (e.g. 1 or 2 bytes for smaller numbers). All floating
-// point types are encoded as four bytes. Null-terminated strings are encoded
-// 1:1 in size.
+/// `PW_TOKENIZER_CFG_ENCODING_BUFFER_SIZE_BYTES` is deprecated. It is used as
+/// the default value for pw_log_tokenized's
+/// @c_macro{PW_LOG_TOKENIZED_ENCODING_BUFFER_SIZE_BYTES}. This value should not
+/// be configured; set @c_macro{PW_LOG_TOKENIZED_ENCODING_BUFFER_SIZE_BYTES}
+/// instead.
 #ifndef PW_TOKENIZER_CFG_ENCODING_BUFFER_SIZE_BYTES
 #define PW_TOKENIZER_CFG_ENCODING_BUFFER_SIZE_BYTES 52
 #endif  // PW_TOKENIZER_CFG_ENCODING_BUFFER_SIZE_BYTES
diff --git a/pw_tokenizer/public/pw_tokenizer/detokenize.h b/pw_tokenizer/public/pw_tokenizer/detokenize.h
index 9a3e588..1cdaa87 100644
--- a/pw_tokenizer/public/pw_tokenizer/detokenize.h
+++ b/pw_tokenizer/public/pw_tokenizer/detokenize.h
@@ -84,6 +84,15 @@
   // that stores all possible detokenized string results.
   DetokenizedString Detokenize(const span<const uint8_t>& encoded) const;
 
+  // Decodes and detokenizes a Base64 encoded message. Returns a
+  // DetokenizedString that stores all possible detokenized string results.
+  DetokenizedString DetokenizeBase64Message(
+      const std::string_view& encoded) const;
+
+  // Decodes and detokenizes a Base64 encoded stream. Returns a
+  // std::string that contains all detokenized string results.
+  std::string DetokenizeBase64(const std::string_view& encoded) const;
+
   DetokenizedString Detokenize(const std::string_view& encoded) const {
     return Detokenize(encoded.data(), encoded.size());
   }
diff --git a/pw_tokenizer/public/pw_tokenizer/encode_args.h b/pw_tokenizer/public/pw_tokenizer/encode_args.h
index 07e6e31..e5b585c 100644
--- a/pw_tokenizer/public/pw_tokenizer/encode_args.h
+++ b/pw_tokenizer/public/pw_tokenizer/encode_args.h
@@ -20,6 +20,7 @@
 #include "pw_polyfill/standard.h"
 #include "pw_preprocessor/util.h"
 #include "pw_tokenizer/internal/argument_types.h"
+#include "pw_varint/varint.h"
 
 #if PW_CXX_STANDARD_IS_SUPPORTED(17)
 
@@ -82,10 +83,8 @@
                   va_list args,
                   span<std::byte> output);
 
-/// Encodes a tokenized message to a fixed size buffer. By default, the buffer
-/// size is set by the @c_macro{PW_TOKENIZER_CFG_ENCODING_BUFFER_SIZE_BYTES}
-/// config macro. This class is used to encode tokenized messages passed in from
-/// tokenization macros.
+/// Encodes a tokenized message to a fixed size buffer. This class is used to
+/// encode tokenized messages passed in from tokenization macros.
 ///
 /// To use `pw::tokenizer::EncodedMessage`, construct it with the token,
 /// argument types, and `va_list` from the variadic arguments:
@@ -104,7 +103,7 @@
 ///     SendLogMessage(encoded_message);  // EncodedMessage converts to span
 ///   }
 /// @endcode
-template <size_t kMaxSizeBytes = PW_TOKENIZER_CFG_ENCODING_BUFFER_SIZE_BYTES>
+template <size_t kMaxSizeBytes>
 class EncodedMessage {
  public:
   // Encodes a tokenized message to an internal buffer.
@@ -149,4 +148,23 @@
                                va_list args,
                                void* output_buffer,
                                size_t output_buffer_size);
+
+/// Encodes an `int` with the standard integer encoding: zig-zag + LEB128.
+/// This function is only necessary when manually encoding tokenized messages.
+static inline size_t pw_tokenizer_EncodeInt(int value,
+                                            void* output,
+                                            size_t output_size_bytes) {
+  return pw_varint_Encode32(
+      pw_varint_ZigZagEncode32(value), output, output_size_bytes);
+}
+
+/// Encodes an `int64_t` with the standard integer encoding: zig-zag + LEB128.
+/// This function is only necessary when manually encoding tokenized messages.
+static inline size_t pw_tokenizer_EncodeInt64(int64_t value,
+                                              void* output,
+                                              size_t output_size_bytes) {
+  return pw_varint_Encode64(
+      pw_varint_ZigZagEncode64(value), output, output_size_bytes);
+}
+
 PW_EXTERN_C_END
diff --git a/pw_tokenizer/public/pw_tokenizer/token_database.h b/pw_tokenizer/public/pw_tokenizer/token_database.h
index 26ae7c0..e885339 100644
--- a/pw_tokenizer/public/pw_tokenizer/token_database.h
+++ b/pw_tokenizer/public/pw_tokenizer/token_database.h
@@ -20,37 +20,58 @@
 
 namespace pw::tokenizer {
 
-// Reads entries from a binary token string database. This class does not copy
-// or modify the contents of the database.
-//
-// A binary token database is comprised of a 16-byte header followed by an array
-// of 8-byte entries and a table of null-terminated strings. The header
-// specifies the number of entries. Each entry contains information about a
-// tokenized string: the token and removal date, if any. All fields are
-// little-endian.
-//
-//            Header
-//            ======
-//   Offset  Size  Field
-//   -----------------------------------
-//        0     6  Magic number (TOKENS)
-//        6     2  Version (00 00)
-//        8     4  Entry count
-//       12     4  Reserved
-//
-//             Entry
-//             =====
-//   Offset  Size  Field
-//   -----------------------------------
-//        0     4  Token
-//        4     4  Removal date (d m yy)
-//
-// Entries are sorted by token. A string table with a null-terminated string for
-// each entry in order follows the entries.
-//
-// Entries are accessed by iterating over the database. A O(n) Find function is
-// also provided. In typical use, a TokenDatabase is preprocessed by a
-// Detokenizer into a std::unordered_map.
+/// Reads entries from a v0 binary token string database. This class does not
+/// copy or modify the contents of the database.
+///
+/// The v0 token database has two significant shortcomings:
+///
+///   - Strings cannot contain null terminators (`\0`). If a string contains a
+///     `\0`, the database will not work correctly.
+///   - The domain is not included in entries. All tokens belong to a single
+///     domain, which must be known independently.
+///
+/// A v0 binary token database is comprised of a 16-byte header followed by an
+/// array of 8-byte entries and a table of null-terminated strings. The header
+/// specifies the number of entries. Each entry contains information about a
+/// tokenized string: the token and removal date, if any. All fields are little-
+/// endian.
+///
+/// The token removal date is stored within an unsigned 32-bit integer. It is
+/// stored as `<day> <month> <year>`, where `<day>` and `<month>` are 1 byte
+/// each and `<year>` is two bytes. The fields are set to their maximum value
+/// (`0xFF` or `0xFFFF`) if they are unset. With this format, dates may be
+/// compared naturally as unsigned integers.
+///
+/// @rst
+///   ======  ====  =========================
+///   Header (16 bytes)
+///   ---------------------------------------
+///   Offset  Size  Field
+///   ======  ====  =========================
+///        0     6  Magic number (``TOKENS``)
+///        6     2  Version (``00 00``)
+///        8     4  Entry count
+///       12     4  Reserved
+///   ======  ====  =========================
+///
+///   ======  ====  ==================================
+///   Entry (8 bytes)
+///   ------------------------------------------------
+///   Offset  Size  Field
+///   ======  ====  ==================================
+///        0     4  Token
+///        4     1  Removal day (1-31, 255 if unset)
+///        5     1  Removal month (1-12, 255 if unset)
+///        6     2  Removal year (65535 if unset)
+///   ======  ====  ==================================
+/// @endrst
+///
+/// Entries are sorted by token. A string table with a null-terminated string
+/// for each entry in order follows the entries.
+///
+/// Entries are accessed by iterating over the database. A O(n) `Find` function
+/// is also provided. In typical use, a `TokenDatabase` is preprocessed by a
+/// `pw::tokenizer::Detokenizer` into a `std::unordered_map`.
 class TokenDatabase {
  private:
   // Internal struct that describes how the underlying binary token database
@@ -64,23 +85,23 @@
   static_assert(sizeof(RawEntry) == 8u);
 
  public:
-  // An entry in the token database. This struct adds the string to a RawEntry.
+  /// An entry in the token database.
   struct Entry {
-    // The token calculated for this string.
+    /// The token that represents this string.
     uint32_t token;
 
-    // The date the token and string was removed from the database, or
-    // 0xFFFFFFFF if it was never removed. Dates are encoded such that natural
-    // integer sorting sorts from oldest to newest dates. The day is stored an
-    // an 8-bit day, 8-bit month, and 16-bit year, packed into a little-endian
-    // uint32_t.
+    /// The date the token and string was removed from the database, or
+    /// `0xFFFFFFFF` if it was never removed. Dates are encoded such that
+    /// natural integer sorting sorts from oldest to newest dates. The day is
+    /// stored as an 8-bit day, 8-bit month, and 16-bit year, packed into a
+    /// little-endian `uint32_t`.
     uint32_t date_removed;
 
-    // The null-terminated string represented by this token.
+    /// The null-terminated string represented by this token.
     const char* string;
   };
 
-  // Iterator for TokenDatabase values.
+  /// Iterator for `TokenDatabase` values.
   class iterator {
    public:
     using difference_type = std::ptrdiff_t;
@@ -89,6 +110,8 @@
     using reference = const Entry&;
     using iterator_category = std::forward_iterator_tag;
 
+    constexpr iterator() : entry_{}, raw_(nullptr) {}
+
     constexpr iterator(const iterator& other) = default;
     constexpr iterator& operator=(const iterator& other) = default;
 
@@ -126,15 +149,13 @@
     // Constructs a new iterator to a valid entry.
     constexpr iterator(const char* raw_entry, const char* string)
         : entry_{0, 0, string}, raw_{raw_entry} {
-      if (raw_entry != string) {
+      if (raw_entry != string) {  // raw_entry == string if the DB is empty
         ReadRawEntry();
       }
     }
 
     explicit constexpr iterator(const char* end) : entry_{}, raw_(end) {}
 
-    constexpr iterator() : entry_{}, raw_(nullptr) {}
-
     constexpr void ReadRawEntry() {
       entry_.token = ReadUint32(raw_);
       entry_.date_removed = ReadUint32(raw_ + sizeof(entry_.token));
@@ -155,8 +176,8 @@
   using reverse_iterator = std::reverse_iterator<iterator>;
   using const_reverse_iterator = std::reverse_iterator<const_iterator>;
 
-  // A list of token entries returned from a Find operation. This object can be
-  // iterated over or indexed as an array.
+  /// A list of token entries returned from a `Find` operation. This object can
+  /// be iterated over or indexed as an array.
   class Entries {
    public:
     constexpr Entries(const iterator& begin, const iterator& end)
@@ -168,8 +189,7 @@
     // True of the list is empty.
     constexpr bool empty() const { return begin_ == end_; }
 
-    // Accesses the specified entry in this set. Returns an Entry object, which
-    // is constructed from the underlying raw entry. The index must be less than
+    // Accesses the specified entry in this set. The index must be less than
     // size(). This operation is O(n) in size().
     Entry operator[](size_type index) const;
 
@@ -181,22 +201,25 @@
     iterator end_;
   };
 
-  // Returns true if the provided data is a valid token database. This checks
-  // the magic number ("TOKENS"), version (which must be 0), and that there is
-  // is one string for each entry in the database. A database with extra strings
-  // or other trailing data is considered valid.
+  /// Returns true if the provided data is a valid token database. This checks
+  /// the magic number (`TOKENS`), version (which must be `0`), and that there
+  /// is one string for each entry in the database. A database with extra
+  /// strings or other trailing data is considered valid.
   template <typename ByteArray>
   static constexpr bool IsValid(const ByteArray& bytes) {
     return HasValidHeader(bytes) && EachEntryHasAString(bytes);
   }
 
-  // Creates a TokenDatabase and checks if the provided data is valid at compile
-  // time. Accepts references to constexpr containers (array, span, string_view,
-  // etc.) with static storage duration. For example:
-  //
-  //   constexpr char kMyData[] = ...;
-  //   constexpr TokenDatabase db = TokenDatabase::Create<kMyData>();
-  //
+  /// Creates a `TokenDatabase` and checks if the provided data is valid at
+  /// compile time. Accepts references to constexpr containers (`array`, `span`,
+  /// `string_view`, etc.) with static storage duration. For example:
+  ///
+  ///  @code{.cpp}
+  ///
+  ///    constexpr char kMyData[] = ...;
+  ///    constexpr TokenDatabase db = TokenDatabase::Create<kMyData>();
+  ///
+  ///  @endcode
   template <const auto& kDatabaseBytes>
   static constexpr TokenDatabase Create() {
     static_assert(
@@ -209,35 +232,38 @@
     return TokenDatabase(std::data(kDatabaseBytes));
   }
 
-  // Creates a TokenDatabase from the provided byte array. The array may be a
-  // span, array, or other container type. If the data is not valid, returns a
-  // default-constructed database for which ok() is false.
-  //
-  // Prefer the Create overload that takes the data as a template parameter
-  // whenever possible, since that function checks the integrity of the data at
-  // compile time.
+  /// Creates a `TokenDatabase` from the provided byte array. The array may be a
+  /// span, array, or other container type. If the data is not valid, returns a
+  /// default-constructed database for which ok() is false.
+  ///
+  /// Prefer the `Create` overload that takes the data as a template parameter
+  /// when possible, since that overload verifies data integrity at compile
+  /// time.
   template <typename ByteArray>
   static constexpr TokenDatabase Create(const ByteArray& database_bytes) {
     return IsValid<ByteArray>(database_bytes)
                ? TokenDatabase(std::data(database_bytes))
                : TokenDatabase();  // Invalid database.
   }
-  // Creates a database with no data. ok() returns false.
+  /// Creates a database with no data. `ok()` returns false.
   constexpr TokenDatabase() : begin_{.data = nullptr}, end_{.data = nullptr} {}
 
-  // Returns all entries associated with this token. This is a O(n) operation.
+  /// Returns all entries associated with this token. This is `O(n)`.
   Entries Find(uint32_t token) const;
 
-  // Returns the total number of entries (unique token-string pairs).
+  /// Returns the total number of entries (unique token-string pairs).
   constexpr size_type size() const {
     return (end_.data - begin_.data) / sizeof(RawEntry);
   }
 
-  // True if this database was constructed with valid data. The database might
-  // be empty, but it has an intact header and a string for each entry.
+  /// True if this database was constructed with valid data. The database might
+  /// be empty, but it has an intact header and a string for each entry.
   constexpr bool ok() const { return begin_.data != nullptr; }
 
+  /// Returns an iterator for the first token entry.
   constexpr iterator begin() const { return iterator(begin_.data, end_.data); }
+
+  /// Returns an iterator for one past the last token entry.
   constexpr iterator end() const { return iterator(end_.data); }
 
  private:
diff --git a/pw_tokenizer/public/pw_tokenizer/tokenize.h b/pw_tokenizer/public/pw_tokenizer/tokenize.h
index 6b8e62c..1f3ac73 100644
--- a/pw_tokenizer/public/pw_tokenizer/tokenize.h
+++ b/pw_tokenizer/public/pw_tokenizer/tokenize.h
@@ -75,7 +75,7 @@
   }()
 
 /// Tokenizes a string literal in a standalone statement using the specified
-/// @rstref{domain <module-pw_tokenizer-domains>}. C and C++ compatible.
+/// @rstref{domain<module-pw_tokenizer-domains>}. C and C++ compatible.
 #define PW_TOKENIZE_STRING_DOMAIN(domain, string_literal) \
   PW_TOKENIZE_STRING_MASK(domain, UINT32_MAX, string_literal)
 
@@ -111,7 +111,7 @@
   }()
 
 #define _PW_TOKENIZER_MASK_TOKEN(mask, string_literal) \
-  ((pw_tokenizer_Token)(mask)&PW_TOKENIZER_STRING_TOKEN(string_literal))
+  ((pw_tokenizer_Token)(mask) & PW_TOKENIZER_STRING_TOKEN(string_literal))
 
 /// Encodes a tokenized string and arguments to the provided buffer. The size of
 /// the buffer is passed via a pointer to a `size_t`. After encoding is
@@ -159,23 +159,56 @@
 
 /// Same as @c_macro{PW_TOKENIZE_TO_BUFFER_DOMAIN}, but applies a
 /// @rstref{bit mask <module-pw_tokenizer-masks>} to the token.
-#define PW_TOKENIZE_TO_BUFFER_MASK(                               \
-    domain, mask, buffer, buffer_size_pointer, format, ...)       \
-  do {                                                            \
-    PW_TOKENIZE_FORMAT_STRING(domain, mask, format, __VA_ARGS__); \
-    _pw_tokenizer_ToBuffer(buffer,                                \
-                           buffer_size_pointer,                   \
-                           _pw_tokenizer_token,                   \
-                           PW_TOKENIZER_ARG_TYPES(__VA_ARGS__)    \
-                               PW_COMMA_ARGS(__VA_ARGS__));       \
+#define PW_TOKENIZE_TO_BUFFER_MASK(                                          \
+    domain, mask, buffer, buffer_size_pointer, format, ...)                  \
+  do {                                                                       \
+    PW_TOKENIZE_FORMAT_STRING(domain, mask, format, __VA_ARGS__);            \
+    _pw_tokenizer_ToBuffer(buffer,                                           \
+                           buffer_size_pointer,                              \
+                           PW_TOKENIZER_REPLACE_FORMAT_STRING(__VA_ARGS__)); \
   } while (0)
 
+/// @brief Low-level macro for calling functions that handle tokenized strings.
+///
+/// Functions that work with tokenized format strings must take the following
+/// arguments:
+///
+/// - The 32-bit token (@cpp_type{pw_tokenizer_Token})
+/// - The 32- or 64-bit argument types (@cpp_type{pw_tokenizer_ArgTypes})
+/// - Variadic arguments, if any
+///
+/// This macro expands to those arguments. Custom tokenization macros should use
+/// this macro to pass these arguments to a function or other macro.
+///
+/** @code{cpp}
+ *    EncodeMyTokenizedString(uint32_t token,
+ *                            pw_tokenizer_ArgTypes arg_types,
+ *                            ...);
+ *
+ *    #define CUSTOM_TOKENIZATION_MACRO(format, ...)                  \
+ *      PW_TOKENIZE_FORMAT_STRING(domain, mask, format, __VA_ARGS__); \
+ *      EncodeMyTokenizedString(PW_TOKENIZER_REPLACE_FORMAT_STRING(__VA_ARGS__))
+ *  @endcode
+ */
+#define PW_TOKENIZER_REPLACE_FORMAT_STRING(...) \
+  _PW_TOKENIZER_REPLACE_FORMAT_STRING(PW_EMPTY_ARGS(__VA_ARGS__), __VA_ARGS__)
+
+#define _PW_TOKENIZER_REPLACE_FORMAT_STRING(empty_args, ...) \
+  _PW_CONCAT_2(_PW_TOKENIZER_REPLACE_FORMAT_STRING_, empty_args)(__VA_ARGS__)
+
+#define _PW_TOKENIZER_REPLACE_FORMAT_STRING_1() _pw_tokenizer_token, 0u
+#define _PW_TOKENIZER_REPLACE_FORMAT_STRING_0(...) \
+  _pw_tokenizer_token, PW_TOKENIZER_ARG_TYPES(__VA_ARGS__), __VA_ARGS__
+
 /// Converts a series of arguments to a compact format that replaces the format
 /// string literal. Evaluates to a `pw_tokenizer_ArgTypes` value.
 ///
 /// Depending on the size of `pw_tokenizer_ArgTypes`, the bottom 4 or 6 bits
 /// store the number of arguments and the remaining bits store the types, two
 /// bits per type. The arguments are not evaluated; only their types are used.
+///
+/// In general, @c_macro{PW_TOKENIZER_ARG_TYPES} should not be used directly.
+/// Instead, use @c_macro{PW_TOKENIZER_REPLACE_FORMAT_STRING}.
 #define PW_TOKENIZER_ARG_TYPES(...) \
   PW_DELEGATE_BY_ARG_COUNT(_PW_TOKENIZER_TYPES_, __VA_ARGS__)
 
@@ -204,25 +237,40 @@
 /// since the same variable is used in every invocation.
 ///
 /// The tokenized string uses the specified @rstref{tokenization domain
-/// <module-pw_tokenizer-domains>}.  Use `PW_TOKENIZER_DEFAULT_DOMAIN` for the
+/// <module-pw_tokenizer-domains>}. Use `PW_TOKENIZER_DEFAULT_DOMAIN` for the
 /// default. The token also may be masked; use `UINT32_MAX` to keep all bits.
 ///
-/// This macro checks that the printf-style format string matches the arguments,
-/// stores the format string in a special section, and calculates the string's
-/// token at compile time.
+/// This macro checks that the printf-style format string matches the arguments
+/// and that no more than @c_macro{PW_TOKENIZER_MAX_SUPPORTED_ARGS} are
+/// provided. It then stores the format string in a special section, and
+/// calculates the string's token at compile time.
 // clang-format off
-#define PW_TOKENIZE_FORMAT_STRING(domain, mask, format, ...)                  \
-  if (0) { /* Do not execute to prevent double evaluation of the arguments. */ \
-    pw_tokenizer_CheckFormatString(format PW_COMMA_ARGS(__VA_ARGS__));         \
-  }                                                                            \
-                                                                               \
-  /* Check that the macro is invoked with a supported number of arguments. */  \
+#define PW_TOKENIZE_FORMAT_STRING(domain, mask, format, ...)                   \
   static_assert(                                                               \
       PW_FUNCTION_ARG_COUNT(__VA_ARGS__) <= PW_TOKENIZER_MAX_SUPPORTED_ARGS,   \
       "Tokenized strings cannot have more than "                               \
       PW_STRINGIFY(PW_TOKENIZER_MAX_SUPPORTED_ARGS) " arguments; "             \
       PW_STRINGIFY(PW_FUNCTION_ARG_COUNT(__VA_ARGS__))                         \
       " arguments were used for " #format " (" #__VA_ARGS__ ")");              \
+  PW_TOKENIZE_FORMAT_STRING_ANY_ARG_COUNT(domain, mask, format, __VA_ARGS__)
+// clang-format on
+
+/// Equivalent to `PW_TOKENIZE_FORMAT_STRING`, but supports any number of
+/// arguments.
+///
+/// This is a low-level macro that should rarely be used directly. It is
+/// intended for situations when @cpp_type{pw_tokenizer_ArgTypes} is not used.
+/// There are two situations where @cpp_type{pw_tokenizer_ArgTypes} is
+/// unnecessary:
+///
+/// - The exact format string argument types and count are fixed.
+/// - The format string supports a variable number of arguments of only one
+///   type. In this case, @c_macro{PW_FUNCTION_ARG_COUNT} may be used to pass
+///   the argument count to the function.
+#define PW_TOKENIZE_FORMAT_STRING_ANY_ARG_COUNT(domain, mask, format, ...)     \
+  if (0) { /* Do not execute to prevent double evaluation of the arguments. */ \
+    pw_tokenizer_CheckFormatString(format PW_COMMA_ARGS(__VA_ARGS__));         \
+  }                                                                            \
                                                                                \
   /* Tokenize the string to a pw_tokenizer_Token at compile time. */           \
   static _PW_TOKENIZER_CONST pw_tokenizer_Token _pw_tokenizer_token =          \
@@ -230,8 +278,6 @@
                                                                                \
   _PW_TOKENIZER_RECORD_ORIGINAL_STRING(_pw_tokenizer_token, domain, format)
 
-// clang-format on
-
 // Creates unique names to use for tokenized string entries and linker sections.
 #define _PW_TOKENIZER_UNIQUE(prefix) PW_CONCAT(prefix, __LINE__, _, __COUNTER__)
 
diff --git a/pw_tokenizer/py/BUILD.bazel b/pw_tokenizer/py/BUILD.bazel
index c30db93..9f65874 100644
--- a/pw_tokenizer/py/BUILD.bazel
+++ b/pw_tokenizer/py/BUILD.bazel
@@ -102,7 +102,6 @@
     ],
     deps = [
         ":pw_tokenizer",
-        "@rules_python//python/runfiles",
     ],
 )
 
@@ -114,7 +113,6 @@
     ],
     deps = [
         ":pw_tokenizer",
-        "@rules_python//python/runfiles",
     ],
 )
 
diff --git a/pw_tokenizer/py/detokenize_test.py b/pw_tokenizer/py/detokenize_test.py
index df710c7..6e2bcfb 100755
--- a/pw_tokenizer/py/detokenize_test.py
+++ b/pw_tokenizer/py/detokenize_test.py
@@ -15,12 +15,15 @@
 """Tests for detokenize."""
 
 import base64
+import concurrent
 import datetime as dt
+import functools
 import io
 import os
 from pathlib import Path
 import struct
 import tempfile
+from typing import Any, Callable, NamedTuple, Tuple
 import unittest
 from unittest import mock
 
@@ -451,6 +454,35 @@
         self.assertIn('#0 -1', repr(unambiguous))
 
 
+class ManualPoolExecutor(concurrent.futures.Executor):
+    """A stubbed pool executor that captures the most recent work request
+    and holds it until the public process method is manually called."""
+
+    def __init__(self):
+        super().__init__()
+        self._func = None
+
+    # pylint: disable=arguments-differ
+    def submit(self, func, *args, **kwargs):
+        """Submits work to the pool, stashing the partial for later use."""
+        self._func = functools.partial(func, *args, **kwargs)
+
+    def process(self):
+        """Processes the latest func submitted to the pool."""
+        if self._func is not None:
+            self._func()
+            self._func = None
+
+
+class InlinePoolExecutor(concurrent.futures.Executor):
+    """A stubbed pool executor that runs work immediately, inline."""
+
+    # pylint: disable=arguments-differ
+    def submit(self, func, *args, **kwargs):
+        """Submits work to the pool, stashing the partial for later use."""
+        func(*args, **kwargs)
+
+
 @mock.patch('os.path.getmtime')
 class AutoUpdatingDetokenizerTest(unittest.TestCase):
     """Tests the AutoUpdatingDetokenizer class."""
@@ -478,18 +510,79 @@
             try:
                 file.close()
 
+                pool = ManualPoolExecutor()
                 detok = detokenize.AutoUpdatingDetokenizer(
-                    file.name, min_poll_period_s=0
+                    file.name, min_poll_period_s=0, pool=pool
                 )
                 self.assertFalse(detok.detokenize(JELLO_WORLD_TOKEN).ok())
 
                 with open(file.name, 'wb') as fd:
                     tokens.write_binary(db, fd)
 
+                # After the change but before the pool runs in another thread,
+                # the token should not exist.
+                self.assertFalse(detok.detokenize(JELLO_WORLD_TOKEN).ok())
+
+                # After the pool is allowed to process, it should.
+                pool.process()
                 self.assertTrue(detok.detokenize(JELLO_WORLD_TOKEN).ok())
             finally:
                 os.unlink(file.name)
 
+    def test_update_with_directory(self, mock_getmtime):
+        """Tests the update command with a directory format database."""
+        db = database.load_token_database(
+            io.BytesIO(ELF_WITH_TOKENIZER_SECTIONS)
+        )
+        self.assertEqual(len(db), TOKENS_IN_ELF)
+
+        the_time = [100]
+
+        def move_back_time_if_file_exists(path):
+            if os.path.exists(path):
+                the_time[0] -= 1
+                return the_time[0]
+
+            raise FileNotFoundError
+
+        mock_getmtime.side_effect = move_back_time_if_file_exists
+
+        with tempfile.TemporaryDirectory() as dbdir:
+            with tempfile.NamedTemporaryFile(
+                'wb', delete=False, suffix='.pw_tokenizer.csv', dir=dbdir
+            ) as matching_suffix_file, tempfile.NamedTemporaryFile(
+                'wb', delete=False, suffix='.not.right', dir=dbdir
+            ) as mismatched_suffix_file:
+                try:
+                    matching_suffix_file.close()
+                    mismatched_suffix_file.close()
+
+                    pool = ManualPoolExecutor()
+                    detok = detokenize.AutoUpdatingDetokenizer(
+                        dbdir, min_poll_period_s=0, pool=pool
+                    )
+                    self.assertFalse(detok.detokenize(JELLO_WORLD_TOKEN).ok())
+
+                    with open(mismatched_suffix_file.name, 'wb') as fd:
+                        tokens.write_csv(db, fd)
+                    pool.process()
+                    self.assertFalse(detok.detokenize(JELLO_WORLD_TOKEN).ok())
+
+                    with open(matching_suffix_file.name, 'wb') as fd:
+                        tokens.write_csv(db, fd)
+
+                    # After the change but before the pool runs in another
+                    # thread, the token should not exist.
+                    self.assertFalse(detok.detokenize(JELLO_WORLD_TOKEN).ok())
+                    pool.process()
+
+                    # After the pool is allowed to process, it should.
+                    self.assertTrue(detok.detokenize(JELLO_WORLD_TOKEN).ok())
+                finally:
+                    os.unlink(mismatched_suffix_file.name)
+                    os.unlink(matching_suffix_file.name)
+                    os.rmdir(dbdir)
+
         # The database stays around if the file is deleted.
         self.assertTrue(detok.detokenize(JELLO_WORLD_TOKEN).ok())
 
@@ -507,7 +600,7 @@
                 file.close()
 
                 detok = detokenize.AutoUpdatingDetokenizer(
-                    file.name, min_poll_period_s=0
+                    file.name, min_poll_period_s=0, pool=InlinePoolExecutor()
                 )
                 self.assertTrue(detok.detokenize(JELLO_WORLD_TOKEN).ok())
 
@@ -527,7 +620,9 @@
     def test_token_domain_in_str(self, _) -> None:
         """Tests a str containing a domain"""
         detok = detokenize.AutoUpdatingDetokenizer(
-            f'{ELF_WITH_TOKENIZER_SECTIONS_PATH}#.*', min_poll_period_s=0
+            f'{ELF_WITH_TOKENIZER_SECTIONS_PATH}#.*',
+            min_poll_period_s=0,
+            pool=InlinePoolExecutor(),
         )
         self.assertEqual(
             len(detok.database), TOKENS_IN_ELF_WITH_TOKENIZER_SECTIONS
@@ -536,7 +631,9 @@
     def test_token_domain_in_path(self, _) -> None:
         """Tests a Path() containing a domain"""
         detok = detokenize.AutoUpdatingDetokenizer(
-            Path(f'{ELF_WITH_TOKENIZER_SECTIONS_PATH}#.*'), min_poll_period_s=0
+            Path(f'{ELF_WITH_TOKENIZER_SECTIONS_PATH}#.*'),
+            min_poll_period_s=0,
+            pool=InlinePoolExecutor(),
         )
         self.assertEqual(
             len(detok.database), TOKENS_IN_ELF_WITH_TOKENIZER_SECTIONS
@@ -545,14 +642,18 @@
     def test_token_no_domain_in_str(self, _) -> None:
         """Tests a str without a domain"""
         detok = detokenize.AutoUpdatingDetokenizer(
-            str(ELF_WITH_TOKENIZER_SECTIONS_PATH), min_poll_period_s=0
+            str(ELF_WITH_TOKENIZER_SECTIONS_PATH),
+            min_poll_period_s=0,
+            pool=InlinePoolExecutor(),
         )
         self.assertEqual(len(detok.database), TOKENS_IN_ELF)
 
     def test_token_no_domain_in_path(self, _) -> None:
         """Tests a Path() without a domain"""
         detok = detokenize.AutoUpdatingDetokenizer(
-            ELF_WITH_TOKENIZER_SECTIONS_PATH, min_poll_period_s=0
+            ELF_WITH_TOKENIZER_SECTIONS_PATH,
+            min_poll_period_s=0,
+            pool=InlinePoolExecutor(),
         )
         self.assertEqual(len(detok.database), TOKENS_IN_ELF)
 
@@ -561,40 +662,122 @@
     return bytes(b + 1 for b in message)
 
 
-class PrefixedMessageDecoderTest(unittest.TestCase):
-    def setUp(self):
-        super().setUp()
-        self.decode = detokenize.PrefixedMessageDecoder('$', 'abcdefg')
+class NestedMessageParserTest(unittest.TestCase):
+    """Tests parsing prefixed messages."""
 
-    def test_transform_single_message(self):
-        self.assertEqual(
-            b'%bcde',
-            b''.join(self.decode.transform(io.BytesIO(b'$abcd'), _next_char)),
-        )
+    class _Case(NamedTuple):
+        data: bytes
+        expected: bytes
+        title: str
+        transform: Callable[[bytes], bytes] = _next_char
 
-    def test_transform_message_amidst_other_only_affects_message(self):
-        self.assertEqual(
+    TRANSFORM_TEST_CASES = (
+        _Case(b'$abcd', b'%bcde', 'single message'),
+        _Case(
+            b'$$WHAT?$abc$WHY? is this $ok $',
             b'%%WHAT?%bcd%WHY? is this %ok %',
-            b''.join(
-                self.decode.transform(
-                    io.BytesIO(b'$$WHAT?$abc$WHY? is this $ok $'), _next_char
-                )
-            ),
+            'message and non-message',
+        ),
+        _Case(b'$1$', b'%1%', 'empty message'),
+        _Case(b'$abc$defgh', b'%bcd%efghh', 'sequential message'),
+        _Case(
+            b'w$abcx$defygh$$abz',
+            b'w$ABCx$DEFygh$$ABz',
+            'interspersed start/end non-message',
+            bytes.upper,
+        ),
+        _Case(
+            b'$abcx$defygh$$ab',
+            b'$ABCx$DEFygh$$AB',
+            'interspersed start/end message ',
+            bytes.upper,
+        ),
+    )
+
+    def setUp(self) -> None:
+        self.decoder = detokenize.NestedMessageParser('$', 'abcdefg')
+
+    def test_transform_io(self) -> None:
+        for data, expected, title, transform in self.TRANSFORM_TEST_CASES:
+            self.assertEqual(
+                expected,
+                b''.join(
+                    self.decoder.transform_io(io.BytesIO(data), transform)
+                ),
+                f'{title}: {data!r}',
+            )
+
+    def test_transform_bytes_with_flush(self) -> None:
+        for data, expected, title, transform in self.TRANSFORM_TEST_CASES:
+            self.assertEqual(
+                expected,
+                self.decoder.transform(data, transform, flush=True),
+                f'{title}: {data!r}',
+            )
+
+    def test_transform_bytes_sequential(self) -> None:
+        transform = lambda message: message.upper().replace(b'$', b'*')
+
+        self.assertEqual(self.decoder.transform(b'abc$abcd', transform), b'abc')
+        self.assertEqual(self.decoder.transform(b'$', transform), b'*ABCD')
+        self.assertEqual(self.decoder.transform(b'$b', transform), b'*')
+        self.assertEqual(self.decoder.transform(b'', transform), b'')
+        self.assertEqual(self.decoder.transform(b' ', transform), b'*B ')
+        self.assertEqual(self.decoder.transform(b'hello', transform), b'hello')
+        self.assertEqual(self.decoder.transform(b'?? $ab', transform), b'?? ')
+        self.assertEqual(
+            self.decoder.transform(b'123$ab4$56$a', transform), b'*AB123*AB4*56'
+        )
+        self.assertEqual(
+            self.decoder.transform(b'bc', transform, flush=True), b'*ABC'
         )
 
-    def test_transform_empty_message(self):
+    MESSAGES_TEST: Any = (
+        (b'123$abc456$a', (False, b'123'), (True, b'$abc'), (False, b'456')),
+        (b'7$abcd', (True, b'$a'), (False, b'7')),
+        (b'e',),
+        (b'',),
+        (b'$', (True, b'$abcde')),
+        (b'$', (True, b'$')),
+        (b'$a$b$c', (True, b'$'), (True, b'$a'), (True, b'$b')),
+        (b'1', (True, b'$c'), (False, b'1')),
+        (b'',),
+        (b'?', (False, b'?')),
+        (b'!@', (False, b'!@')),
+        (b'%^&', (False, b'%^&')),
+    )
+
+    def test_read_messages(self) -> None:
+        for step in self.MESSAGES_TEST:
+            data: bytes = step[0]
+            pieces: Tuple[Tuple[bool, bytes], ...] = step[1:]
+            self.assertEqual(tuple(self.decoder.read_messages(data)), pieces)
+
+    def test_read_messages_flush(self) -> None:
         self.assertEqual(
-            b'%1%',
-            b''.join(self.decode.transform(io.BytesIO(b'$1$'), _next_char)),
+            list(self.decoder.read_messages(b'123$a')), [(False, b'123')]
+        )
+        self.assertEqual(list(self.decoder.read_messages(b'b')), [])
+        self.assertEqual(
+            list(self.decoder.read_messages(b'', flush=True)), [(True, b'$ab')]
         )
 
-    def test_transform_sequential_messages(self):
-        self.assertEqual(
-            b'%bcd%efghh',
-            b''.join(
-                self.decode.transform(io.BytesIO(b'$abc$defgh'), _next_char)
-            ),
-        )
+    def test_read_messages_io(self) -> None:
+        # Rework the read_messages test data for stream input.
+        data = io.BytesIO(b''.join(step[0] for step in self.MESSAGES_TEST))
+        expected_pieces = sum((step[1:] for step in self.MESSAGES_TEST), ())
+
+        result = self.decoder.read_messages_io(data)
+        for expected_is_message, expected_data in expected_pieces:
+            if expected_is_message:
+                is_message, piece = next(result)
+                self.assertTrue(is_message)
+                self.assertEqual(expected_data, piece)
+            else:  # the IO version yields non-messages byte by byte
+                for byte in expected_data:
+                    is_message, piece = next(result)
+                    self.assertFalse(is_message)
+                    self.assertEqual(bytes([byte]), piece)
 
 
 class DetokenizeBase64(unittest.TestCase):
@@ -627,6 +810,10 @@
         (JELLO + b'$a' + JELLO + b'bcd', b'Jello, world!$aJello, world!bcd'),
         (b'$3141', b'$3141'),
         (JELLO + b'$3141', b'Jello, world!$3141'),
+        (
+            JELLO + b'$a' + JELLO + b'b' + JELLO + b'c',
+            b'Jello, world!$aJello, world!bJello, world!c',
+        ),
         (RECURSION, b'The secret message is "Jello, world!"'),
         (
             RECURSION_2,
@@ -650,7 +837,7 @@
             output = io.BytesIO()
             self.detok.detokenize_base64_live(io.BytesIO(data), output, '$')
 
-            self.assertEqual(expected, output.getvalue())
+            self.assertEqual(expected, output.getvalue(), f'Input: {data!r}')
 
     def test_detokenize_base64_to_file(self):
         for data, expected in self.TEST_CASES:
diff --git a/pw_tokenizer/py/pw_tokenizer/database.py b/pw_tokenizer/py/pw_tokenizer/database.py
index 26a32a7..54d142e 100755
--- a/pw_tokenizer/py/pw_tokenizer/database.py
+++ b/pw_tokenizer/py/pw_tokenizer/database.py
@@ -297,6 +297,9 @@
             f'The file {database} already exists! Use --force to overwrite.'
         )
 
+    if not database.parent.exists():
+        database.parent.mkdir(parents=True)
+
     if output_type == 'directory':
         if str(database) == '-':
             raise ValueError(
diff --git a/pw_tokenizer/py/pw_tokenizer/detokenize.py b/pw_tokenizer/py/pw_tokenizer/detokenize.py
index 34f3a31..f753458 100755
--- a/pw_tokenizer/py/pw_tokenizer/detokenize.py
+++ b/pw_tokenizer/py/pw_tokenizer/detokenize.py
@@ -20,11 +20,11 @@
 encoded messages, one at a time. The detokenize method returns a
 DetokenizedString object with the result.
 
-For example,
+For example::
 
   from pw_tokenizer import detokenize
 
-  detok = detokenize.Detokenizer('path/to/my/image.elf')
+  detok = detokenize.Detokenizer('path/to/firmware/image.elf')
   print(detok.detokenize(b'\x12\x34\x56\x78\x03hi!'))
 
 This module also provides a command line interface for decoding and detokenizing
@@ -34,6 +34,8 @@
 import argparse
 import base64
 import binascii
+from concurrent.futures import Executor, ThreadPoolExecutor
+import enum
 import io
 import logging
 import os
@@ -42,6 +44,7 @@
 import string
 import struct
 import sys
+import threading
 import time
 from typing import (
     AnyStr,
@@ -71,9 +74,11 @@
 
 ENCODED_TOKEN = struct.Struct('<I')
 BASE64_PREFIX = encode.BASE64_PREFIX.encode()
+_BASE64_CHARS = string.ascii_letters + string.digits + '+/-_='
 DEFAULT_RECURSION = 9
 
 _RawIo = Union[io.RawIOBase, BinaryIO]
+_RawIoOrBytes = Union[_RawIo, bytes]
 
 
 class DetokenizedString:
@@ -186,26 +191,32 @@
         """
         self.show_errors = show_errors
 
+        self._database_lock = threading.Lock()
+
         # Cache FormatStrings for faster lookup & formatting.
         self._cache: Dict[int, List[_TokenizedFormatString]] = {}
 
         self._initialize_database(token_database_or_elf)
 
     def _initialize_database(self, token_sources: Iterable) -> None:
-        self.database = database.load_token_database(*token_sources)
-        self._cache.clear()
+        with self._database_lock:
+            self.database = database.load_token_database(*token_sources)
+            self._cache.clear()
 
     def lookup(self, token: int) -> List[_TokenizedFormatString]:
         """Returns (TokenizedStringEntry, FormatString) list for matches."""
-        try:
-            return self._cache[token]
-        except KeyError:
-            format_strings = [
-                _TokenizedFormatString(entry, decode.FormatString(str(entry)))
-                for entry in self.database.token_to_entries[token]
-            ]
-            self._cache[token] = format_strings
-            return format_strings
+        with self._database_lock:
+            try:
+                return self._cache[token]
+            except KeyError:
+                format_strings = [
+                    _TokenizedFormatString(
+                        entry, decode.FormatString(str(entry))
+                    )
+                    for entry in self.database.token_to_entries[token]
+                ]
+                self._cache[token] = format_strings
+                return format_strings
 
     def detokenize(self, encoded_message: bytes) -> DetokenizedString:
         """Decodes and detokenizes a message as a DetokenizedString."""
@@ -281,9 +292,9 @@
                 self._detokenize_prefixed_base64(prefix_bytes, recursion), data
             )
 
-        for message in PrefixedMessageDecoder(
-            prefix, string.ascii_letters + string.digits + '+/-_='
-        ).transform(input_file, transform):
+        for message in NestedMessageParser(prefix, _BASE64_CHARS).transform_io(
+            input_file, transform
+        ):
             output.write(message)
 
             # Flush each line to prevent delays when piping between processes.
@@ -364,6 +375,12 @@
             return True
 
         def _last_modified_time(self) -> Optional[float]:
+            if self.path.is_dir():
+                mtime = -1.0
+                for child in self.path.glob(tokens.DIR_DB_GLOB):
+                    mtime = max(mtime, os.path.getmtime(child))
+                return mtime if mtime >= 0 else None
+
             try:
                 return os.path.getmtime(self.path)
             except FileNotFoundError:
@@ -380,96 +397,169 @@
                 return database.load_token_database()
 
     def __init__(
-        self, *paths_or_files: _PathOrStr, min_poll_period_s: float = 1.0
+        self,
+        *paths_or_files: _PathOrStr,
+        min_poll_period_s: float = 1.0,
+        pool: Executor = ThreadPoolExecutor(max_workers=1),
     ) -> None:
         self.paths = tuple(self._DatabasePath(path) for path in paths_or_files)
         self.min_poll_period_s = min_poll_period_s
         self._last_checked_time: float = time.time()
+        # Thread pool to use for loading the databases. Limit to a single
+        # worker since this is low volume and not time critical.
+        self._pool = pool
         super().__init__(*(path.load() for path in self.paths))
 
+    def __del__(self) -> None:
+        self._pool.shutdown(wait=False)
+
+    def _reload_paths(self) -> None:
+        self._initialize_database([path.load() for path in self.paths])
+
     def _reload_if_changed(self) -> None:
         if time.time() - self._last_checked_time >= self.min_poll_period_s:
             self._last_checked_time = time.time()
 
             if any(path.updated() for path in self.paths):
                 _LOG.info('Changes detected; reloading token database')
-                self._initialize_database(path.load() for path in self.paths)
+                self._pool.submit(self._reload_paths)
 
     def lookup(self, token: int) -> List[_TokenizedFormatString]:
         self._reload_if_changed()
         return super().lookup(token)
 
 
-class PrefixedMessageDecoder:
-    """Parses messages that start with a prefix character from a byte stream."""
+class NestedMessageParser:
+    """Parses nested tokenized messages from a byte stream or string."""
 
-    def __init__(self, prefix: Union[str, bytes], chars: Union[str, bytes]):
-        """Parses prefixed messages.
+    class _State(enum.Enum):
+        MESSAGE = 1
+        NON_MESSAGE = 2
+
+    def __init__(
+        self,
+        prefix: Union[str, bytes] = BASE64_PREFIX,
+        chars: Union[str, bytes] = _BASE64_CHARS,
+    ) -> None:
+        """Initializes a parser.
 
         Args:
-          prefix: one character that signifies the start of a message
-          chars: characters allowed in a message
+            prefix: one character that signifies the start of a message (``$``).
+            chars: characters allowed in a message
         """
-        self._prefix = prefix.encode() if isinstance(prefix, str) else prefix
+        self._prefix = ord(prefix)
 
         if isinstance(chars, str):
             chars = chars.encode()
 
-        # Store the valid message bytes as a set of binary strings.
-        self._message_bytes = frozenset(
-            chars[i : i + 1] for i in range(len(chars))
-        )
+        # Store the valid message bytes as a set of byte values.
+        self._message_bytes = frozenset(chars)
 
-        if len(self._prefix) != 1 or self._prefix in self._message_bytes:
+        if len(prefix) != 1 or self._prefix in self._message_bytes:
             raise ValueError(
-                'Invalid prefix {!r}: the prefix must be a single '
-                'character that is not a valid message character.'.format(
-                    prefix
-                )
+                f'Invalid prefix {prefix!r}: the prefix must be a single '
+                'character that is not a valid message character.'
             )
 
-        self.data = bytearray()
+        self._buffer = bytearray()
+        self._state: NestedMessageParser._State = self._State.NON_MESSAGE
 
-    def _read_next(self, fd: _RawIo) -> Tuple[bytes, int]:
-        """Returns the next character and its index."""
-        char = fd.read(1) or b''
-        index = len(self.data)
-        self.data += char
-        return char, index
+    def read_messages_io(
+        self, binary_io: _RawIo
+    ) -> Iterator[Tuple[bool, bytes]]:
+        """Reads prefixed messages from a byte stream (BinaryIO object).
 
-    def read_messages(self, binary_fd: _RawIo) -> Iterator[Tuple[bool, bytes]]:
-        """Parses prefixed messages; yields (is_message, contents) chunks."""
-        message_start = None
+        Reads until EOF. If the stream is nonblocking (``read(1)`` returns
+        ``None``), then this function returns and may be called again with the
+        same IO object to continue parsing. Partial messages are preserved
+        between calls.
 
-        while True:
-            # This reads the file character-by-character. Non-message characters
-            # are yielded right away; message characters are grouped.
-            char, index = self._read_next(binary_fd)
-
-            # If in a message, keep reading until the message completes.
-            if message_start is not None:
-                if char in self._message_bytes:
-                    continue
-
-                yield True, self.data[message_start:index]
-                message_start = None
-
-            # Handle a non-message character.
-            if not char:
+        Yields:
+            ``(is_message, contents)`` chunks.
+        """
+        # The read may block indefinitely, depending on the IO object.
+        while (read_byte := binary_io.read(1)) != b'':
+            # Handle non-blocking IO by returning when no bytes are available.
+            if read_byte is None:
                 return
 
-            if char == self._prefix:
-                message_start = index
-            else:
-                yield False, char
+            for byte in read_byte:
+                yield from self._handle_byte(byte)
 
-    def transform(
-        self, binary_fd: _RawIo, transform: Callable[[bytes], bytes]
+            if self._state is self._State.NON_MESSAGE:  # yield non-message byte
+                yield from self._flush()
+
+        yield from self._flush()  # Always flush after EOF
+        self._state = self._State.NON_MESSAGE
+
+    def read_messages(
+        self, chunk: bytes, *, flush: bool = False
+    ) -> Iterator[Tuple[bool, bytes]]:
+        """Reads prefixed messages from a byte string.
+
+        This function may be called repeatedly with chunks of a stream. Partial
+        messages are preserved between calls, unless ``flush=True``.
+
+        Args:
+            chunk: byte string that may contain nested messages
+            flush: whether to flush any incomplete messages after processing
+                this chunk
+
+        Yields:
+            ``(is_message, contents)`` chunks.
+        """
+        for byte in chunk:
+            yield from self._handle_byte(byte)
+
+        if flush or self._state is self._State.NON_MESSAGE:
+            yield from self._flush()
+
+    def _handle_byte(self, byte: int) -> Iterator[Tuple[bool, bytes]]:
+        if self._state is self._State.MESSAGE:
+            if byte not in self._message_bytes:
+                yield from self._flush()
+                if byte != self._prefix:
+                    self._state = self._State.NON_MESSAGE
+        elif self._state is self._State.NON_MESSAGE:
+            if byte == self._prefix:
+                yield from self._flush()
+                self._state = self._State.MESSAGE
+        else:
+            raise NotImplementedError(f'Unsupported state: {self._state}')
+
+        self._buffer.append(byte)
+
+    def _flush(self) -> Iterator[Tuple[bool, bytes]]:
+        data = bytes(self._buffer)
+        self._buffer.clear()
+        if data:
+            yield self._state is self._State.MESSAGE, data
+
+    def transform_io(
+        self,
+        binary_io: _RawIo,
+        transform: Callable[[bytes], bytes],
     ) -> Iterator[bytes]:
         """Yields the file with a transformation applied to the messages."""
-        for is_message, chunk in self.read_messages(binary_fd):
+        for is_message, chunk in self.read_messages_io(binary_io):
             yield transform(chunk) if is_message else chunk
 
+    def transform(
+        self,
+        chunk: bytes,
+        transform: Callable[[bytes], bytes],
+        *,
+        flush: bool = False,
+    ) -> bytes:
+        """Yields the chunk with a transformation applied to the messages.
+
+        Partial messages are preserved between calls unless ``flush=True``.
+        """
+        return b''.join(
+            transform(data) if is_message else data
+            for is_message, data in self.read_messages(chunk, flush=flush)
+        )
+
 
 def _base64_message_regex(prefix: bytes) -> Pattern[bytes]:
     """Returns a regular expression for prefixed base64 tokenized strings."""
@@ -492,7 +582,10 @@
     prefix: Union[str, bytes] = BASE64_PREFIX,
     recursion: int = DEFAULT_RECURSION,
 ) -> bytes:
-    """Alias for detokenizer.detokenize_base64 for backwards compatibility."""
+    """Alias for detokenizer.detokenize_base64 for backwards compatibility.
+
+    This function is deprecated; do not call it.
+    """
     return detokenizer.detokenize_base64(data, prefix, recursion)
 
 
diff --git a/pw_tokenizer/py/pw_tokenizer/tokens.py b/pw_tokenizer/py/pw_tokenizer/tokens.py
index b7ebac8..ba40fe1 100644
--- a/pw_tokenizer/py/pw_tokenizer/tokens.py
+++ b/pw_tokenizer/py/pw_tokenizer/tokens.py
@@ -607,12 +607,12 @@
 
 # The suffix used for CSV files in a directory database.
 DIR_DB_SUFFIX = '.pw_tokenizer.csv'
-_DIR_DB_GLOB = '*' + DIR_DB_SUFFIX
+DIR_DB_GLOB = '*' + DIR_DB_SUFFIX
 
 
 def _parse_directory(directory: Path) -> Iterable[TokenizedStringEntry]:
     """Parses TokenizedStringEntries tokenizer CSV files in the directory."""
-    for path in directory.glob(_DIR_DB_GLOB):
+    for path in directory.glob(DIR_DB_GLOB):
         yield from _CSVDatabase(path).entries()
 
 
@@ -633,7 +633,7 @@
                 write_csv(self, fd)
 
             # Delete all CSV files except for the new CSV with everything.
-            for csv_file in self.path.glob(_DIR_DB_GLOB):
+            for csv_file in self.path.glob(DIR_DB_GLOB):
                 if csv_file != new_file:
                     csv_file.unlink()
         else:
@@ -648,7 +648,7 @@
         """Returns a list of files from a Git command, filtered to matc."""
         try:
             output = subprocess.run(
-                ['git', *commands, _DIR_DB_GLOB],
+                ['git', *commands, DIR_DB_GLOB],
                 capture_output=True,
                 check=True,
                 cwd=self.path,
diff --git a/pw_tokenizer/rust/BUILD.bazel b/pw_tokenizer/rust/BUILD.bazel
index ca9b46a..bdaefed 100644
--- a/pw_tokenizer/rust/BUILD.bazel
+++ b/pw_tokenizer/rust/BUILD.bazel
@@ -12,7 +12,28 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-load("@rules_rust//rust:defs.bzl", "rust_doc", "rust_doc_test", "rust_library", "rust_test")
+load("@rules_rust//rust:defs.bzl", "rust_doc", "rust_doc_test", "rust_library", "rust_proc_macro", "rust_test")
+
+rust_proc_macro(
+    name = "pw_tokenizer_macro",
+    srcs = [
+        "pw_tokenizer_macro.rs",
+    ],
+    visibility = ["//visibility:public"],
+    deps = [
+        ":pw_tokenizer_core",
+        ":pw_tokenizer_printf",
+        "//pw_status/rust:pw_status",
+        "@rust_crates//:proc-macro2",
+        "@rust_crates//:quote",
+        "@rust_crates//:syn",
+    ],
+)
+
+rust_test(
+    name = "pw_tokenizer_macro_test",
+    crate = ":pw_tokenizer_macro",
+)
 
 rust_library(
     name = "pw_tokenizer_core",
@@ -65,3 +86,33 @@
     name = "pw_tokenizer_printf_doc",
     crate = ":pw_tokenizer_printf",
 )
+
+rust_library(
+    name = "pw_tokenizer",
+    srcs = [
+        "pw_tokenizer/internal.rs",
+        "pw_tokenizer/lib.rs",
+    ],
+    proc_macro_deps = [":pw_tokenizer_macro"],
+    visibility = ["//visibility:public"],
+    deps = [
+        ":pw_tokenizer_core",  # Added for rustdoc linking support.
+        "//pw_status/rust:pw_status",
+        "//pw_stream/rust:pw_stream",
+    ],
+)
+
+rust_test(
+    name = "pw_tokenizer_test",
+    crate = ":pw_tokenizer",
+)
+
+rust_doc_test(
+    name = "pw_tokenizer_doc_test",
+    crate = ":pw_tokenizer",
+)
+
+rust_doc(
+    name = "pw_tokenizer_doc",
+    crate = ":pw_tokenizer",
+)
diff --git a/pw_tokenizer/rust/pw_tokenizer/internal.rs b/pw_tokenizer/rust/pw_tokenizer/internal.rs
new file mode 100644
index 0000000..4b798b9
--- /dev/null
+++ b/pw_tokenizer/rust/pw_tokenizer/internal.rs
@@ -0,0 +1,79 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+use core::cmp::min;
+
+use pw_status::Result;
+use pw_stream::{Cursor, Write};
+
+pub fn encode_string(cursor: &mut Cursor<&mut [u8]>, value: &str) -> Result<()> {
+    const MAX_STRING_LENGTH: usize = 0x7f;
+
+    let string_bytes = value.as_bytes();
+
+    // Limit the encoding to the lesser of 127 or the available space in the buffer.
+    let max_len = min(MAX_STRING_LENGTH, cursor.remaining() - 1);
+    let overflow = max_len < string_bytes.len();
+    let len = min(max_len, string_bytes.len());
+
+    // First byte of an encoded string is its length.
+    let mut header = len as u8;
+
+    // The high bit of the first byte is used to indicate if the string was
+    // truncated.
+    if overflow {
+        header |= 0x80;
+    }
+    cursor.write_all(&[header as u8])?;
+
+    cursor.write_all(&string_bytes[..len])
+}
+
+#[cfg(test)]
+mod test {
+    use pw_stream::{Cursor, Seek};
+
+    use super::encode_string;
+
+    fn do_string_encode_test<const BUFFER_LEN: usize>(value: &str, expected: &[u8]) {
+        let mut buffer = [0u8; BUFFER_LEN];
+        let mut cursor = Cursor::new(&mut buffer[..]);
+        encode_string(&mut cursor, value).unwrap();
+
+        let len = cursor.stream_position().unwrap() as usize;
+        let buffer = cursor.into_inner();
+
+        assert_eq!(len, expected.len());
+        assert_eq!(&buffer[..len], expected);
+    }
+
+    #[test]
+    fn test_string_encode() {
+        do_string_encode_test::<64>("test", b"\x04test");
+        do_string_encode_test::<4>("test", b"\x83tes");
+        do_string_encode_test::<1>("test", b"\x80");
+
+        // Truncates when the string does not fit.
+        do_string_encode_test::<64>(
+            "testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttest",
+            b"\xbftesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttes",
+        );
+
+        // Truncates when string is over 127 bytes.
+        do_string_encode_test::<1024>(
+            "testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest",
+            b"\xfftesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttes",
+        );
+    }
+}
diff --git a/pw_tokenizer/rust/pw_tokenizer/lib.rs b/pw_tokenizer/rust/pw_tokenizer/lib.rs
new file mode 100644
index 0000000..1700fdd
--- /dev/null
+++ b/pw_tokenizer/rust/pw_tokenizer/lib.rs
@@ -0,0 +1,267 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+//! `pw_tokenizer` - Efficient string handling and printf style encoding.
+//!
+//! Logging is critical, but developers are often forced to choose between
+//! additional logging or saving crucial flash space. The `pw_tokenizer` crate
+//! helps address this by replacing printf-style strings with binary tokens
+//! during compilation. This enables extensive logging with substantially less
+//! memory usage.
+//!
+//! For a more in depth explanation of the systems design and motivations,
+//! see [Pigweed's pw_tokenizer module documentation](https://pigweed.dev/pw_tokenizer/).
+//!
+//! # Example
+//!
+//! ```
+//! use pw_tokenizer::tokenize_to_buffer;
+//!
+//! # fn doctest() -> pw_status::Result<()> {
+//! let mut buffer = [0u8; 1024];
+//! let len = tokenize_to_buffer!(&mut buffer, "The answer is %d", 42)?;
+//!
+//! // 4 bytes used to encode the token and one to encode the value 42.  This
+//! // is a **3.5x** reduction in size compared to the raw string!
+//! assert_eq!(len, 5);
+//! # Ok(())
+//! # }
+//! # doctest().unwrap();
+//! ```
+
+#![no_std]
+#![deny(missing_docs)]
+
+#[doc(hidden)]
+pub mod internal;
+
+#[doc(hidden)]
+// Creating a __private namespace gives us a way to get to the modules
+// we need from macros by doing:
+//     use $crate::__private as __pw_tokenizer_crate;
+//
+// This is how proc macro generated code can reliably reference back to
+// `pw_tokenizer` while still allowing a user to import it under a different
+// name.
+pub mod __private {
+    pub use crate::*;
+    pub use pw_stream::{Cursor, Seek, WriteInteger, WriteVarint};
+    pub use pw_tokenizer_macro::{_token, _tokenize_to_buffer};
+}
+
+/// Return the [`u32`] token for the specified string and add it to the token
+/// database.
+///
+/// This is where the magic happens in `pw_tokenizer`!   ... and by magic
+/// we mean hiding information in a special linker section that ends up in the
+/// final elf binary but does not get flashed to the device.
+///
+/// Two things are accomplished here:
+/// 1) The string is hashed into its stable `u32` token.  This is the value that
+///    is returned from the macro.
+/// 2) A [token database entry](https://pigweed.dev/pw_tokenizer/design.html#binary-database-format)
+///   is generated, assigned to a unique static symbol, placed in a linker
+///   section named `pw_tokenizer.entries.<TOKEN_HASH>`.  A
+///   [linker script](https://pigweed.googlesource.com/pigweed/pigweed/+/refs/heads/main/pw_tokenizer/pw_tokenizer_linker_sections.ld)
+///   is responsible for picking these symbols up and aggregating them into a
+///   single `.pw_tokenizer.entries` section in the final binary.
+///
+/// # Example
+/// ```
+/// use pw_tokenizer::token;
+///
+/// let token = token!("hello, \"world\"");
+/// assert_eq!(token, 3537412730);
+/// ```
+///
+/// Currently there is no support for encoding tokens to specific domains
+/// or with "fixed lengths" per [`pw_tokenizer_core::hash_bytes_fixed`].
+#[macro_export]
+macro_rules! token {
+    ($string:literal) => {{
+        $crate::__private::_token!($string)
+    }};
+}
+
+/// Tokenize a format string and arguments to an [`AsMut<u8>`] buffer and add
+/// the format string's token to the token database.
+///
+/// See [`token`] for an explanation on how strings are tokenized and entries
+/// are added to the token database.
+///
+/// Returns a [`pw_status::Result<usize>`] containing the number of bytes written to the buffer.
+///
+/// # Errors
+/// - [`pw_status::Error::OutOfRange`] - Buffer is not large enough to fit
+///   tokenized data.
+/// - [`pw_status::Error::InvalidArgument`] - Invalid buffer was provided.
+///
+/// # Example
+///
+/// ```
+/// use pw_tokenizer::tokenize_to_buffer;
+///
+/// # fn doctest() -> pw_status::Result<()> {
+/// let mut buffer = [0u8; 1024];
+/// let len = tokenize_to_buffer!(&mut buffer, "The answer is %d", 42)?;
+///
+/// // 4 bytes used to encode the token and one to encode the value 42.
+/// assert_eq!(len, 5);
+/// # Ok(())
+/// # }
+/// # doctest().unwrap();
+/// ```
+#[macro_export]
+macro_rules! tokenize_to_buffer {
+    ($buffer:expr, $format_string:literal) => {{
+      use $crate::__private as __pw_tokenizer_crate;
+      __pw_tokenizer_crate::_tokenize_to_buffer!($buffer, $format_string)
+    }};
+
+    ($buffer:expr, $format_string:literal, $($args:expr),*) => {{
+      use $crate::__private as __pw_tokenizer_crate;
+      __pw_tokenizer_crate::_tokenize_to_buffer!($buffer, $format_string, $($args),*)
+    }};
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    extern crate self as pw_tokenizer;
+
+    // This is not meant to be an exhaustive test of tokenization which is
+    // covered by `pw_tokenizer_core`'s unit tests.  Rather, this is testing
+    // that the `tokenize!` macro connects to that correctly.
+    #[test]
+    fn test_token() {}
+
+    macro_rules! tokenize_to_buffer_test {
+      ($expected_data:expr, $buffer_len:expr, $fmt:expr) => {
+        {
+          let mut orig_buffer = [0u8; $buffer_len];
+          let buffer =
+              tokenize_to_buffer!(&mut orig_buffer, $fmt).unwrap();
+            let len = buffer.len();
+            assert_eq!(
+              &orig_buffer[..(($buffer_len) - len)],
+              $expected_data,
+          );
+        }
+      };
+
+      ($expected_data:expr, $buffer_len:expr, $fmt:expr, $($args:expr),*) => {
+        {
+          let mut buffer = [0u8; $buffer_len];
+          let len = tokenize_to_buffer!(&mut buffer, $fmt, $($args),*).unwrap();
+          assert_eq!(
+              &buffer[..len],
+              $expected_data,
+          );
+        }
+      };
+    }
+
+    #[test]
+    fn test_decimal_format() {
+        tokenize_to_buffer_test!(
+            &[0x52, 0x1c, 0xb0, 0x4c, 0x2], // expected buffer
+            64,                             // buffer size
+            "The answer is %d!",
+            1
+        );
+
+        tokenize_to_buffer_test!(
+            &[0x36, 0xd0, 0xfb, 0x69, 0x1], // expected buffer
+            64,                             // buffer size
+            "No! The answer is %d!",
+            -1
+        );
+
+        tokenize_to_buffer_test!(
+            &[0xa4, 0xad, 0x50, 0x54, 0x0], // expected buffer
+            64,                             // buffer size
+            "I think you'll find that the answer is %d!",
+            0
+        );
+    }
+
+    #[test]
+    fn test_misc_integer_format() {
+        // %d, %i, %o, %u, %x, %X all encode integers the same.
+        tokenize_to_buffer_test!(
+            &[0x57, 0x88, 0xc5, 0xd8, 0x2], // expected buffer
+            64,                             // buffer size
+            "The answer is %i!",
+            1
+        );
+
+        tokenize_to_buffer_test!(
+            &[0x5d, 0x70, 0x12, 0xb4, 0x2], // expected buffer
+            64,                             // buffer size
+            "The answer is %o!",
+            1
+        );
+
+        tokenize_to_buffer_test!(
+            &[0x63, 0x58, 0x5f, 0x8f, 0x2], // expected buffer
+            64,                             // buffer size
+            "The answer is %u!",
+            1
+        );
+
+        tokenize_to_buffer_test!(
+            &[0x66, 0xcc, 0x05, 0x7d, 0x2], // expected buffer
+            64,                             // buffer size
+            "The answer is %x!",
+            1
+        );
+
+        tokenize_to_buffer_test!(
+            &[0x46, 0x4c, 0x16, 0x96, 0x2], // expected buffer
+            64,                             // buffer size
+            "The answer is %X!",
+            1
+        );
+    }
+
+    #[test]
+    fn test_string_format() {
+        tokenize_to_buffer_test!(
+            b"\x25\xf6\x2e\x66\x07Pigweed", // expected buffer
+            64,                             // buffer size
+            "Hello: %s!",
+            "Pigweed"
+        );
+    }
+
+    #[test]
+    fn test_string_format_overflow() {
+        tokenize_to_buffer_test!(
+            b"\x25\xf6\x2e\x66\x83Pig", // expected buffer
+            8,                          // buffer size
+            "Hello: %s!",
+            "Pigweed"
+        );
+    }
+
+    #[test]
+    fn test_char_format() {
+        tokenize_to_buffer_test!(
+            &[0x2e, 0x52, 0xac, 0xe4, 0x50], // expected buffer
+            64,                              // buffer size
+            "Hello: %cigweed",
+            "P".as_bytes()[0]
+        );
+    }
+}
diff --git a/pw_tokenizer/rust/pw_tokenizer_core.rs b/pw_tokenizer/rust/pw_tokenizer_core.rs
index 09d11fb..c8e5504 100644
--- a/pw_tokenizer/rust/pw_tokenizer_core.rs
+++ b/pw_tokenizer/rust/pw_tokenizer_core.rs
@@ -35,8 +35,7 @@
     hash_bytes_fixed(bytes, bytes.len())
 }
 
-/// Calculate the hash for a sequence of bytes, truncating to a fixed length
-/// if necessary.
+/// Calculate the hash for a sequence of bytes, examining at most `len` bytes.
 ///
 /// ```
 /// use pw_tokenizer_core::hash_bytes_fixed;
diff --git a/pw_tokenizer/rust/pw_tokenizer_macro.rs b/pw_tokenizer/rust/pw_tokenizer_macro.rs
new file mode 100644
index 0000000..d22a10e
--- /dev/null
+++ b/pw_tokenizer/rust/pw_tokenizer_macro.rs
@@ -0,0 +1,317 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// This proc macro crate is a private API for the `pw_tokenizer` crate.
+#![doc(hidden)]
+
+use std::collections::VecDeque;
+use std::ffi::CString;
+
+use proc_macro::TokenStream;
+use quote::{format_ident, quote, ToTokens};
+use syn::{
+    parse::{Parse, ParseStream},
+    parse_macro_input,
+    punctuated::Punctuated,
+    Expr, LitStr, Token,
+};
+
+use pw_tokenizer_core::{hash_string, TOKENIZER_ENTRY_MAGIC};
+use pw_tokenizer_printf as printf;
+
+type TokenStream2 = proc_macro2::TokenStream;
+
+struct Error {
+    text: String,
+}
+
+impl Error {
+    fn new(text: &str) -> Self {
+        Self {
+            text: text.to_string(),
+        }
+    }
+}
+
+type Result<T> = core::result::Result<T, Error>;
+
+// Handles tokenizing (hashing) `string` and adding it to the token database
+// with the specified `domain`.  A detailed description of what's happening is
+// found in the docs for [`pw_tokenizer::token`] macro.
+fn token_backend(domain: &str, string: &str) -> TokenStream2 {
+    let hash = hash_string(string);
+
+    // Line number is omitted as getting that info requires an experimental API:
+    // https://doc.rust-lang.org/proc_macro/struct.Span.html#method.start
+    let ident = format_ident!("_pw_tokenizer_string_entry_{:08X}", hash);
+
+    // pw_tokenizer is intended for use with ELF files only. Mach-O files (macOS
+    // executables) do not support section names longer than 16 characters, so a
+    // short, unused section name is used on macOS.
+    let section = if cfg!(target_os = "macos") {
+        ",pw,".to_string()
+    } else {
+        format!(".pw_tokenizer.entries.{:08X}", hash)
+    };
+
+    let string = CString::new(string).unwrap();
+    let string_bytes = string.as_bytes_with_nul();
+    let string_bytes_len = string_bytes.len();
+
+    let domain = CString::new(domain).unwrap();
+    let domain_bytes = domain.as_bytes_with_nul();
+    let domain_bytes_len = domain_bytes.len();
+
+    quote! {
+        // Use an inner scope to avoid identifier collision.  Name mangling
+        // will disambiguate these in the symbol table.
+        {
+            #[repr(C, packed(1))]
+            struct TokenEntry {
+                magic: u32,
+                token: u32,
+                domain_size: u32,
+                string_length: u32,
+                domain: [u8; #domain_bytes_len],
+                string: [u8; #string_bytes_len],
+            };
+            // This is currently manually verified to be correct.
+            // TODO: b/287132907 - Add integration tests for token database.
+            #[link_section = #section ]
+            static #ident: TokenEntry = TokenEntry {
+                magic: #TOKENIZER_ENTRY_MAGIC,
+                token: #hash,
+                domain_size: #domain_bytes_len as u32,
+                string_length: #string_bytes_len as u32,
+                domain: [ #(#domain_bytes),* ],
+                string: [ #(#string_bytes),* ],
+            };
+
+            #hash
+        }
+    }
+}
+
+// Documented in `pw_tokenizer::token`.
+#[proc_macro]
+pub fn _token(tokens: TokenStream) -> TokenStream {
+    let input = parse_macro_input!(tokens as LitStr);
+    token_backend("", &input.value()).into()
+}
+
+// Args to tokenize to buffer that are parsed according to the pattern:
+//   ($buffer:expr, $format_string:literal, $($args:expr),*)
+#[derive(Debug)]
+struct TokenizeToBuffer {
+    buffer: Expr,
+    format_string: LitStr,
+    args: VecDeque<Expr>,
+}
+
+impl Parse for TokenizeToBuffer {
+    fn parse(input: ParseStream) -> syn::parse::Result<Self> {
+        let buffer: Expr = input.parse()?;
+        input.parse::<Token![,]>()?;
+        let format_string: LitStr = input.parse()?;
+
+        let args = if input.is_empty() {
+            // If there are no more tokens, no arguments were specified.
+            VecDeque::new()
+        } else {
+            // Eat the `,` following the format string.
+            input.parse::<Token![,]>()?;
+
+            let punctuated = Punctuated::<Expr, Token![,]>::parse_terminated(input)?;
+            punctuated.into_iter().collect()
+        };
+
+        Ok(TokenizeToBuffer {
+            buffer,
+            format_string,
+            args,
+        })
+    }
+}
+
+// Grab the next argument returning a descriptive error if no more args are left.
+fn next_arg(spec: &printf::ConversionSpec, args: &mut VecDeque<Expr>) -> Result<Expr> {
+    args.pop_front()
+        .ok_or_else(|| Error::new(&format!("No argument given for {spec:?}")))
+}
+
+// Handle a single format conversion specifier (i.e. `%08x`).  Grabs the
+// necessary arguments for the specifier from `args` and generates code
+// to marshal the arguments into the buffer declared in `_tokenize_to_buffer`.
+// Returns an error if args is too short or if a format specifier is unsupported.
+fn handle_conversion(
+    spec: &printf::ConversionSpec,
+    args: &mut VecDeque<Expr>,
+) -> Result<TokenStream2> {
+    match spec.specifier {
+        printf::Specifier::Decimal
+        | printf::Specifier::Integer
+        | printf::Specifier::Octal
+        | printf::Specifier::Unsigned
+        | printf::Specifier::Hex
+        | printf::Specifier::UpperHex => {
+            // TODO: b/281862660 - Support Width::Variable and Precision::Variable.
+            if spec.min_field_width == printf::MinFieldWidth::Variable {
+                return Err(Error::new(
+                    "Variable width '*' integer formats are not supported.",
+                ));
+            }
+
+            if spec.precision == printf::Precision::Variable {
+                return Err(Error::new(
+                    "Variable precision '*' integer formats are not supported.",
+                ));
+            }
+
+            let arg = next_arg(spec, args)?;
+            let bits = match spec.length.unwrap_or(printf::Length::Long) {
+                printf::Length::Char => 8,
+                printf::Length::Short => 16,
+                printf::Length::Long => 32,
+                printf::Length::LongLong => 64,
+                printf::Length::IntMax => 64,
+                printf::Length::Size => 32,
+                printf::Length::PointerDiff => 32,
+                printf::Length::LongDouble => {
+                    return Err(Error::new(
+                        "Long double length parameter invalid for integer formats",
+                    ))
+                }
+            };
+            let ty = format_ident!("i{bits}");
+            Ok(quote! {
+              // pw_tokenizer always uses signed packing for all integers.
+              cursor.write_signed_varint(#ty::from(#arg) as i64)?;
+            })
+        }
+        printf::Specifier::String => {
+            // TODO: b/281862660 - Support Width::Variable and Precision::Variable.
+            if spec.min_field_width == printf::MinFieldWidth::Variable {
+                return Err(Error::new(
+                    "Variable width '*' string formats are not supported.",
+                ));
+            }
+
+            if spec.precision == printf::Precision::Variable {
+                return Err(Error::new(
+                    "Variable precision '*' string formats are not supported.",
+                ));
+            }
+
+            let arg = next_arg(spec, args)?;
+            Ok(quote! {
+              let mut buffer = __pw_tokenizer_crate::internal::encode_string(&mut cursor, #arg)?;
+            })
+        }
+        printf::Specifier::Char => {
+            let arg = next_arg(spec, args)?;
+            Ok(quote! {
+              cursor.write_u8_le(&u8::from(#arg))?;
+            })
+        }
+
+        printf::Specifier::Double
+        | printf::Specifier::UpperDouble
+        | printf::Specifier::Exponential
+        | printf::Specifier::UpperExponential
+        | printf::Specifier::SmallDouble
+        | printf::Specifier::UpperSmallDouble => {
+            // TODO: b/281862328 - Support floating point numbers.
+            Err(Error::new("Floating point numbers are not supported."))
+        }
+
+        // TODO: b/281862333 - Support pointers.
+        printf::Specifier::Pointer => Err(Error::new("Pointer types are not supported.")),
+    }
+}
+
+// Generates code to marshal a tokenized string and arguments into a buffer.
+// See [`pw_tokenizer::tokenize_to_buffer`] for details on behavior.
+//
+// Internally the [`AsMut<u8>`] is wrapped in a [`pw_stream::Cursor`] to
+// fill the buffer incrementally.
+#[proc_macro]
+pub fn _tokenize_to_buffer(tokens: TokenStream) -> TokenStream {
+    let input = parse_macro_input!(tokens as TokenizeToBuffer);
+    let token = token_backend("", &input.format_string.value());
+    let buffer = input.buffer;
+
+    let format_string = input.format_string.value();
+
+    let format = match printf::FormatString::parse(&format_string) {
+        Ok(format) => format,
+        Err(e) => {
+            return syn::Error::new_spanned(
+                input.format_string.to_token_stream(),
+                format!("Error parsing format string {e}"),
+            )
+            .to_compile_error()
+            .into()
+        }
+    };
+    let mut args = input.args;
+    let mut arg_encodings = Vec::new();
+
+    let mut errors = Vec::new();
+
+    for fragment in format.fragments {
+        if let printf::FormatFragment::Conversion(spec) = fragment {
+            match handle_conversion(&spec, &mut args) {
+                Ok(encoding) => arg_encodings.push(encoding),
+                Err(e) => errors.push(syn::Error::new_spanned(
+                    input.format_string.to_token_stream(),
+                    e.text,
+                )),
+            }
+        }
+    }
+
+    if !errors.is_empty() {
+        return errors
+            .into_iter()
+            .reduce(|mut accumulated_errors, error| {
+                accumulated_errors.combine(error);
+                accumulated_errors
+            })
+            .expect("errors should not be empty")
+            .to_compile_error()
+            .into();
+    }
+
+    let code = quote! {
+      {
+        // Wrapping code in an internal function to allow `?` to work in
+        // functions that don't return Results.
+        fn _pw_tokenizer_internal_encode(buffer: &mut [u8], token: u32) -> pw_status::Result<usize> {
+          // use pw_tokenizer's private re-export of these pw_stream bits to
+          // allow referencing without needing `pw_stream` in scope.
+          use __pw_tokenizer_crate::{Cursor, Seek, WriteInteger, WriteVarint};
+          let mut cursor = Cursor::new(buffer);
+          cursor.write_u32_le(&token)?;
+          #(#arg_encodings);*;
+          Ok(cursor.stream_position()? as usize)
+        }
+        _pw_tokenizer_internal_encode(#buffer, #token)
+      }
+    };
+    code.into()
+}
+
+// Macros tested in `pw_tokenizer` crate.
+#[cfg(test)]
+mod tests {}
diff --git a/pw_tokenizer/token_databases.rst b/pw_tokenizer/token_databases.rst
new file mode 100644
index 0000000..dbd9651
--- /dev/null
+++ b/pw_tokenizer/token_databases.rst
@@ -0,0 +1,310 @@
+.. _module-pw_tokenizer-token-databases:
+
+===============
+Token databases
+===============
+.. pigweed-module-subpage::
+   :name: pw_tokenizer
+   :tagline: Compress strings to shrink logs by +75%
+
+Token databases store a mapping of tokens to the strings they represent. An ELF
+file can be used as a token database, but it only contains the strings for its
+exact build. A token database file aggregates tokens from multiple ELF files, so
+that a single database can decode tokenized strings from any known ELF.
+
+Token databases contain the token, removal date (if any), and string for each
+tokenized string.
+
+----------------------
+Token database formats
+----------------------
+Three token database formats are supported: CSV, binary, and directory. Tokens
+may also be read from ELF files or ``.a`` archives, but cannot be written to
+these formats.
+
+CSV database format
+===================
+The CSV database format has three columns: the token in hexadecimal, the removal
+date (if any) in year-month-day format, and the string literal, surrounded by
+quotes. Quote characters within the string are represented as two quote
+characters.
+
+This example database contains six strings, three of which have removal dates.
+
+.. code-block::
+
+   141c35d5,          ,"The answer: ""%s"""
+   2e668cd6,2019-12-25,"Jello, world!"
+   7b940e2a,          ,"Hello %s! %hd %e"
+   851beeb6,          ,"%u %d"
+   881436a0,2020-01-01,"The answer is: %s"
+   e13b0f94,2020-04-01,"%llu"
+
+Binary database format
+======================
+The binary database format is comprised of a 16-byte header followed by a series
+of 8-byte entries. Each entry stores the token and the removal date, which is
+0xFFFFFFFF if there is none. The string literals are stored next in the same
+order as the entries. Strings are stored with null terminators. See
+`token_database.h <https://pigweed.googlesource.com/pigweed/pigweed/+/HEAD/pw_tokenizer/public/pw_tokenizer/token_database.h>`_
+for full details.
+
+The binary form of the CSV database is shown below. It contains the same
+information, but in a more compact and easily processed form. It takes 141 B
+compared with the CSV database's 211 B.
+
+.. code-block:: text
+
+   [header]
+   0x00: 454b4f54 0000534e  TOKENS..
+   0x08: 00000006 00000000  ........
+
+   [entries]
+   0x10: 141c35d5 ffffffff  .5......
+   0x18: 2e668cd6 07e30c19  ..f.....
+   0x20: 7b940e2a ffffffff  *..{....
+   0x28: 851beeb6 ffffffff  ........
+   0x30: 881436a0 07e40101  .6......
+   0x38: e13b0f94 07e40401  ..;.....
+
+   [string table]
+   0x40: 54 68 65 20 61 6e 73 77 65 72 3a 20 22 25 73 22  The answer: "%s"
+   0x50: 00 4a 65 6c 6c 6f 2c 20 77 6f 72 6c 64 21 00 48  .Jello, world!.H
+   0x60: 65 6c 6c 6f 20 25 73 21 20 25 68 64 20 25 65 00  ello %s! %hd %e.
+   0x70: 25 75 20 25 64 00 54 68 65 20 61 6e 73 77 65 72  %u %d.The answer
+   0x80: 20 69 73 3a 20 25 73 00 25 6c 6c 75 00            is: %s.%llu.
+
+.. _module-pw_tokenizer-directory-database-format:
+
+Directory database format
+=========================
+pw_tokenizer can consume directories of CSV databases. A directory database
+will be searched recursively for files with a ``.pw_tokenizer.csv`` suffix, all
+of which will be used for subsequent detokenization lookups.
+
+An example directory database might look something like this:
+
+.. code-block:: text
+
+   token_database
+   ├── chuck_e_cheese.pw_tokenizer.csv
+   ├── fungi_ble.pw_tokenizer.csv
+   └── some_more
+       └── arcade.pw_tokenizer.csv
+
+This format is optimized for storage in a Git repository alongside source code.
+The token database commands randomly generate unique file names for the CSVs in
+the database to prevent merge conflicts. Running ``mark_removed`` or ``purge``
+commands in the database CLI consolidates the files to a single CSV.
+
+The database command line tool supports a ``--discard-temporary
+<upstream_commit>`` option for ``add``. In this mode, the tool attempts to
+discard temporary tokens. It identifies the latest CSV not present in the
+provided ``<upstream_commit>``, and tokens present in that CSV that are not in
+the newly added tokens are discarded. This helps keep temporary tokens (e.g.
+from debug logs) out of the database.
+
+JSON support
+============
+While pw_tokenizer doesn't specify a JSON database format, a token database can
+be created from a JSON formatted array of strings. This is useful for side-band
+token database generation for strings that are not embedded as parsable tokens
+in compiled binaries. See :ref:`module-pw_tokenizer-database-creation` for
+instructions on generating a token database from a JSON file.
+
+.. _module-pw_tokenizer-managing-token-databases:
+
+------------------------
+Managing token databases
+------------------------
+Token databases are managed with the ``database.py`` script. This script can be
+used to extract tokens from compilation artifacts and manage database files.
+Invoke ``database.py`` with ``-h`` for full usage information.
+
+An example ELF file with tokenized logs is provided at
+``pw_tokenizer/py/example_binary_with_tokenized_strings.elf``. You can use that
+file to experiment with the ``database.py`` commands.
+
+.. _module-pw_tokenizer-database-creation:
+
+Create a database
+=================
+The ``create`` command makes a new token database from ELF files (.elf, .o, .so,
+etc.), archives (.a), existing token databases (CSV or binary), or a JSON file
+containing an array of strings.
+
+.. code-block:: sh
+
+   ./database.py create --database DATABASE_NAME ELF_OR_DATABASE_FILE...
+
+Two database output formats are supported: CSV and binary. Provide
+``--type binary`` to ``create`` to generate a binary database instead of the
+default CSV. CSV databases are great for checking into a source control or for
+human review. Binary databases are more compact and simpler to parse. The C++
+detokenizer library only supports binary databases currently.
+
+.. _module-pw_tokenizer-update-token-database:
+
+Update a database
+=================
+As new tokenized strings are added, update the database with the ``add``
+command.
+
+.. code-block:: sh
+
+   ./database.py add --database DATABASE_NAME ELF_OR_DATABASE_FILE...
+
+This command adds new tokens from ELF files or other databases to the database.
+Adding tokens already present in the database updates the date removed, if any,
+to the latest.
+
+A CSV token database can be checked into a source repository and updated as code
+changes are made. The build system can invoke ``database.py`` to update the
+database after each build.
+
+GN integration
+==============
+Token databases may be updated or created as part of a GN build. The
+``pw_tokenizer_database`` template provided by
+``$dir_pw_tokenizer/database.gni`` automatically updates an in-source tokenized
+strings database or creates a new database with artifacts from one or more GN
+targets or other database files.
+
+To create a new database, set the ``create`` variable to the desired database
+type (``"csv"`` or ``"binary"``). The database will be created in the output
+directory. To update an existing database, provide the path to the database with
+the ``database`` variable.
+
+.. code-block::
+
+   import("//build_overrides/pigweed.gni")
+
+   import("$dir_pw_tokenizer/database.gni")
+
+   pw_tokenizer_database("my_database") {
+     database = "database_in_the_source_tree.csv"
+     targets = [ "//firmware/image:foo(//targets/my_board:some_toolchain)" ]
+     input_databases = [ "other_database.csv" ]
+   }
+
+Instead of specifying GN targets, paths or globs to output files may be provided
+with the ``paths`` option.
+
+.. code-block::
+
+   pw_tokenizer_database("my_database") {
+     database = "database_in_the_source_tree.csv"
+     deps = [ ":apps" ]
+     optional_paths = [ "$root_build_dir/**/*.elf" ]
+   }
+
+.. note::
+
+   The ``paths`` and ``optional_targets`` arguments do not add anything to
+   ``deps``, so there is no guarantee that the referenced artifacts will exist
+   when the database is updated. Provide ``targets`` or ``deps`` or build other
+   GN targets first if this is a concern.
+
+CMake integration
+=================
+Token databases may be updated or created as part of a CMake build. The
+``pw_tokenizer_database`` template provided by
+``$dir_pw_tokenizer/database.cmake`` automatically updates an in-source tokenized
+strings database or creates a new database with artifacts from a CMake target.
+
+To create a new database, set the ``CREATE`` variable to the desired database
+type (``"csv"`` or ``"binary"``). The database will be created in the output
+directory.
+
+.. code-block::
+
+   include("$dir_pw_tokenizer/database.cmake")
+
+   pw_tokenizer_database("my_database") {
+     CREATE binary
+     TARGET my_target.ext
+     DEPS ${deps_list}
+   }
+
+To update an existing database, provide the path to the database with
+the ``database`` variable.
+
+.. code-block::
+
+   pw_tokenizer_database("my_database") {
+     DATABASE database_in_the_source_tree.csv
+     TARGET my_target.ext
+     DEPS ${deps_list}
+   }
+
+.. _module-pw_tokenizer-collisions:
+
+----------------
+Token collisions
+----------------
+Tokens are calculated with a hash function. It is possible for different
+strings to hash to the same token. When this happens, multiple strings will have
+the same token in the database, and it may not be possible to unambiguously
+decode a token.
+
+The detokenization tools attempt to resolve collisions automatically. Collisions
+are resolved based on two things:
+
+- whether the tokenized data matches the string's arguments (if any), and
+- if / when the string was marked as having been removed from the database.
+
+Resolving collisions
+====================
+Collisions may occur occasionally. Run the command
+``python -m pw_tokenizer.database report <database>`` to see information about a
+token database, including any collisions.
+
+If there are collisions, take the following steps to resolve them.
+
+- Change one of the colliding strings slightly to give it a new token.
+- In C (not C++), artificial collisions may occur if strings longer than
+  ``PW_TOKENIZER_CFG_C_HASH_LENGTH`` are hashed. If this is happening, consider
+  setting ``PW_TOKENIZER_CFG_C_HASH_LENGTH`` to a larger value.  See
+  ``pw_tokenizer/public/pw_tokenizer/config.h``.
+- Run the ``mark_removed`` command with the latest version of the build
+  artifacts to mark missing strings as removed. This deprioritizes them in
+  collision resolution.
+
+  .. code-block:: sh
+
+     python -m pw_tokenizer.database mark_removed --database <database> <ELF files>
+
+  The ``purge`` command may be used to delete these tokens from the database.
+
+Probability of collisions
+=========================
+Hashes of any size have a collision risk. The probability of at least one
+collision occurring for a given number of strings is unintuitively high
+(this is known as the `birthday problem
+<https://en.wikipedia.org/wiki/Birthday_problem>`_). If fewer than 32 bits are
+used for tokens, the probability of collisions increases substantially.
+
+This table shows the approximate number of strings that can be hashed to have a
+1% or 50% probability of at least one collision (assuming a uniform, random
+hash).
+
++-------+---------------------------------------+
+| Token | Collision probability by string count |
+| bits  +--------------------+------------------+
+|       |         50%        |          1%      |
++=======+====================+==================+
+|   32  |       77000        |        9300      |
++-------+--------------------+------------------+
+|   31  |       54000        |        6600      |
++-------+--------------------+------------------+
+|   24  |        4800        |         580      |
++-------+--------------------+------------------+
+|   16  |         300        |          36      |
++-------+--------------------+------------------+
+|    8  |          19        |           3      |
++-------+--------------------+------------------+
+
+Keep this table in mind when masking tokens (see
+:ref:`module-pw_tokenizer-masks`). 16 bits might be acceptable when
+tokenizing a small set of strings, such as module names, but won't be suitable
+for large sets of strings, like log messages.
diff --git a/pw_tokenizer/tokenization.rst b/pw_tokenizer/tokenization.rst
new file mode 100644
index 0000000..b080cce
--- /dev/null
+++ b/pw_tokenizer/tokenization.rst
@@ -0,0 +1,690 @@
+:tocdepth: 3
+
+.. _module-pw_tokenizer-tokenization:
+
+============
+Tokenization
+============
+.. pigweed-module-subpage::
+   :name: pw_tokenizer
+   :tagline: Compress strings to shrink logs by +75%
+
+Tokenization converts a string literal to a token. If it's a printf-style
+string, its arguments are encoded along with it. The results of tokenization can
+be sent off device or stored in place of a full string.
+
+--------
+Concepts
+--------
+See :ref:`module-pw_tokenizer-get-started-overview` for a high-level
+explanation of how ``pw_tokenizer`` works.
+
+Token generation: fixed length hashing at compile time
+======================================================
+String tokens are generated using a modified version of the x65599 hash used by
+the SDBM project. All hashing is done at compile time.
+
+In C code, strings are hashed with a preprocessor macro. For compatibility with
+macros, the hash must be limited to a fixed maximum number of characters. This
+value is set by ``PW_TOKENIZER_CFG_C_HASH_LENGTH``. Increasing
+``PW_TOKENIZER_CFG_C_HASH_LENGTH`` increases the compilation time for C due to
+the complexity of the hashing macros.
+
+C++ macros use a constexpr function instead of a macro. This function works with
+any length of string and has lower compilation time impact than the C macros.
+For consistency, C++ tokenization uses the same hash algorithm, but the
+calculated values will differ between C and C++ for strings longer than
+``PW_TOKENIZER_CFG_C_HASH_LENGTH`` characters.
+
+Token encoding
+==============
+The token is a 32-bit hash calculated during compilation. The string is encoded
+little-endian with the token followed by arguments, if any. For example, the
+31-byte string ``You can go about your business.`` hashes to 0xdac9a244.
+This is encoded as 4 bytes: ``44 a2 c9 da``.
+
+Arguments are encoded as follows:
+
+* **Integers**  (1--10 bytes) --
+  `ZigZag and varint encoded <https://developers.google.com/protocol-buffers/docs/encoding#signed-integers>`_,
+  similarly to Protocol Buffers. Smaller values take fewer bytes.
+* **Floating point numbers** (4 bytes) -- Single precision floating point.
+* **Strings** (1--128 bytes) -- Length byte followed by the string contents.
+  The top bit of the length indicates whether the string was truncated. The
+  remaining 7 bits encode the string length, with a maximum of 127 bytes.
+
+.. TODO(hepler): insert diagram here!
+
+.. tip::
+   ``%s`` arguments can quickly fill a tokenization buffer. Keep ``%s``
+   arguments short or avoid encoding them as strings (e.g. encode an enum as an
+   integer instead of a string). See also
+   :ref:`module-pw_tokenizer-tokenized-strings-as-args`.
+
+.. _module-pw_tokenizer-proto:
+
+Tokenized fields in protocol buffers
+====================================
+Text may be represented in a few different ways:
+
+- Plain ASCII or UTF-8 text (``This is plain text``)
+- Base64-encoded tokenized message (``$ibafcA==``)
+- Binary-encoded tokenized message (``89 b6 9f 70``)
+- Little-endian 32-bit integer token (``0x709fb689``)
+
+``pw_tokenizer`` provides the ``pw.tokenizer.format`` protobuf field option.
+This option may be applied to a protobuf field to indicate that it may contain a
+tokenized string. A string that is optionally tokenized is represented with a
+single ``bytes`` field annotated with ``(pw.tokenizer.format) =
+TOKENIZATION_OPTIONAL``.
+
+For example, the following protobuf has one field that may contain a tokenized
+string.
+
+.. code-block:: protobuf
+
+  message MessageWithOptionallyTokenizedField {
+    bytes just_bytes = 1;
+    bytes maybe_tokenized = 2 [(pw.tokenizer.format) = TOKENIZATION_OPTIONAL];
+    string just_text = 3;
+  }
+
+-----------------------
+Tokenization in C++ / C
+-----------------------
+To tokenize a string, include ``pw_tokenizer/tokenize.h`` and invoke one of the
+``PW_TOKENIZE_*`` macros.
+
+Tokenize string literals outside of expressions
+===============================================
+``pw_tokenizer`` provides macros for tokenizing string literals with no
+arguments:
+
+* :c:macro:`PW_TOKENIZE_STRING`
+* :c:macro:`PW_TOKENIZE_STRING_DOMAIN`
+* :c:macro:`PW_TOKENIZE_STRING_MASK`
+
+The tokenization macros above cannot be used inside other expressions.
+
+.. admonition:: **Yes**: Assign :c:macro:`PW_TOKENIZE_STRING` to a ``constexpr`` variable.
+  :class: checkmark
+
+  .. code-block:: cpp
+
+    constexpr uint32_t kGlobalToken = PW_TOKENIZE_STRING("Wowee Zowee!");
+
+    void Function() {
+      constexpr uint32_t local_token = PW_TOKENIZE_STRING("Wowee Zowee?");
+    }
+
+.. admonition:: **No**: Use :c:macro:`PW_TOKENIZE_STRING` in another expression.
+  :class: error
+
+  .. code-block:: cpp
+
+   void BadExample() {
+     ProcessToken(PW_TOKENIZE_STRING("This won't compile!"));
+   }
+
+  Use :c:macro:`PW_TOKENIZE_STRING_EXPR` instead.
+
+Tokenize inside expressions
+===========================
+An alternate set of macros are provided for use inside expressions. These make
+use of lambda functions, so while they can be used inside expressions, they
+require C++ and cannot be assigned to constexpr variables or be used with
+special function variables like ``__func__``.
+
+* :c:macro:`PW_TOKENIZE_STRING_EXPR`
+* :c:macro:`PW_TOKENIZE_STRING_DOMAIN_EXPR`
+* :c:macro:`PW_TOKENIZE_STRING_MASK_EXPR`
+
+.. admonition:: When to use these macros
+
+  Use :c:macro:`PW_TOKENIZE_STRING` and related macros to tokenize string
+  literals that do not need %-style arguments encoded.
+
+.. admonition:: **Yes**: Use :c:macro:`PW_TOKENIZE_STRING_EXPR` within other expressions.
+  :class: checkmark
+
+  .. code-block:: cpp
+
+    void GoodExample() {
+      ProcessToken(PW_TOKENIZE_STRING_EXPR("This will compile!"));
+    }
+
+.. admonition:: **No**: Assign :c:macro:`PW_TOKENIZE_STRING_EXPR` to a ``constexpr`` variable.
+  :class: error
+
+  .. code-block:: cpp
+
+     constexpr uint32_t wont_work = PW_TOKENIZE_STRING_EXPR("This won't compile!"));
+
+  Instead, use :c:macro:`PW_TOKENIZE_STRING` to assign to a ``constexpr`` variable.
+
+.. admonition:: **No**: Tokenize ``__func__`` in :c:macro:`PW_TOKENIZE_STRING_EXPR`.
+  :class: error
+
+  .. code-block:: cpp
+
+    void BadExample() {
+      // This compiles, but __func__ will not be the outer function's name, and
+      // there may be compiler warnings.
+      constexpr uint32_t wont_work = PW_TOKENIZE_STRING_EXPR(__func__);
+    }
+
+  Instead, use :c:macro:`PW_TOKENIZE_STRING` to tokenize ``__func__`` or similar macros.
+
+Tokenize a message with arguments to a buffer
+=============================================
+* :c:macro:`PW_TOKENIZE_TO_BUFFER`
+* :c:macro:`PW_TOKENIZE_TO_BUFFER_DOMAIN`
+* :c:macro:`PW_TOKENIZE_TO_BUFFER_MASK`
+
+.. admonition:: Why use this macro
+
+   - Encode a tokenized message for consumption within a function.
+   - Encode a tokenized message into an existing buffer.
+
+   Avoid using ``PW_TOKENIZE_TO_BUFFER`` in widely expanded macros, such as a
+   logging macro, because it will result in larger code size than passing the
+   tokenized data to a function.
+
+.. _module-pw_tokenizer-custom-macro:
+
+Tokenize a message with arguments in a custom macro
+===================================================
+Projects can leverage the tokenization machinery in whichever way best suits
+their needs. The most efficient way to use ``pw_tokenizer`` is to pass tokenized
+data to a global handler function. A project's custom tokenization macro can
+handle tokenized data in a function of their choosing. The function may accept
+any arguments, but its final arguments must be:
+
+* The 32-bit token (:cpp:type:`pw_tokenizer_Token`)
+* The argument types (:cpp:type:`pw_tokenizer_ArgTypes`)
+* Variadic arguments, if any
+
+``pw_tokenizer`` provides two low-level macros to help projects create custom
+tokenization macros:
+
+* :c:macro:`PW_TOKENIZE_FORMAT_STRING`
+* :c:macro:`PW_TOKENIZER_REPLACE_FORMAT_STRING`
+
+.. caution::
+
+   Note the spelling difference! The first macro begins with ``PW_TOKENIZE_``
+   (no ``R``) whereas the second begins with ``PW_TOKENIZER_``.
+
+Use these macros to invoke an encoding function with the token, argument types,
+and variadic arguments. The function can then encode the tokenized message to a
+buffer using helpers in ``pw_tokenizer/encode_args.h``:
+
+.. Note: pw_tokenizer_EncodeArgs is a C function so you would expect to
+.. reference it as :c:func:`pw_tokenizer_EncodeArgs`. That doesn't work because
+.. it's defined in a header file that mixes C and C++.
+
+* :cpp:func:`pw::tokenizer::EncodeArgs`
+* :cpp:class:`pw::tokenizer::EncodedMessage`
+* :cpp:func:`pw_tokenizer_EncodeArgs`
+
+Example
+-------
+The following example implements a custom tokenization macro similar to
+:ref:`module-pw_log_tokenized`.
+
+.. code-block:: cpp
+
+   #include "pw_tokenizer/tokenize.h"
+
+   #ifdef __cplusplus
+   extern "C" {
+   #endif
+
+   void EncodeTokenizedMessage(uint32_t metadata,
+                               pw_tokenizer_Token token,
+                               pw_tokenizer_ArgTypes types,
+                               ...);
+
+   #ifdef __cplusplus
+   }  // extern "C"
+   #endif
+
+   #define PW_LOG_TOKENIZED_ENCODE_MESSAGE(metadata, format, ...)          \
+     do {                                                                  \
+       PW_TOKENIZE_FORMAT_STRING("logs", UINT32_MAX, format, __VA_ARGS__); \
+       EncodeTokenizedMessage(                                             \
+           metadata, PW_TOKENIZER_REPLACE_FORMAT_STRING(__VA_ARGS__));     \
+     } while (0)
+
+In this example, the ``EncodeTokenizedMessage`` function would handle encoding
+and processing the message. Encoding is done by the
+:cpp:class:`pw::tokenizer::EncodedMessage` class or
+:cpp:func:`pw::tokenizer::EncodeArgs` function from
+``pw_tokenizer/encode_args.h``. The encoded message can then be transmitted or
+stored as needed.
+
+.. code-block:: cpp
+
+   #include "pw_log_tokenized/log_tokenized.h"
+   #include "pw_tokenizer/encode_args.h"
+
+   void HandleTokenizedMessage(pw::log_tokenized::Metadata metadata,
+                               pw::span<std::byte> message);
+
+   extern "C" void EncodeTokenizedMessage(const uint32_t metadata,
+                                          const pw_tokenizer_Token token,
+                                          const pw_tokenizer_ArgTypes types,
+                                          ...) {
+     va_list args;
+     va_start(args, types);
+     pw::tokenizer::EncodedMessage<kLogBufferSize> encoded_message(token, types, args);
+     va_end(args);
+
+     HandleTokenizedMessage(metadata, encoded_message);
+   }
+
+.. admonition:: Why use a custom macro
+
+   - Optimal code size. Invoking a free function with the tokenized data results
+     in the smallest possible call site.
+   - Pass additional arguments, such as metadata, with the tokenized message.
+   - Integrate ``pw_tokenizer`` with other systems.
+
+Tokenizing function names
+=========================
+The string literal tokenization functions support tokenizing string literals or
+constexpr character arrays (``constexpr const char[]``). In GCC and Clang, the
+special ``__func__`` variable and ``__PRETTY_FUNCTION__`` extension are declared
+as ``static constexpr char[]`` in C++ instead of the standard ``static const
+char[]``. This means that ``__func__`` and ``__PRETTY_FUNCTION__`` can be
+tokenized while compiling C++ with GCC or Clang.
+
+.. code-block:: cpp
+
+   // Tokenize the special function name variables.
+   constexpr uint32_t function = PW_TOKENIZE_STRING(__func__);
+   constexpr uint32_t pretty_function = PW_TOKENIZE_STRING(__PRETTY_FUNCTION__);
+
+Note that ``__func__`` and ``__PRETTY_FUNCTION__`` are not string literals.
+They are defined as static character arrays, so they cannot be implicitly
+concatenated with string literals. For example, ``printf(__func__ ": %d",
+123);`` will not compile.
+
+Calculate minimum required buffer size
+======================================
+See :cpp:func:`pw::tokenizer::MinEncodingBufferSizeBytes`.
+
+.. _module-pw_tokenizer-base64-format:
+
+Encoding Base64
+===============
+The tokenizer encodes messages to a compact binary representation. Applications
+may desire a textual representation of tokenized strings. This makes it easy to
+use tokenized messages alongside plain text messages, but comes at a small
+efficiency cost: encoded Base64 messages occupy about 4/3 (133%) as much memory
+as binary messages.
+
+The Base64 format is comprised of a ``$`` character followed by the
+Base64-encoded contents of the tokenized message. For example, consider
+tokenizing the string ``This is an example: %d!`` with the argument -1. The
+string's token is 0x4b016e66.
+
+.. code-block:: text
+
+   Source code: PW_LOG("This is an example: %d!", -1);
+
+    Plain text: This is an example: -1! [23 bytes]
+
+        Binary: 66 6e 01 4b 01          [ 5 bytes]
+
+        Base64: $Zm4BSwE=               [ 9 bytes]
+
+To encode with the Base64 format, add a call to
+``pw::tokenizer::PrefixedBase64Encode`` or ``pw_tokenizer_PrefixedBase64Encode``
+in the tokenizer handler function. For example,
+
+.. code-block:: cpp
+
+   void TokenizedMessageHandler(const uint8_t encoded_message[],
+                                size_t size_bytes) {
+     pw::InlineBasicString base64 = pw::tokenizer::PrefixedBase64Encode(
+         pw::span(encoded_message, size_bytes));
+
+     TransmitLogMessage(base64.data(), base64.size());
+   }
+
+.. _module-pw_tokenizer-masks:
+
+Reduce token size with masking
+==============================
+``pw_tokenizer`` uses 32-bit tokens. On 32-bit or 64-bit architectures, using
+fewer than 32 bits does not improve runtime or code size efficiency. However,
+when tokens are packed into data structures or stored in arrays, the size of the
+token directly affects memory usage. In those cases, every bit counts, and it
+may be desirable to use fewer bits for the token.
+
+``pw_tokenizer`` allows users to provide a mask to apply to the token. This
+masked token is used in both the token database and the code. The masked token
+is not a masked version of the full 32-bit token, the masked token is the token.
+This makes it trivial to decode tokens that use fewer than 32 bits.
+
+Masking functionality is provided through the ``*_MASK`` versions of the macros:
+
+* :c:macro:`PW_TOKENIZE_STRING_MASK`
+* :c:macro:`PW_TOKENIZE_STRING_MASK_EXPR`
+* :c:macro:`PW_TOKENIZE_TO_BUFFER_MASK`
+
+For example, the following generates 16-bit tokens and packs them into an
+existing value.
+
+.. code-block:: cpp
+
+   constexpr uint32_t token = PW_TOKENIZE_STRING_MASK("domain", 0xFFFF, "Pigweed!");
+   uint32_t packed_word = (other_bits << 16) | token;
+
+Tokens are hashes, so tokens of any size have a collision risk. The fewer bits
+used for tokens, the more likely two strings are to hash to the same token. See
+:ref:`module-pw_tokenizer-collisions`.
+
+Masked tokens without arguments may be encoded in fewer bytes. For example, the
+16-bit token ``0x1234`` may be encoded as two little-endian bytes (``34 12``)
+rather than four (``34 12 00 00``). The detokenizer tools zero-pad data smaller
+than four bytes. Tokens with arguments must always be encoded as four bytes.
+
+.. _module-pw_tokenizer-domains:
+
+Keep tokens from different sources separate with domains
+========================================================
+``pw_tokenizer`` supports having multiple tokenization domains. Domains are a
+string label associated with each tokenized string. This allows projects to keep
+tokens from different sources separate. Potential use cases include the
+following:
+
+* Keep large sets of tokenized strings separate to avoid collisions.
+* Create a separate database for a small number of strings that use truncated
+  tokens, for example only 10 or 16 bits instead of the full 32 bits.
+
+If no domain is specified, the domain is empty (``""``). For many projects, this
+default domain is sufficient, so no additional configuration is required.
+
+.. code-block:: cpp
+
+   // Tokenizes this string to the default ("") domain.
+   PW_TOKENIZE_STRING("Hello, world!");
+
+   // Tokenizes this string to the "my_custom_domain" domain.
+   PW_TOKENIZE_STRING_DOMAIN("my_custom_domain", "Hello, world!");
+
+The database and detokenization command line tools default to reading from the
+default domain. The domain may be specified for ELF files by appending
+``#DOMAIN_NAME`` to the file path. Use ``#.*`` to read from all domains. For
+example, the following reads strings in ``some_domain`` from ``my_image.elf``.
+
+.. code-block:: sh
+
+   ./database.py create --database my_db.csv path/to/my_image.elf#some_domain
+
+See :ref:`module-pw_tokenizer-managing-token-databases` for information about
+the ``database.py`` command line tool.
+
+Limitations, bugs, and future work
+==================================
+
+GCC bug: tokenization in template functions
+-------------------------------------------
+GCC incorrectly ignores the section attribute for template `functions
+<https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70435>`_ and `variables
+<https://gcc.gnu.org/bugzilla/show_bug.cgi?id=88061>`_. For example, the
+following won't work when compiling with GCC and tokenized logging:
+
+.. code-block:: cpp
+
+   template <...>
+   void DoThings() {
+     int value = GetValue();
+     // This log won't work with tokenized logs due to the templated context.
+     PW_LOG_INFO("Got value: %d", value);
+     ...
+   }
+
+The bug causes tokenized strings in template functions to be emitted into
+``.rodata`` instead of the special tokenized string section. This causes two
+problems:
+
+1. Tokenized strings will not be discovered by the token database tools.
+2. Tokenized strings may not be removed from the final binary.
+
+There are two workarounds.
+
+#. **Use Clang.** Clang puts the string data in the requested section, as
+   expected. No extra steps are required.
+
+#. **Move tokenization calls to a non-templated context.** Creating a separate
+   non-templated function and invoking it from the template resolves the issue.
+   This enables tokenizing in most cases encountered in practice with
+   templates.
+
+   .. code-block:: cpp
+
+      // In .h file:
+      void LogThings(value);
+
+      template <...>
+      void DoThings() {
+        int value = GetValue();
+        // This log will work: calls non-templated helper.
+        LogThings(value);
+        ...
+      }
+
+      // In .cc file:
+      void LogThings(int value) {
+        // Tokenized logging works as expected in this non-templated context.
+        PW_LOG_INFO("Got value %d", value);
+      }
+
+There is a third option, which isn't implemented yet, which is to compile the
+binary twice: once to extract the tokens, and once for the production binary
+(without tokens). If this is interesting to you please get in touch.
+
+64-bit tokenization
+-------------------
+The Python and C++ detokenizing libraries currently assume that strings were
+tokenized on a system with 32-bit ``long``, ``size_t``, ``intptr_t``, and
+``ptrdiff_t``. Decoding may not work correctly for these types if a 64-bit
+device performed the tokenization.
+
+Supporting detokenization of strings tokenized on 64-bit targets would be
+simple. This could be done by adding an option to switch the 32-bit types to
+64-bit. The tokenizer stores the sizes of these types in the
+``.pw_tokenizer.info`` ELF section, so the sizes of these types can be verified
+by checking the ELF file, if necessary.
+
+Tokenization in headers
+-----------------------
+Tokenizing code in header files (inline functions or templates) may trigger
+warnings such as ``-Wlto-type-mismatch`` under certain conditions. That
+is because tokenization requires declaring a character array for each tokenized
+string. If the tokenized string includes macros that change value, the size of
+this character array changes, which means the same static variable is defined
+with different sizes. It should be safe to suppress these warnings, but, when
+possible, code that tokenizes strings with macros that can change value should
+be moved to source files rather than headers.
+
+.. _module-pw_tokenizer-tokenized-strings-as-args:
+
+Tokenized strings as ``%s`` arguments
+-------------------------------------
+Encoding ``%s`` string arguments is inefficient, since ``%s`` strings are
+encoded 1:1, with no tokenization. It would be better to send a tokenized string
+literal as an integer instead of a string argument, but this is not yet
+supported.
+
+A string token could be sent by marking an integer % argument in a way
+recognized by the detokenization tools. The detokenizer would expand the
+argument to the string represented by the integer.
+
+.. code-block:: cpp
+
+   #define PW_TOKEN_ARG PRIx32 "<PW_TOKEN]"
+
+   constexpr uint32_t answer_token = PW_TOKENIZE_STRING("Uh, who is there");
+
+   PW_TOKENIZE_STRING("Knock knock: %" PW_TOKEN_ARG "?", answer_token);
+
+Strings with arguments could be encoded to a buffer, but since printf strings
+are null-terminated, a binary encoding would not work. These strings can be
+prefixed Base64-encoded and sent as ``%s`` instead. See
+:ref:`module-pw_tokenizer-base64-format`.
+
+Another possibility: encode strings with arguments to a ``uint64_t`` and send
+them as an integer. This would be efficient and simple, but only support a small
+number of arguments.
+
+----------------------
+Tokenization in Python
+----------------------
+The Python ``pw_tokenizer.encode`` module has limited support for encoding
+tokenized messages with the :func:`pw_tokenizer.encode.encode_token_and_args`
+function. This function requires that the string's token is already calculated.
+Typically these tokens are provided by a database, but they can be manually
+created using the tokenizer hash.
+
+:func:`pw_tokenizer.tokens.pw_tokenizer_65599_hash` is particularly useful
+for offline token database generation in cases where tokenized strings in a
+binary cannot be embedded as parsable pw_tokenizer entries.
+
+.. note::
+   In C, the hash length of a string has a fixed limit controlled by
+   ``PW_TOKENIZER_CFG_C_HASH_LENGTH``. To match tokens produced by C (as opposed
+   to C++) code, ``pw_tokenizer_65599_hash()`` should be called with a matching
+   hash length limit. When creating an offline database, it's a good idea to
+   generate tokens for both, and merge the databases.
+
+.. _module-pw_tokenizer-cli-encoding:
+
+-----------------
+Encoding CLI tool
+-----------------
+The ``pw_tokenizer.encode`` command line tool can be used to encode
+format strings and optional arguments.
+
+.. code-block:: bash
+
+  python -m pw_tokenizer.encode [-h] FORMAT_STRING [ARG ...]
+
+Example:
+
+.. code-block:: text
+
+  $ python -m pw_tokenizer.encode "There's... %d many of %s!" 2 them
+        Raw input: "There's... %d many of %s!" % (2, 'them')
+  Formatted input: There's... 2 many of them!
+            Token: 0xb6ef8b2d
+          Encoded: b'-\x8b\xef\xb6\x04\x04them' (2d 8b ef b6 04 04 74 68 65 6d) [10 bytes]
+  Prefixed Base64: $LYvvtgQEdGhlbQ==
+
+See ``--help`` for full usage details.
+
+--------
+Appendix
+--------
+
+Case study
+==========
+.. note:: This section discusses the implementation, results, and lessons
+   learned from a real-world deployment of ``pw_tokenizer``.
+
+The tokenizer module was developed to bring tokenized logging to an
+in-development product. The product already had an established text-based
+logging system. Deploying tokenization was straightforward and had substantial
+benefits.
+
+Results
+-------
+* Log contents shrunk by over 50%, even with Base64 encoding.
+
+  * Significant size savings for encoded logs, even using the less-efficient
+    Base64 encoding required for compatibility with the existing log system.
+  * Freed valuable communication bandwidth.
+  * Allowed storing many more logs in crash dumps.
+
+* Substantial flash savings.
+
+  * Reduced the size of firmware images by up to 18%.
+
+* Simpler logging code.
+
+  * Removed CPU-heavy ``snprintf`` calls.
+  * Removed complex code for forwarding log arguments to a low-priority task.
+
+This section describes the tokenizer deployment process and highlights key
+insights.
+
+Firmware deployment
+-------------------
+* In the project's logging macro, calls to the underlying logging function were
+  replaced with a tokenized log macro invocation.
+* The log level was passed as the payload argument to facilitate runtime log
+  level control.
+* For this project, it was necessary to encode the log messages as text. In
+  the handler function the log messages were encoded in the $-prefixed
+  :ref:`module-pw_tokenizer-base64-format`, then dispatched as normal log messages.
+* Asserts were tokenized using a callback-based API that has been removed (a
+  :ref:`custom macro <module-pw_tokenizer-custom-macro>` is a better
+  alternative).
+
+.. attention::
+  Do not encode line numbers in tokenized strings. This results in a huge
+  number of lines being added to the database, since every time code moves,
+  new strings are tokenized. If :ref:`module-pw_log_tokenized` is used, line
+  numbers are encoded in the log metadata. Line numbers may also be included
+  by adding ``"%d"`` to the format string and passing ``__LINE__``.
+
+.. _module-pw_tokenizer-database-management:
+
+Database management
+-------------------
+* The token database was stored as a CSV file in the project's Git repo.
+* The token database was automatically updated as part of the build, and
+  developers were expected to check in the database changes alongside their code
+  changes.
+* A presubmit check verified that all strings added by a change were added to
+  the token database.
+* The token database included logs and asserts for all firmware images in the
+  project.
+* No strings were purged from the token database.
+
+.. tip::
+   Merge conflicts may be a frequent occurrence with an in-source CSV database.
+   Use the :ref:`module-pw_tokenizer-directory-database-format` instead.
+
+Decoding tooling deployment
+---------------------------
+* The Python detokenizer in ``pw_tokenizer`` was deployed to two places:
+
+  * Product-specific Python command line tools, using
+    ``pw_tokenizer.Detokenizer``.
+  * Standalone script for decoding prefixed Base64 tokens in files or
+    live output (e.g. from ``adb``), using ``detokenize.py``'s command line
+    interface.
+
+* The C++ detokenizer library was deployed to two Android apps with a Java
+  Native Interface (JNI) layer.
+
+  * The binary token database was included as a raw resource in the APK.
+  * In one app, the built-in token database could be overridden by copying a
+    file to the phone.
+
+.. tip::
+   Make the tokenized logging tools simple to use for your project.
+
+   * Provide simple wrapper shell scripts that fill in arguments for the
+     project. For example, point ``detokenize.py`` to the project's token
+     databases.
+   * Use ``pw_tokenizer.AutoUpdatingDetokenizer`` to decode in
+     continuously-running tools, so that users don't have to restart the tool
+     when the token database updates.
+   * Integrate detokenization everywhere it is needed. Integrating the tools
+     takes just a few lines of code, and token databases can be embedded in APKs
+     or binaries.
diff --git a/pw_toolchain/arm_clang/BUILD.gn b/pw_toolchain/arm_clang/BUILD.gn
index 9660142..68c2e73 100644
--- a/pw_toolchain/arm_clang/BUILD.gn
+++ b/pw_toolchain/arm_clang/BUILD.gn
@@ -42,8 +42,7 @@
 
 # Default config added to all the ARM cortex M targets to link `nosys` library.
 config("nosys") {
-  # TODO(prabhukr): libs = ["nosys"] did not work as expected (pwrev/133110).
-  ldflags = [ "-lnosys" ]
+  libs = [ "nosys" ]
 }
 
 config("enable_float_printf") {
diff --git a/pw_toolchain/arm_clang/clang_config.gni b/pw_toolchain/arm_clang/clang_config.gni
index de212b9..7c979de 100644
--- a/pw_toolchain/arm_clang/clang_config.gni
+++ b/pw_toolchain/arm_clang/clang_config.gni
@@ -79,5 +79,12 @@
     cflags += _arm_flags.cflags
     ldflags += _arm_flags.cflags
     ldflags += _arm_flags.ldflags
+
+    libs = [
+      "c_nano",
+      "m",
+      "gcc",
+      "stdc++_nano",
+    ]
   }
 }
diff --git a/pw_toolchain/arm_gcc/BUILD.bazel b/pw_toolchain/arm_gcc/BUILD.bazel
index 5e34751..ba61571 100644
--- a/pw_toolchain/arm_gcc/BUILD.bazel
+++ b/pw_toolchain/arm_gcc/BUILD.bazel
@@ -13,6 +13,11 @@
 # the License.
 
 load(
+    "@pw_toolchain//cc_toolchain:defs.bzl",
+    "pw_cc_toolchain",
+    "pw_cc_toolchain_feature",
+)
+load(
     "//pw_build:pigweed.bzl",
     "pw_cc_library",
 )
@@ -40,3 +45,172 @@
         "//pw_toolchain:wrap_abort",
     ],
 )
+
+pw_cc_toolchain_feature(
+    name = "sysroot",
+    builtin_sysroot = "external/gcc_arm_none_eabi_toolchain",
+    cxx_builtin_include_directories = [
+        "%sysroot%/arm-none-eabi/include/newlib-nano",
+        "%sysroot%/arm-none-eabi/include/c++/12.2.1",
+        "%sysroot%/arm-none-eabi/include/c++/12.2.1/arm-none-eabi",
+        "%sysroot%/arm-none-eabi/include/c++/12.2.1/backward",
+        "%sysroot%/lib/gcc/arm-none-eabi/12.2.1/include",
+        "%sysroot%/lib/gcc/arm-none-eabi/12.2.1/include-fixed",
+        "%sysroot%/arm-none-eabi/include",
+    ],
+)
+
+pw_cc_toolchain_feature(
+    name = "cortex_common",
+    asmopts = [
+        "-mabi=aapcs",
+        "-mthumb",
+    ],
+    copts = [
+        "-ffreestanding",
+        "-fno-common",
+        "-Wno-psabi",
+        "-specs=nano.specs",
+        "-specs=nosys.specs",
+    ],
+    linkopts = [
+        "-Wl,--gc-sections",
+        "-specs=nano.specs",
+        "-specs=nosys.specs",
+        "-lstdc++",
+        "-lnosys",
+        "-lc",
+        "-lm",
+        "-Wl,--no-warn-rwx-segment",
+    ],
+)
+
+_MCPU = [
+    "cortex-m0",
+    "cortex-m3",
+    "cortex-m4",
+    "cortex-m4+nofp",
+]
+
+pw_cc_toolchain_feature(
+    name = "cortex-m0",
+    copts = [
+        "-mcpu=cortex-m0",
+        "-mfloat-abi=soft",
+    ],
+    linkopts = [
+        "-mcpu=cortex-m0",
+        "-mfloat-abi=soft",
+    ],
+)
+
+pw_cc_toolchain_feature(
+    name = "cortex-m3",
+    copts = [
+        "-mcpu=cortex-m3",
+        "-mfloat-abi=soft",
+    ],
+    linkopts = [
+        "-mcpu=cortex-m3",
+        "-mfloat-abi=soft",
+    ],
+)
+
+pw_cc_toolchain_feature(
+    name = "cortex-m4",
+    copts = [
+        "-mcpu=cortex-m4",
+        "-mfloat-abi=hard",
+    ],
+    linkopts = [
+        "-mcpu=cortex-m4",
+        "-mfloat-abi=hard",
+    ],
+)
+
+pw_cc_toolchain_feature(
+    name = "cortex-m4+nofp",
+    copts = [
+        "-mcpu=cortex-m4+nofp",
+        "-mfloat-abi=soft",
+    ],
+    linkopts = [
+        "-mcpu=cortex-m4+nofp",
+        "-mfloat-abi=soft",
+    ],
+)
+
+# Using a list comprehension here to avoid the mind-numbing boilerplate.
+#
+# TODO(tpudlik): We ought to refactor the pw_cc_toolchain API so that the
+# *_files and tools don't need to be retyped every time you want to create a
+# variant with different feature deps.
+[pw_cc_toolchain(
+    name = "arm_gcc_toolchain_" + mcpu,
+    abi_libc_version = "unknown",
+    abi_version = "unknown",
+    all_files = "@gcc_arm_none_eabi_toolchain//:all",
+    ar = "@gcc_arm_none_eabi_toolchain//:bin/arm-none-eabi-ar",
+    ar_files = "@gcc_arm_none_eabi_toolchain//:all",
+    as_files = "@gcc_arm_none_eabi_toolchain//:all",
+    compiler = "unknown",
+    compiler_files = "@gcc_arm_none_eabi_toolchain//:all",
+    coverage_files = "@gcc_arm_none_eabi_toolchain//:all",
+    cpp = "@gcc_arm_none_eabi_toolchain//:bin/arm-none-eabi-gcc",
+    dwp_files = "@gcc_arm_none_eabi_toolchain//:all",
+    feature_deps = [
+        "@pw_toolchain//features:no_canonical_prefixes",
+        ":" + mcpu,
+        ":sysroot",
+        ":cortex_common",
+    ],
+    gcc = "@gcc_arm_none_eabi_toolchain//:bin/arm-none-eabi-gcc",
+    gcov = "@gcc_arm_none_eabi_toolchain//:bin/arm-none-eabi-gcov",
+    host_system_name = "unknown",
+    ld = "@gcc_arm_none_eabi_toolchain//:bin/arm-none-eabi-ld",
+    linker_files = "@gcc_arm_none_eabi_toolchain//:all",
+    objcopy_files = "@gcc_arm_none_eabi_toolchain//:all",
+    strip = "@gcc_arm_none_eabi_toolchain//:bin/arm-none-eabi-strip",
+    strip_files = "@gcc_arm_none_eabi_toolchain//:all",
+    supports_param_files = 0,
+    target_cpu = "unknown",
+    target_libc = "unknown",
+    target_system_name = "unknown",
+    toolchain_identifier = "arm-gcc-toolchain",
+) for mcpu in _MCPU]
+
+toolchain(
+    name = "arm_gcc_cc_toolchain_cortex-m0",
+    target_compatible_with = [
+        "@pw_toolchain//constraints/arm_mcpu:cortex-m0",
+    ],
+    toolchain = ":arm_gcc_toolchain_cortex-m0",
+    toolchain_type = "@bazel_tools//tools/cpp:toolchain_type",
+)
+
+toolchain(
+    name = "arm_gcc_cc_toolchain_cortex-m3",
+    target_compatible_with = [
+        "@pw_toolchain//constraints/arm_mcpu:cortex-m3",
+    ],
+    toolchain = ":arm_gcc_toolchain_cortex-m3",
+    toolchain_type = "@bazel_tools//tools/cpp:toolchain_type",
+)
+
+toolchain(
+    name = "arm_gcc_cc_toolchain_cortex-m4",
+    target_compatible_with = [
+        "@pw_toolchain//constraints/arm_mcpu:cortex-m4",
+    ],
+    toolchain = ":arm_gcc_toolchain_cortex-m4",
+    toolchain_type = "@bazel_tools//tools/cpp:toolchain_type",
+)
+
+toolchain(
+    name = "arm_gcc_cc_toolchain_cortex-m4+nofp",
+    target_compatible_with = [
+        "@pw_toolchain//constraints/arm_mcpu:cortex-m4+nofp",
+    ],
+    toolchain = ":arm_gcc_toolchain_cortex-m4+nofp",
+    toolchain_type = "@bazel_tools//tools/cpp:toolchain_type",
+)
diff --git a/pw_toolchain/arm_gcc/toolchains.gni b/pw_toolchain/arm_gcc/toolchains.gni
index fe1ed17..47f6d16 100644
--- a/pw_toolchain/arm_gcc/toolchains.gni
+++ b/pw_toolchain/arm_gcc/toolchains.gni
@@ -22,10 +22,31 @@
 }
 
 declare_args() {
-  # This flag allows you to specify the root directory of the ARM GCC tools to
-  # to use when compiling with an arm-none-eabi toolchain. This is useful for
-  # debugging toolchain-related issues, or for building with an
-  # externally-provided toolchain.
+  # This flag allows you to specify a prefix for ARM GCC tools use when
+  # compiling with an arm-none-eabi toolchain. This is useful for debugging
+  # toolchain-related issues, or for building with an externally-provided
+  # toolchain.
+  #
+  # Pigweed toolchains should NOT override this variable so projects or users
+  # can control it via `.gn` or by setting it as a regular gn argument (e.g.
+  # `gn gen --args='pw_toolchain_ARM_NONE_EABI_PREFIX=/path/to/my-'`).
+  #
+  # Examples:
+  #   pw_toolchain_ARM_NONE_EABI_PREFIX = ""
+  #   command: "arm-none-eabi-gcc" (from PATH)
+  #
+  #   pw_toolchain_ARM_NONE_EABI_PREFIX = "my-"
+  #   command: "my-arm-none-eabi-gcc" (from PATH)
+  #
+  #   pw_toolchain_ARM_NONE_EABI_PREFIX = "/bin/my-"
+  #   command: "/bin/my-arm-none-eabi-gcc" (absolute path)
+  #
+  #   pw_toolchain_ARM_NONE_EABI_PREFIX = "//environment/gcc_next/"
+  #   command: "../environment/gcc_next/arm-none-eabi-gcc" (relative path)
+  #
+  # GN templates should use `arm_gcc_toolchain_tools.*` to get the intended
+  # command string rather than relying directly on
+  # pw_toolchain_ARM_NONE_EABI_PREFIX.
   #
   # If the prefix begins with "//", it will be rebased to be relative to the
   # root build directory.
diff --git a/pw_toolchain/clang_tools.gni b/pw_toolchain/clang_tools.gni
index c92ab54..5b438a5 100644
--- a/pw_toolchain/clang_tools.gni
+++ b/pw_toolchain/clang_tools.gni
@@ -26,16 +26,39 @@
 }
 
 declare_args() {
-  # This flag allows you to specify the root directory of the clang, clang++,
+  # This flag allows you to specify a prefix to use for clang, clang++,
   # and llvm-ar binaries to use when compiling with a clang-based toolchain.
   # This is useful for debugging toolchain-related issues by building with an
   # externally-provided toolchain.
   #
+  # Pigweed toolchains should NOT override this variable so projects or users
+  # can control it via `.gn` or by setting it as a regular gn argument (e.g.
+  # `gn gen --args='pw_toolchain_CLANG_PREFIX=/path/to/my-llvm-'`).
+  #
+  # Examples:
+  #   pw_toolchain_CLANG_PREFIX = ""
+  #   command: "clang" (from PATH)
+  #
+  #   pw_toolchain_CLANG_PREFIX = "my-"
+  #   command: "my-clang" (from PATH)
+  #
+  #   pw_toolchain_CLANG_PREFIX = "/bin/my-"
+  #   command: "/bin/my-clang" (absolute path)
+  #
+  #   pw_toolchain_CLANG_PREFIX = "//environment/clang_next/"
+  #   command: "../environment/clang_next/clang" (relative path)
+  #
+  # GN templates should use `pw_toolchain_clang_tools.*` to get the intended
+  # command string rather than relying directly on pw_toolchain_CLANG_PREFIX.
+  #
   # If the prefix begins with "//", it will be rebased to be relative to the
   # root build directory.
   pw_toolchain_CLANG_PREFIX = _default_llvm_prefix
 
-  # This flag allows you to specify the root directory of the rustc binary.
+  # This flag allows you to specify a prefix for rustc.
+  #
+  # This follows the same rules as pw_toolchain_CLANG_PREFIX, see above for
+  # more information.
   #
   # If the prefix begins with "//", it will be rebased to be relative to the
   # root build directory.
@@ -47,6 +70,8 @@
   cc = "clang"
   cxx = "clang++"
   ld = cxx
+  llvm_cov = "llvm-cov"
+  llvm_profdata = "llvm-profdata"
   rustc = "rustc"
 
   _toolchain_prefix = pw_toolchain_CLANG_PREFIX
@@ -65,6 +90,8 @@
     cc = _toolchain_prefix + cc
     cxx = _toolchain_prefix + cxx
     ld = _toolchain_prefix + ld
+    llvm_cov = _toolchain_prefix + llvm_cov
+    llvm_profdata = _toolchain_prefix + llvm_profdata
   }
 
   _rust_prefix = pw_toolchain_RUST_PREFIX
diff --git a/pw_toolchain/docs.rst b/pw_toolchain/docs.rst
index 6d8250c..ac49ed7 100644
--- a/pw_toolchain/docs.rst
+++ b/pw_toolchain/docs.rst
@@ -85,12 +85,12 @@
 
 .. code-block::
 
-     static_analysis = {
-      clang_tidy_path = "//third_party/ctcache/clang-tidy"
-      _clang_tidy_cfg_path = rebase_path("//.clang-tidy", root_build_dir)
-      cc_post = "echo '-: $_clang_tidy_cfg_path' >> {{output}}.d"
-      cxx_post = "echo '-: $_clang_tidy_cfg_path' >> {{output}}.d"
-     }
+   static_analysis = {
+    clang_tidy_path = "//third_party/ctcache/clang-tidy"
+    _clang_tidy_cfg_path = rebase_path("//.clang-tidy", root_build_dir)
+    cc_post = "echo '-: $_clang_tidy_cfg_path' >> {{output}}.d"
+    cxx_post = "echo '-: $_clang_tidy_cfg_path' >> {{output}}.d"
+   }
 
 Excluding files from checks
 ===========================
diff --git a/pw_toolchain/host_clang/BUILD.bazel b/pw_toolchain/host_clang/BUILD.bazel
index 884169d..c3cf6fd4 100644
--- a/pw_toolchain/host_clang/BUILD.bazel
+++ b/pw_toolchain/host_clang/BUILD.bazel
@@ -27,15 +27,42 @@
 pw_cc_toolchain_feature(
     name = "macos_stdlib",
     cxx_builtin_include_directories = [
-        "@llvm_toolchain//:include/c++/v1",
-        "@llvm_toolchain//:lib/clang/17/include",
+        "%package(@llvm_toolchain//)%/include/c++/v1",
+        "%package(@llvm_toolchain//)%/lib/clang/17/include",
     ],
     linker_files = ["@llvm_toolchain//:lib/libc++.a"],
     target_compatible_with = ["@platforms//os:macos"],
 )
 
+pw_cc_toolchain_feature(
+    name = "linux_sysroot",
+    builtin_sysroot = "external/linux_sysroot",
+    cxx_builtin_include_directories = [
+        "%package(@llvm_toolchain//)%/include/x86_64-unknown-linux-gnu/c++/v1",
+        "%package(@llvm_toolchain//)%/include/c++/v1",
+        "%package(@llvm_toolchain//)%/lib/clang/17/include",
+        "%sysroot%/usr/local/include",
+        "%sysroot%/usr/include/x86_64-linux-gnu",
+        "%sysroot%/usr/include",
+    ],
+    linkopts = [
+        "-pthread",
+        "-stdlib=libc++",
+        "--rtlib=compiler-rt",
+        "--unwindlib=libunwind",
+    ],
+)
+
+filegroup(
+    name = "all_linux_files",
+    srcs = [
+        "@linux_sysroot//:all",
+        "@llvm_toolchain//:all",
+    ],
+)
+
 pw_cc_toolchain(
-    name = "host_toolchain",
+    name = "host_toolchain_macos",
     abi_libc_version = "unknown",
     abi_version = "unknown",
     all_files = "@llvm_toolchain//:all",
@@ -62,10 +89,13 @@
     strip = "@llvm_toolchain//:bin/llvm-strip",
     strip_files = "@llvm_toolchain//:all",
     supports_param_files = 0,
-    target_cpu = "unknown",
-    target_libc = "unknown",
+    # The implementations of some "legacy features" built into Bazel use
+    # `target_libc` to determine if a toolchain targets MacOS,
+    # https://github.com/bazelbuild/bazel/blob/release-7.0.0-pre.20230816.3rc1/src/main/java/com/google/devtools/build/lib/rules/cpp/CcModule.java#L1301-L1304
+    target_cpu = "darwin",
+    target_libc = "macosx",
     target_system_name = "unknown",
-    toolchain_identifier = "host-toolchain",
+    toolchain_identifier = "host-toolchain-macos",
 )
 
 toolchain(
@@ -76,6 +106,50 @@
     target_compatible_with = [
         "@platforms//os:macos",
     ],
-    toolchain = ":host_toolchain",
+    toolchain = ":host_toolchain_macos",
+    toolchain_type = "@bazel_tools//tools/cpp:toolchain_type",
+)
+
+pw_cc_toolchain(
+    name = "host_toolchain_linux",
+    abi_libc_version = "unknown",
+    abi_version = "unknown",
+    all_files = ":all_linux_files",
+    ar = "@llvm_toolchain//:bin/llvm-ar",
+    ar_files = ":all_linux_files",
+    as_files = ":all_linux_files",
+    compiler = "unknown",
+    compiler_files = ":all_linux_files",
+    coverage_files = ":all_linux_files",
+    cpp = "@llvm_toolchain//:bin/clang++",
+    dwp_files = ":all_linux_files",
+    feature_deps = [
+        ":linux_sysroot",
+        "@pw_toolchain//features:no_canonical_prefixes",
+    ],
+    gcc = "@llvm_toolchain//:bin/clang",
+    gcov = "@llvm_toolchain//:bin/llvm-cov",
+    host_system_name = "unknown",
+    ld = "@llvm_toolchain//:bin/clang++",
+    linker_files = ":all_linux_files",
+    objcopy_files = ":all_linux_files",
+    strip = "@llvm_toolchain//:bin/llvm-strip",
+    strip_files = ":all_linux_files",
+    supports_param_files = 0,
+    target_cpu = "unknown",
+    target_libc = "unknown",
+    target_system_name = "unknown",
+    toolchain_identifier = "host-toolchain-linux",
+)
+
+toolchain(
+    name = "host_cc_toolchain_linux",
+    exec_compatible_with = [
+        "@platforms//os:linux",
+    ],
+    target_compatible_with = [
+        "@platforms//os:linux",
+    ],
+    toolchain = ":host_toolchain_linux",
     toolchain_type = "@bazel_tools//tools/cpp:toolchain_type",
 )
diff --git a/pw_toolchain/host_clang/toolchains.gni b/pw_toolchain/host_clang/toolchains.gni
index 7602dbc..2d314de 100644
--- a/pw_toolchain/host_clang/toolchains.gni
+++ b/pw_toolchain/host_clang/toolchains.gni
@@ -45,23 +45,10 @@
 
 # Specifies the tools used by host Clang toolchains.
 _host_clang_toolchain = {
-  if (pw_toolchain_OSS_FUZZ_ENABLED) {
-    # OSS-Fuzz sets compiler and linker paths. See
-    # google.github.io/oss-fuzz/getting-started/new-project-guide/#Requirements.
-
-    # Just use the "llvm-ar" on the system path.
-    ar = "llvm-ar"
-    cc = getenv("CC")
-    cxx = getenv("CXX")
-  } else {
-    forward_variables_from(pw_toolchain_clang_tools, "*")
-  }
-
+  forward_variables_from(pw_toolchain_clang_tools, "*")
   is_host_toolchain = true
-
   static_analysis = {
-    # Enable static analysis for host clang based toolchains,
-    # even with OSS-Fuzz enabled.
+    # Enable static analysis for host clang based toolchains.
     enabled = true
   }
 }
@@ -126,7 +113,27 @@
 
   fuzz = {
     name = "host_clang_fuzz"
-    forward_variables_from(_host_clang_toolchain, "*")
+    cc = ""
+    cxx = ""
+    forward_variables_from(_host_clang_toolchain,
+                           "*",
+                           [
+                             "cc",
+                             "cxx",
+                           ])
+
+    # OSS-Fuzz sets compiler paths. See
+    # google.github.io/oss-fuzz/getting-started/new-project-guide/#Requirements.
+    if (pw_toolchain_OSS_FUZZ_ENABLED) {
+      cc = getenv("CC")
+      cxx = getenv("CXX")
+    }
+    if (cc == "") {
+      cc = _host_clang_toolchain.cc
+    }
+    if (cxx == "") {
+      cxx = _host_clang_toolchain.cxx
+    }
     defaults = {
       forward_variables_from(_defaults, "*")
 
diff --git a/pw_toolchain/py/pw_toolchain/clang_arm_toolchain.py b/pw_toolchain/py/pw_toolchain/clang_arm_toolchain.py
index 5a02f38..7b42091 100644
--- a/pw_toolchain/py/pw_toolchain/clang_arm_toolchain.py
+++ b/pw_toolchain/py/pw_toolchain/clang_arm_toolchain.py
@@ -175,11 +175,6 @@
         + str(
             Path(compiler_info['sysroot']) / 'lib' / compiler_info['multi_dir']
         ),
-        # Add libraries to link.
-        '-lc_nano',
-        '-lm',
-        '-lgcc',
-        '-lstdc++_nano',
     ]
 
     # Add C runtime object files.
diff --git a/pw_toolchain/rust/BUILD.bazel b/pw_toolchain/rust/BUILD.bazel
new file mode 100644
index 0000000..0a41c4f
--- /dev/null
+++ b/pw_toolchain/rust/BUILD.bazel
@@ -0,0 +1,22 @@
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+load(":defs.bzl", "pw_rust_declare_toolchain_targets")
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])
+
+# TODO: b/296459700 - Make this modular and configurable.
+pw_rust_declare_toolchain_targets()
diff --git a/pw_toolchain/rust/defs.bzl b/pw_toolchain/rust/defs.bzl
new file mode 100644
index 0000000..f3f58d2
--- /dev/null
+++ b/pw_toolchain/rust/defs.bzl
@@ -0,0 +1,216 @@
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Utilities for declaring Rust toolchains that are compatible with Arm gcc."""
+
+load("@rules_rust//rust:toolchain.bzl", "rust_analyzer_toolchain", "rust_toolchain")
+load("//pw_env_setup/bazel/cipd_setup:cipd_rules.bzl", "cipd_repository")
+
+HOSTS = [
+    {
+        "cpu": "aarch64",
+        "cipd_arch": "arm64",
+        "os": "linux",
+        "triple": "aarch64-unknown-linux-gnu",
+        "dylib_ext": ".so",
+    },
+    {
+        "cpu": "x86_64",
+        "cipd_arch": "amd64",
+        "os": "linux",
+        "triple": "x86_64-unknown-linux-gnu",
+        "dylib_ext": ".so",
+    },
+    {
+        "cpu": "aarch64",
+        "cipd_arch": "arm64",
+        "os": "macos",
+        "triple": "aarch64-apple-darwin",
+        "dylib_ext": ".dylib",
+    },
+    {
+        "cpu": "x86_64",
+        "cipd_arch": "amd64",
+        "os": "macos",
+        "triple": "x86_64-apple-darwin",
+        "dylib_ext": ".dylib",
+    },
+]
+
+EXTRA_TARGETS = [
+    {
+        "cpu": "armv6-m",
+        "triple": "thumbv6m-none-eabi",
+    },
+    {
+        "cpu": "armv7-m",
+        "triple": "thumbv7m-none-eabi",
+    },
+    {
+        "cpu": "armv7e-m",
+        "triple": "thumbv7m-none-eabi",
+    },
+]
+
+# buildifier: disable=unnamed-macro
+def pw_rust_register_toolchain_and_target_repos(cipd_tag):
+    """Declare and register CIPD repos for Rust toolchain and target support.
+
+    Args:
+      cipd_tag: Tag with which to select specific package versions.
+    """
+    for host in HOSTS:
+        cipd_os = host["os"]
+        if cipd_os == "macos":
+            cipd_os = "mac"
+
+        cipd_repository(
+            name = "rust_toolchain_host_{}_{}".format(host["os"], host["cpu"]),
+            build_file = "//pw_toolchain/rust:rust_toolchain.BUILD",
+            path = "fuchsia/third_party/rust/host/{}-{}".format(cipd_os, host["cipd_arch"]),
+            tag = cipd_tag,
+        )
+
+        cipd_repository(
+            name = "rust_toolchain_target_{}".format(host["triple"]),
+            build_file = "//pw_toolchain/rust:rust_stdlib.BUILD",
+            path = "fuchsia/third_party/rust/target/{}".format(host["triple"]),
+            tag = cipd_tag,
+        )
+
+    for target in EXTRA_TARGETS:
+        cipd_repository(
+            name = "rust_toolchain_target_{}".format(target["triple"]),
+            build_file = "//pw_toolchain/rust:rust_stdlib.BUILD",
+            path = "fuchsia/third_party/rust/target/{}".format(target["triple"]),
+            tag = cipd_tag,
+        )
+
+# buildifier: disable=unnamed-macro
+def pw_rust_register_toolchains():
+    """Register Rust Toolchains
+
+    For this registration to be valid one must
+    1. Call `pw_rust_register_toolchain_and_target_repos(tag)` previously in the
+       WORKSPACE file.
+    2. Call `pw_rust_declare_toolchain_targets()` from
+       `//pw_toolchain/rust/BUILD.bazel`.
+    """
+    for host in HOSTS:
+        native.register_toolchains(
+            "//pw_toolchain/rust:host_rust_toolchain_{}_{}".format(host["os"], host["cpu"]),
+            "//pw_toolchain/rust:host_rust_analyzer_toolchain_{}_{}".format(host["os"], host["cpu"]),
+        )
+        for target in EXTRA_TARGETS:
+            native.register_toolchains(
+                "//pw_toolchain/rust:{}_{}_rust_toolchain_{}_{}".format(host["os"], host["cpu"], target["triple"], target["cpu"]),
+            )
+
+# buildifier: disable=unnamed-macro
+def pw_rust_declare_toolchain_targets():
+    """Declare rust toolchain targets"""
+    for host in HOSTS:
+        _pw_rust_host_toolchain(
+            name = "host_rust_toolchain_{}_{}".format(host["os"], host["cpu"]),
+            analyzer_toolchain_name = "host_rust_analyzer_toolchain_{}_{}".format(host["os"], host["cpu"]),
+            compatible_with = [
+                "@platforms//cpu:{}".format(host["cpu"]),
+                "@platforms//os:{}".format(host["os"]),
+            ],
+            dylib_ext = host["dylib_ext"],
+            target_repo = "@rust_toolchain_target_{}".format(host["triple"]),
+            toolchain_repo = "@rust_toolchain_host_{}_{}".format(host["os"], host["cpu"]),
+            triple = host["triple"],
+        )
+        for target in EXTRA_TARGETS:
+            _pw_rust_toolchain(
+                name = "{}_{}_rust_toolchain_{}_{}".format(host["os"], host["cpu"], target["triple"], target["cpu"]),
+                exec_triple = host["triple"],
+                target_triple = target["triple"],
+                target_repo = "@rust_toolchain_target_{}".format(target["triple"]),
+                toolchain_repo = "@rust_toolchain_host_{}_{}".format(host["os"], host["cpu"]),
+                dylib_ext = "*.so",
+                exec_compatible_with = [
+                    "@platforms//cpu:{}".format(host["cpu"]),
+                    "@platforms//os:{}".format(host["os"]),
+                ],
+                target_compatible_with = [
+                    "@platforms//cpu:{}".format(target["cpu"]),
+                ],
+            )
+
+def _pw_rust_toolchain(
+        name,
+        exec_triple,
+        target_triple,
+        toolchain_repo,
+        target_repo,
+        dylib_ext,
+        exec_compatible_with,
+        target_compatible_with):
+    rust_toolchain(
+        name = "{}_rust_toolchain".format(name),
+        binary_ext = "",
+        default_edition = "2021",
+        dylib_ext = dylib_ext,
+        exec_triple = exec_triple,
+        rust_doc = "{}//:bin/rustdoc".format(toolchain_repo),
+        rust_std = "{}//:rust_std".format(target_repo),
+        rustc = "{}//:bin/rustc".format(toolchain_repo),
+        rustc_lib = "{}//:rustc_lib".format(toolchain_repo),
+        staticlib_ext = ".a",
+        stdlib_linkflags = [],
+        target_triple = target_triple,
+    )
+    native.toolchain(
+        name = name,
+        exec_compatible_with = exec_compatible_with,
+        target_compatible_with = target_compatible_with,
+        toolchain = ":{}_rust_toolchain".format(name),
+        toolchain_type = "@rules_rust//rust:toolchain",
+    )
+
+def _pw_rust_host_toolchain(
+        name,
+        analyzer_toolchain_name,
+        triple,
+        toolchain_repo,
+        target_repo,
+        dylib_ext,
+        compatible_with):
+    _pw_rust_toolchain(
+        name = name,
+        exec_triple = triple,
+        target_triple = triple,
+        toolchain_repo = toolchain_repo,
+        target_repo = target_repo,
+        dylib_ext = dylib_ext,
+        exec_compatible_with = compatible_with,
+        target_compatible_with = compatible_with,
+    )
+
+    rust_analyzer_toolchain(
+        name = "{}_rust_analyzer_toolchain".format(analyzer_toolchain_name),
+        proc_macro_srv = "{}//:libexec/rust-analyzer-proc-macro-srv".format(toolchain_repo),
+        rustc = "{}//:bin/rustc".format(toolchain_repo),
+        rustc_srcs = "{}//:rustc_srcs".format(toolchain_repo),
+        visibility = ["//visibility:public"],
+    )
+
+    native.toolchain(
+        name = analyzer_toolchain_name,
+        exec_compatible_with = compatible_with,
+        target_compatible_with = compatible_with,
+        toolchain = ":{}_rust_analyzer_toolchain".format(analyzer_toolchain_name),
+        toolchain_type = "@rules_rust//rust/rust_analyzer:toolchain_type",
+    )
diff --git a/pw_toolchain/rust/rust_stdlib.BUILD b/pw_toolchain/rust/rust_stdlib.BUILD
new file mode 100644
index 0000000..b92a116
--- /dev/null
+++ b/pw_toolchain/rust/rust_stdlib.BUILD
@@ -0,0 +1,31 @@
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+load("@rules_rust//rust:toolchain.bzl", "rust_stdlib_filegroup")
+
+exports_files(glob(["**"]))
+
+filegroup(
+    name = "all",
+    srcs = glob(["**"]),
+    visibility = ["//visibility:public"],
+)
+
+rust_stdlib_filegroup(
+    name = "rust_std",
+    # This is globbing over the target triple. Ideally, only the relevant target
+    # tripple is part of this filegroup.
+    srcs = glob(["lib/rustlib/*/lib/*"]),
+    visibility = ["//visibility:public"],
+)
diff --git a/pw_toolchain/rust/rust_toolchain.BUILD b/pw_toolchain/rust/rust_toolchain.BUILD
new file mode 100644
index 0000000..f8f3bf0
--- /dev/null
+++ b/pw_toolchain/rust/rust_toolchain.BUILD
@@ -0,0 +1,33 @@
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+exports_files(glob(["**"]))
+
+filegroup(
+    name = "all",
+    srcs = glob(["**"]),
+    visibility = ["//visibility:public"],
+)
+
+filegroup(
+    name = "rustc_lib",
+    srcs = glob(["lib/*.so"]),
+    visibility = ["//visibility:public"],
+)
+
+filegroup(
+    name = "rustc_srcs",
+    srcs = glob(["lib/rustlib/src/rust/src/**"]),
+    visibility = ["//visibility:public"],
+)
diff --git a/pw_toolchain/static_analysis_toolchain.gni b/pw_toolchain/static_analysis_toolchain.gni
index e532ff3..5aa5abf 100644
--- a/pw_toolchain/static_analysis_toolchain.gni
+++ b/pw_toolchain/static_analysis_toolchain.gni
@@ -16,6 +16,7 @@
 
 import("$dir_pw_compilation_testing/negative_compilation_test.gni")
 import("$dir_pw_third_party/boringssl/boringssl.gni")
+import("$dir_pw_third_party/chre/chre.gni")
 import("$dir_pw_third_party/googletest/googletest.gni")
 import("$dir_pw_third_party/mbedtls/mbedtls.gni")
 import("$dir_pw_toolchain/universal_tools.gni")
@@ -53,6 +54,7 @@
   dir_pw_third_party_mbedtls,
   dir_pw_third_party_boringssl,
   dir_pw_third_party_googletest,
+  dir_pw_third_party_chre,
 ]
 
 # Creates a toolchain target for static analysis.
@@ -97,6 +99,7 @@
       ]
     }
   }
+
   _skipped_regexps += pw_toolchain_STATIC_ANALYSIS_SKIP_SOURCES_RES
   _skipped_include_paths += pw_toolchain_STATIC_ANALYSIS_SKIP_INCLUDE_PATHS
 
diff --git a/pw_toolchain_bazel/cc_toolchain/private/cc_toolchain.bzl b/pw_toolchain_bazel/cc_toolchain/private/cc_toolchain.bzl
index 6344070..5d5f7df 100644
--- a/pw_toolchain_bazel/cc_toolchain/private/cc_toolchain.bzl
+++ b/pw_toolchain_bazel/cc_toolchain/private/cc_toolchain.bzl
@@ -15,6 +15,15 @@
 
 load("@bazel_tools//tools/build_defs/cc:action_names.bzl", "ACTION_NAMES")
 load(
+    "@bazel_tools//tools/cpp:cc_toolchain_config_lib.bzl",
+    "action_config",
+    "feature",
+    "flag_group",
+    "flag_set",
+    "tool",
+    "variable_with_value",
+)
+load(
     "//cc_toolchain/private:providers.bzl",
     "ToolchainFeatureInfo",
 )
@@ -26,11 +35,6 @@
     "ALL_LINK_ACTIONS",
     "check_deps",
 )
-load(
-    "@bazel_tools//tools/cpp:cc_toolchain_config_lib.bzl",
-    "action_config",
-    "tool",
-)
 
 PW_CC_TOOLCHAIN_CONFIG_ATTRS = {
     "feature_deps": "pw_cc_toolchain_feature labels that provide features for this toolchain",
@@ -50,7 +54,6 @@
     "compiler": "See documentation for cc_common.create_cc_toolchain_config_info()",
     "abi_version": "See documentation for cc_common.create_cc_toolchain_config_info()",
     "abi_libc_version": "See documentation for cc_common.create_cc_toolchain_config_info()",
-    "builtin_sysroot": "See documentation for cc_common.create_cc_toolchain_config_info()",
     "cc_target_os": "See documentation for cc_common.create_cc_toolchain_config_info()",
 }
 
@@ -64,6 +67,7 @@
     "cxx_builtin_include_directories": "Use a pw_cc_toolchain_feature to add cxx_builtin_include_directories",
     "tool_paths": "pw_cc_toolchain does not support tool_paths, use \"ar\", \"cpp\", \"gcc\", \"gcov\", \"ld\", and \"strip\" attributes to set toolchain tools",
     "make_variables": "pw_cc_toolchain does not yet support make variables",
+    "builtin_sysroot": "Use a pw_cc_toolchain_feature to add a builtin_sysroot",
 }
 
 def _action_configs(action_tool, action_list):
@@ -89,6 +93,86 @@
         for action in action_list
     ]
 
+def _archiver_flags_feature(is_mac):
+    """Returns our implementation of the legacy archiver_flags feature.
+
+    We provide our own implementation of the archiver_flags.  The default
+    implementation of this legacy feature at
+    https://github.com/bazelbuild/bazel/blob/252d36384b8b630d77d21fac0d2c5608632aa393/src/main/java/com/google/devtools/build/lib/rules/cpp/CppActionConfigs.java#L620-L660
+    contains a bug that prevents it from working with llvm-libtool-darwin only
+    fixed in
+    https://github.com/bazelbuild/bazel/commit/ae7cfa59461b2c694226be689662d387e9c38427,
+    which has not yet been released.
+
+    However, we don't merely fix the bug. Part of the Pigweed build involves
+    linking some empty libraries (with no object files). This leads to invoking
+    the archiving tool with no input files. Such an invocation is considered a
+    success by llvm-ar, but not by llvm-libtool-darwin. So for now, we use
+    flags appropriate for llvm-ar here, even on MacOS.
+
+    Args:
+        is_mac: Does the toolchain this feature will be included in target MacOS?
+
+    Returns:
+        The archiver_flags feature.
+    """
+
+    # TODO(b/297413805): Remove this implementation.
+    return feature(
+        name = "archiver_flags",
+        flag_sets = [
+            flag_set(
+                actions = [
+                    ACTION_NAMES.cpp_link_static_library,
+                ],
+                flag_groups = [
+                    flag_group(
+                        flags = _archiver_flags(is_mac),
+                    ),
+                    flag_group(
+                        expand_if_available = "output_execpath",
+                        flags = ["%{output_execpath}"],
+                    ),
+                ],
+            ),
+            flag_set(
+                actions = [
+                    ACTION_NAMES.cpp_link_static_library,
+                ],
+                flag_groups = [
+                    flag_group(
+                        expand_if_available = "libraries_to_link",
+                        iterate_over = "libraries_to_link",
+                        flag_groups = [
+                            flag_group(
+                                expand_if_equal = variable_with_value(
+                                    name = "libraries_to_link.type",
+                                    value = "object_file",
+                                ),
+                                flags = ["%{libraries_to_link.name}"],
+                            ),
+                            flag_group(
+                                expand_if_equal = variable_with_value(
+                                    name = "libraries_to_link.type",
+                                    value = "object_file_group",
+                                ),
+                                flags = ["%{libraries_to_link.object_files}"],
+                                iterate_over = "libraries_to_link.object_files",
+                            ),
+                        ],
+                    ),
+                ],
+            ),
+        ],
+    )
+
+def _archiver_flags(is_mac):
+    """Returns flags for llvm-ar."""
+    if is_mac:
+        return ["--format=darwin", "rcs"]
+    else:
+        return ["rcsD"]
+
 def _pw_cc_toolchain_config_impl(ctx):
     """Rule that provides a CcToolchainConfigInfo.
 
@@ -108,7 +192,7 @@
     all_actions += [
         action_config(
             action_name = ACTION_NAMES.cpp_link_static_library,
-            implies = ["archiver_flags"],
+            implies = ["archiver_flags", "linker_param_file"],
             tools = [
                 tool(
                     tool = ctx.executable.ar,
@@ -134,10 +218,19 @@
     ]
 
     features = [dep[ToolchainFeatureInfo].feature for dep in ctx.attr.feature_deps]
+    features.append(_archiver_flags_feature(ctx.attr.target_libc == "macosx"))
     builtin_include_dirs = []
     for dep in ctx.attr.feature_deps:
         builtin_include_dirs.extend(dep[ToolchainFeatureInfo].cxx_builtin_include_directories)
 
+    sysroot_dir = None
+    for dep in ctx.attr.feature_deps:
+        dep_sysroot = dep[ToolchainFeatureInfo].builtin_sysroot
+        if dep_sysroot:
+            if sysroot_dir:
+                fail("Failed to set sysroot at `{}`, already have sysroot at `{}` ".format(dep_sysroot, sysroot_dir))
+            sysroot_dir = dep_sysroot
+
     return cc_common.create_cc_toolchain_config_info(
         ctx = ctx,
         action_configs = all_actions,
@@ -151,7 +244,7 @@
         compiler = ctx.attr.compiler,
         abi_version = ctx.attr.abi_version,
         abi_libc_version = ctx.attr.abi_libc_version,
-        builtin_sysroot = ctx.attr.builtin_sysroot,
+        builtin_sysroot = sysroot_dir,
         cc_target_os = ctx.attr.cc_target_os,
     )
 
@@ -176,7 +269,6 @@
         "compiler": attr.string(),
         "abi_version": attr.string(),
         "abi_libc_version": attr.string(),
-        "builtin_sysroot": attr.string(),
         "cc_target_os": attr.string(),
     },
     provides = [CcToolchainConfigInfo],
diff --git a/pw_toolchain_bazel/cc_toolchain/private/providers.bzl b/pw_toolchain_bazel/cc_toolchain/private/providers.bzl
index 1e017ce..a63a7d1 100644
--- a/pw_toolchain_bazel/cc_toolchain/private/providers.bzl
+++ b/pw_toolchain_bazel/cc_toolchain/private/providers.bzl
@@ -16,7 +16,8 @@
 ToolchainFeatureInfo = provider(
     doc = "A provider containing cc_toolchain features and related fields.",
     fields = {
-        "feature": "feature: The a group of build flags structured as a toolchain feature",
-        "cxx_builtin_include_directories": "List[str]: Builtin C/C++ standard library include directories",
+        "feature": "feature: A group of build flags structured as a toolchain feature.",
+        "cxx_builtin_include_directories": "List[str]: Builtin C/C++ standard library include directories.",
+        "builtin_sysroot": "str: Path to the sysroot directory. Use `external/[repo_name]` for sysroots provided as an external repository.",
     },
 )
diff --git a/pw_toolchain_bazel/cc_toolchain/private/toolchain_feature.bzl b/pw_toolchain_bazel/cc_toolchain/private/toolchain_feature.bzl
index 5e0285b..d92ae60 100644
--- a/pw_toolchain_bazel/cc_toolchain/private/toolchain_feature.bzl
+++ b/pw_toolchain_bazel/cc_toolchain/private/toolchain_feature.bzl
@@ -33,6 +33,7 @@
     "linkopts": "List[str]: Flags to pass to C compile actions",
     "linker_files": "List[File]: Files to link",
     "cxx_builtin_include_directories": "List[str]: Paths to C++ standard library include directories",
+    "builtin_sysroot": "str: Path to the directory containing the sysroot",
 }
 
 def _dict_to_str(dict_to_stringify):
@@ -137,6 +138,11 @@
         initialized_args["cxx_builtin_include_directories"] = kwargs["cxx_builtin_include_directories"]
     else:
         initialized_args["cxx_builtin_include_directories"] = []
+
+    if "builtin_sysroot" in kwargs:
+        initialized_args["builtin_sysroot"] = kwargs["builtin_sysroot"]
+    else:
+        initialized_args["builtin_sysroot"] = None
     return initialized_args
 
 def build_toolchain_feature_info(ctx, **kwargs):
@@ -164,6 +170,7 @@
         ToolchainFeatureInfo(
             feature = new_feature,
             cxx_builtin_include_directories = initialized_args["cxx_builtin_include_directories"],
+            builtin_sysroot = initialized_args["builtin_sysroot"],
         ),
         DefaultInfo(files = initialized_args["linker_files"]),
     ]
@@ -186,6 +193,7 @@
         linkopts = ctx.attr.linkopts,
         linker_files = ctx.files.linker_files,
         cxx_builtin_include_directories = ctx.attr.cxx_builtin_include_directories,
+        builtin_sysroot = ctx.attr.builtin_sysroot,
     )
 
 pw_cc_toolchain_feature = rule(
@@ -198,6 +206,7 @@
         "linkopts": attr.string_list(),
         "linker_files": attr.label_list(allow_files = True),
         "cxx_builtin_include_directories": attr.string_list(),
+        "builtin_sysroot": attr.string(),
     },
     provides = [ToolchainFeatureInfo, DefaultInfo],
 )
diff --git a/pw_toolchain_bazel/constraints/arm_mcpu/BUILD.bazel b/pw_toolchain_bazel/constraints/arm_mcpu/BUILD.bazel
new file mode 100644
index 0000000..a8b671f
--- /dev/null
+++ b/pw_toolchain_bazel/constraints/arm_mcpu/BUILD.bazel
@@ -0,0 +1,65 @@
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])
+
+# The target Arm processor.
+#
+# The values of this constraint_setting correspond to valid values of the -mcpu
+# flag for Arm gcc. See
+# https://gcc.gnu.org/onlinedocs/gcc/ARM-Options.html#index-mcpu-2. These
+# values are intended to be used in `target_compatible_with` attributes of
+# toolchains which specify the corresponding value of -mcpu.
+#
+# The constraint_values are not currently exhaustively enumerated (only a few
+# of the legal -mcpu values have corresponding constraint_values). The intent
+# is for additional values to be added when needed.
+#
+# The intent is to support only (processor type + optional architectural
+# extensions) forms of -mcpu (e.g., cortex-m4 or cortex-m4+nofp), not march
+# values (e.g., armv7e-m). This is because it is recommended to use (processor
+# type + optional architectural extensions) when configuring Arm GCC (see
+# https://community.arm.com/arm-community-blogs/b/tools-software-ides-blog/posts/compiler-flags-across-architectures-march-mtune-and-mcpu).
+# In addition, the march values can already be captured using @platforms//cpu.
+constraint_setting(
+    name = "mcpu",
+    default_constraint_value = "none",
+)
+
+constraint_value(
+    name = "none",
+    constraint_setting = ":mcpu",
+)
+
+constraint_value(
+    name = "cortex-m0",
+    constraint_setting = ":mcpu",
+)
+
+constraint_value(
+    name = "cortex-m3",
+    constraint_setting = ":mcpu",
+)
+
+constraint_value(
+    name = "cortex-m4",
+    constraint_setting = ":mcpu",
+)
+
+constraint_value(
+    name = "cortex-m4+nofp",
+    constraint_setting = ":mcpu",
+)
diff --git a/pw_toolchain_bazel/features/macos/private/xcode_command_line_tools.bzl b/pw_toolchain_bazel/features/macos/private/xcode_command_line_tools.bzl
index 20c1ada..dbd741e 100644
--- a/pw_toolchain_bazel/features/macos/private/xcode_command_line_tools.bzl
+++ b/pw_toolchain_bazel/features/macos/private/xcode_command_line_tools.bzl
@@ -101,12 +101,10 @@
         ToolchainFeatureInfo
     """
     sdk_path = ctx.attr.sdk[XcodeSdkInfo].sdk_path
-    flags = ["--sysroot=" + sdk_path]
     return build_toolchain_feature_info(
         ctx = ctx,
-        copts = flags,
-        linkopts = flags,
-        cxx_builtin_include_directories = [sdk_path],
+        cxx_builtin_include_directories = ["%sysroot%/usr/include"],
+        builtin_sysroot = sdk_path,
     )
 
 pw_macos_sysroot = rule(
diff --git a/pw_trace_tokenized/docs.rst b/pw_trace_tokenized/docs.rst
index 761889b..22363ac 100644
--- a/pw_trace_tokenized/docs.rst
+++ b/pw_trace_tokenized/docs.rst
@@ -202,7 +202,7 @@
 Ex. Invoking PW_TRACE_INSTANT with 'test1' and 'test2', then calling this
 function would produce this in the output logs:
 
-.. code:: sh
+.. code-block:: sh
 
   [TRACE] begin
   [TRACE] data: BWdDMRoABWj52YMB
diff --git a/pw_transfer/BUILD.bazel b/pw_transfer/BUILD.bazel
index 18ce055..55eeab6 100644
--- a/pw_transfer/BUILD.bazel
+++ b/pw_transfer/BUILD.bazel
@@ -12,10 +12,10 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-load("//pw_build:pigweed.bzl", "pw_cc_binary", "pw_cc_library", "pw_cc_test")
-load("//pw_protobuf_compiler:pw_proto_library.bzl", "pw_proto_library")
 load("@com_google_protobuf//:protobuf.bzl", "py_proto_library")
 load("@rules_proto//proto:defs.bzl", "proto_library")
+load("//pw_build:pigweed.bzl", "pw_cc_binary", "pw_cc_library", "pw_cc_test")
+load("//pw_protobuf_compiler:pw_proto_library.bzl", "pw_proto_library")
 
 package(default_visibility = ["//visibility:public"])
 
diff --git a/pw_transfer/docs.rst b/pw_transfer/docs.rst
index a7a0887..2d60436 100644
--- a/pw_transfer/docs.rst
+++ b/pw_transfer/docs.rst
@@ -630,7 +630,7 @@
 
 To run the tests on your machine, run
 
-.. code:: bash
+.. code-block:: bash
 
   $ bazel test --features=c++17 \
         pw_transfer/integration_test:cross_language_small_test \
@@ -645,7 +645,7 @@
 when running the tests. This allows manual testing of older versions of
 pw_transfer against newer versions.
 
-.. code:: bash
+.. code-block:: bash
 
   # Test a newer version of pw_transfer against an old C++ client that was
   # backed up to another directory.
@@ -663,7 +663,7 @@
 
 The CIPD package contents can be created with this command:
 
-.. code::bash
+.. code-block::bash
 
   $ bazel build --features=c++17 pw_transfer/integration_test:server \
                                  pw_transfer/integration_test:cpp_client
@@ -684,6 +684,7 @@
 slow. However, you can request that the tests be run in presubmit on your
 change by adding to following line to the commit message footer:
 
-.. code::
+.. code-block::
 
-  Cq-Include-Trybots: luci.pigweed.try:pigweed-integration-transfer
+   Cq-Include-Trybots: luci.pigweed.try:pigweed-integration-transfer
+
diff --git a/pw_transfer/integration_test/BUILD.bazel b/pw_transfer/integration_test/BUILD.bazel
index a153798..84be05c 100644
--- a/pw_transfer/integration_test/BUILD.bazel
+++ b/pw_transfer/integration_test/BUILD.bazel
@@ -12,9 +12,9 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-load("//pw_build:pigweed.bzl", "pw_cc_binary")
 load("@com_google_protobuf//:protobuf.bzl", "py_proto_library")
 load("@rules_proto//proto:defs.bzl", "proto_library")
+load("//pw_build:pigweed.bzl", "pw_cc_binary")
 
 pw_cc_binary(
     name = "server",
diff --git a/pw_unit_test/docs.rst b/pw_unit_test/docs.rst
index 1ab6eee..74e7f18 100644
--- a/pw_unit_test/docs.rst
+++ b/pw_unit_test/docs.rst
@@ -12,7 +12,7 @@
 
 .. note::
 
-  This documentation is currently incomplete.
+   This documentation is currently incomplete.
 
 -------------------------------------------
 pw_unit_test:light: GoogleTest for Embedded
@@ -33,22 +33,29 @@
 expected in a complete testing framework; nevertheless, it is already used
 heavily within Pigweed.
 
-.. note::
+GoogleTest compatibility
+========================
+pw_unit_test implements a subset of GoogleTest. Supported features include:
 
-  Many of GoogleTest's more advanced features are not yet implemented. Missing
-  features include:
+* Test and test suite declarations.
+* Most ``EXPECT`` and ``ASSERT`` macros.
+* Stream-style expectation messages, such as
+  ``EXPECT_EQ(val, 5) << "Inputs: " << input``. Messages are currently ignored.
 
-  * Any GoogleMock features (e.g. :c:macro:`EXPECT_THAT`)
-  * Floating point comparison macros (e.g. :c:macro:`EXPECT_FLOAT_EQ`)
-  * Death tests (e.g. :c:macro:`EXPECT_DEATH`); ``EXPECT_DEATH_IF_SUPPORTED``
-    does nothing but silently passes
-  * Value-parameterized tests
+Many of GoogleTest's advanced features are not yet implemented. Missing features
+include:
 
-  To request a feature addition, please
-  `let us know <mailto:pigweed@googlegroups.com>`_.
+* Any GoogleMock features (e.g. :c:macro:`EXPECT_THAT`)
+* Floating point comparison macros (e.g. :c:macro:`EXPECT_FLOAT_EQ`)
+* Death tests (e.g. :c:macro:`EXPECT_DEATH`); ``EXPECT_DEATH_IF_SUPPORTED``
+  does nothing but silently passes
+* Value-parameterized tests
 
-  See `Using upstream GoogleTest`_ below for information
-  about using upstream GoogleTest instead.
+To request a feature addition, please `let us know
+<mailto:pigweed@googlegroups.com>`_.
+
+See `Using upstream GoogleTest`_ below for information
+about using upstream GoogleTest instead.
 
 The EventHandler interface
 ==========================
@@ -249,7 +256,7 @@
 
 **Example**
 
-.. code::
+.. code-block::
 
    import("$dir_pw_unit_test/test.gni")
 
@@ -285,7 +292,7 @@
 
 **Example**
 
-.. code::
+.. code-block::
 
    import("$dir_pw_unit_test/test.gni")
 
@@ -454,7 +461,7 @@
 
 **Example**
 
-.. code::
+.. code-block::
 
    include($ENV{PW_ROOT}/pw_unit_test/test.cmake)
 
@@ -494,7 +501,7 @@
 
 **Example**
 
-.. code::
+.. code-block::
 
    include($ENV{PW_ROOT}/pw_unit_test/test.cmake)
 
@@ -663,7 +670,7 @@
 To set up RPC-based unit tests in your application, instantiate a
 ``pw::unit_test::UnitTestService`` and register it with your RPC server.
 
-.. code:: c++
+.. code-block:: c++
 
    #include "pw_rpc/server.h"
    #include "pw_unit_test/unit_test_service.h"
@@ -683,9 +690,10 @@
 All tests flashed to an attached device can be run via python by calling
 ``pw_unit_test.rpc.run_tests()`` with a RPC client services object that has
 the unit testing RPC service enabled. By default, the results will output via
-logging.
+logging. This method returns a ``TestRecord`` dataclass instance, containing
+the results of the test run.
 
-.. code:: python
+.. code-block:: python
 
    from pw_hdlc.rpc import HdlcRpcClient
    from pw_unit_test.rpc import run_tests
@@ -699,7 +707,7 @@
 pw_unit_test.rpc
 ----------------
 .. automodule:: pw_unit_test.rpc
-   :members: EventHandler, run_tests
+   :members: EventHandler, run_tests, TestRecord
 
 ----------------------------
 Module Configuration Options
@@ -734,23 +742,23 @@
 the `main` functions written for `pw_unit_test:light` to work with upstream
 GoogleTest without modification, as shown below.
 
-  .. code-block:: c++
+.. code-block:: c++
 
-    #include "gtest/gtest.h"
-    #include "pw_unit_test/logging_event_handler.h"
+   #include "gtest/gtest.h"
+   #include "pw_unit_test/logging_event_handler.h"
 
-    int main() {
-      testing::InitGoogleTest();
-      pw::unit_test::LoggingEventHandler logger;
-      pw::unit_test::RegisterEventHandler(&logger);
-      return RUN_ALL_TESTS();
-    }
+   int main() {
+     testing::InitGoogleTest();
+     pw::unit_test::LoggingEventHandler logger;
+     pw::unit_test::RegisterEventHandler(&logger);
+     return RUN_ALL_TESTS();
+   }
 
 .. cpp:namespace-push:: pw::unit_test
 
 .. cpp:class:: GoogleTestHandlerAdapter
 
-  A GoogleTest Event Listener that fires GoogleTest emitted events to an
-  appropriate ``EventHandler``.
+   A GoogleTest Event Listener that fires GoogleTest emitted events to an
+   appropriate ``EventHandler``.
 
 .. cpp::namespace-pop::
diff --git a/pw_unit_test/framework_test.cc b/pw_unit_test/framework_test.cc
index c06348b..b57a642 100644
--- a/pw_unit_test/framework_test.cc
+++ b/pw_unit_test/framework_test.cc
@@ -88,6 +88,37 @@
   EXPECT_TRUE(false);
 }
 
+TEST(PigweedTest, Logs) {
+  EXPECT_TRUE(true) << "This message is ignored";
+  EXPECT_FALSE(false) << "This message is ignored";
+  EXPECT_EQ(0, 0) << "This message is ignored";
+  EXPECT_NE(0, 1) << "This message is ignored";
+  EXPECT_GT(1, 0) << "This message is ignored";
+  EXPECT_GE(0, 0) << "This message is ignored";
+  EXPECT_LT(0, 1) << "This message is ignored";
+  EXPECT_LE(0, 0) << "This message is ignored";
+  EXPECT_STREQ("", "") << "This message is ignored";
+  EXPECT_STRNE("", "?") << "This message is ignored";
+
+  ASSERT_TRUE(true) << "This message is ignored";
+  ASSERT_FALSE(false) << "This message is ignored";
+  ASSERT_EQ(0, 0) << "This message is ignored";
+  ASSERT_NE(0, 1) << "This message is ignored";
+  ASSERT_GT(1, 0) << "This message is ignored";
+  ASSERT_GE(0, 0) << "This message is ignored";
+  ASSERT_LT(0, 1) << "This message is ignored";
+  ASSERT_LE(0, 0) << "This message is ignored";
+  ASSERT_STREQ("", "") << "This message is ignored";
+  ASSERT_STRNE("", "?") << "This message is ignored";
+
+  if (false) {
+    ADD_FAILURE() << "This failed!" << 123;
+    GTEST_FAIL() << "This failed!" << 123 << '?';
+    GTEST_SKIP() << 1.0f << " skips!";
+  }
+  GTEST_SUCCEED() << "This message is ignored";
+}
+
 class SkipOnSetUpTest : public ::testing::Test {
  public:
   void SetUp() override { GTEST_SKIP(); }
@@ -147,6 +178,13 @@
   EXPECT_EQ(i, 4);
 }
 
+class ClassWithPrivateMethod {
+  FRIEND_TEST(FixtureTest, FriendClass);
+
+ private:
+  int Return314() { return 314; }
+};
+
 class FixtureTest : public ::testing::Test {
  public:
   FixtureTest() : string_("hello world") {}
@@ -163,6 +201,10 @@
   EXPECT_EQ(StringLength(), 11);
 }
 
+TEST_F(FixtureTest, FriendClass) {
+  EXPECT_EQ(ClassWithPrivateMethod().Return314(), 314);
+}
+
 class PigweedTestFixture : public ::testing::Test {
  protected:
   PigweedTestFixture() : cool_number_(35) {}
@@ -198,6 +240,7 @@
   static int value;
 
   static void SetUpTestSuite() {
+    value = 1;
     EXPECT_EQ(value, 1);
     value++;
   }
diff --git a/pw_unit_test/public/pw_unit_test/internal/framework.h b/pw_unit_test/public/pw_unit_test/internal/framework.h
index 11bccc7..25682e6 100644
--- a/pw_unit_test/public/pw_unit_test/internal/framework.h
+++ b/pw_unit_test/public/pw_unit_test/internal/framework.h
@@ -47,16 +47,21 @@
   _PW_TEST_SUITE_NAMES_MUST_BE_UNIQUE(int /* TEST_F */, test_fixture); \
   _PW_TEST(test_fixture, test_name, test_fixture)
 
-#define EXPECT_TRUE(expr) static_cast<void>(_PW_TEST_BOOL(expr, true))
-#define EXPECT_FALSE(expr) static_cast<void>(_PW_TEST_BOOL(expr, false))
-#define EXPECT_EQ(lhs, rhs) static_cast<void>(_PW_TEST_OP(lhs, rhs, ==))
-#define EXPECT_NE(lhs, rhs) static_cast<void>(_PW_TEST_OP(lhs, rhs, !=))
-#define EXPECT_GT(lhs, rhs) static_cast<void>(_PW_TEST_OP(lhs, rhs, >))
-#define EXPECT_GE(lhs, rhs) static_cast<void>(_PW_TEST_OP(lhs, rhs, >=))
-#define EXPECT_LT(lhs, rhs) static_cast<void>(_PW_TEST_OP(lhs, rhs, <))
-#define EXPECT_LE(lhs, rhs) static_cast<void>(_PW_TEST_OP(lhs, rhs, <=))
-#define EXPECT_STREQ(lhs, rhs) static_cast<void>(_PW_TEST_C_STR(lhs, rhs, ==))
-#define EXPECT_STRNE(lhs, rhs) static_cast<void>(_PW_TEST_C_STR(lhs, rhs, !=))
+// Use of the FRIEND_TEST() macro is discouraged, because it induces coupling
+// between testing and implementation code. Consider this a last resort only.
+#define FRIEND_TEST(test_suite_name, test_name) \
+  friend class test_suite_name##_##test_name##_Test
+
+#define EXPECT_TRUE(expr) _PW_TEST_EXPECT(_PW_TEST_BOOL(expr, true))
+#define EXPECT_FALSE(expr) _PW_TEST_EXPECT(_PW_TEST_BOOL(expr, false))
+#define EXPECT_EQ(lhs, rhs) _PW_TEST_EXPECT(_PW_TEST_OP(lhs, rhs, ==))
+#define EXPECT_NE(lhs, rhs) _PW_TEST_EXPECT(_PW_TEST_OP(lhs, rhs, !=))
+#define EXPECT_GT(lhs, rhs) _PW_TEST_EXPECT(_PW_TEST_OP(lhs, rhs, >))
+#define EXPECT_GE(lhs, rhs) _PW_TEST_EXPECT(_PW_TEST_OP(lhs, rhs, >=))
+#define EXPECT_LT(lhs, rhs) _PW_TEST_EXPECT(_PW_TEST_OP(lhs, rhs, <))
+#define EXPECT_LE(lhs, rhs) _PW_TEST_EXPECT(_PW_TEST_OP(lhs, rhs, <=))
+#define EXPECT_STREQ(lhs, rhs) _PW_TEST_EXPECT(_PW_TEST_C_STR(lhs, rhs, ==))
+#define EXPECT_STRNE(lhs, rhs) _PW_TEST_EXPECT(_PW_TEST_C_STR(lhs, rhs, !=))
 
 #define ASSERT_TRUE(expr) _PW_TEST_ASSERT(_PW_TEST_BOOL(expr, true))
 #define ASSERT_FALSE(expr) _PW_TEST_ASSERT(_PW_TEST_BOOL(expr, false))
@@ -70,17 +75,19 @@
 #define ASSERT_STRNE(lhs, rhs) _PW_TEST_ASSERT(_PW_TEST_C_STR(lhs, rhs, !=))
 
 // Generates a non-fatal failure with a generic message.
-#define ADD_FAILURE()                                                  \
-  ::pw::unit_test::internal::Framework::Get().CurrentTestExpectSimple( \
-      "(line is not executed)", "(line was executed)", __LINE__, false)
+#define ADD_FAILURE()                                                    \
+  ::pw::unit_test::internal::Framework::Get().CurrentTestExpectSimple(   \
+      "(line is not executed)", "(line was executed)", __LINE__, false); \
+  _PW_UNIT_TEST_LOG
 
 // Generates a fatal failure with a generic message.
 #define GTEST_FAIL() return ADD_FAILURE()
 
 // Skips test at runtime, which is neither successful nor failed. Skip aborts
 // current function.
-#define GTEST_SKIP() \
-  return ::pw::unit_test::internal::Framework::Get().CurrentTestSkip(__LINE__)
+#define GTEST_SKIP()                                                     \
+  ::pw::unit_test::internal::Framework::Get().CurrentTestSkip(__LINE__); \
+  return _PW_UNIT_TEST_LOG
 
 // Define either macro to 1 to omit the definition of FAIL(), which is a
 // generic name and clashes with some other libraries.
@@ -91,7 +98,8 @@
 // Generates a success with a generic message.
 #define GTEST_SUCCEED()                                                \
   ::pw::unit_test::internal::Framework::Get().CurrentTestExpectSimple( \
-      "(success)", "(success)", __LINE__, true)
+      "(success)", "(success)", __LINE__, true);                       \
+  _PW_UNIT_TEST_LOG
 
 // Define either macro to 1 to omit the definition of SUCCEED(), which
 // is a generic name and clashes with some other libraries.
@@ -246,6 +254,9 @@
   // Whether the current test is skipped.
   bool IsSkipped() const { return current_result_ == TestResult::kSkipped; }
 
+  // Whether the current test has failed.
+  bool HasFailure() const { return current_result_ == TestResult::kFailure; }
+
   // Constructs an instance of a unit test class and runs the test.
   //
   // Tests are constructed within a static memory pool at run time instead of
@@ -450,6 +461,8 @@
   static void SetUpTestSuite() {}
   static void TearDownTestSuite() {}
 
+  static bool HasFailure() { return Framework::Get().HasFailure(); }
+
   // Runs the unit test.
   void PigweedTestRun() {
     SetUp();
@@ -497,6 +510,35 @@
   return true;
 }
 
+// GoogleTest supports stream-style messages, but pw_unit_test does not. This
+// class accepts and ignores C++ <<-style logs. This could be replaced with
+// pw_log/glog_adapter.h.
+class IgnoreLogs {
+ public:
+  constexpr IgnoreLogs() = default;
+
+  template <typename T>
+  constexpr const IgnoreLogs& operator<<(const T&) const {
+    return *this;
+  }
+};
+
+// Used to ignore a stream-style message in an assert, which returns. This uses
+// a similar approach as upstream GoogleTest, but drops any messages.
+class ReturnHelper {
+ public:
+  constexpr ReturnHelper() = default;
+
+  // Return void so that assigning to ReturnHelper converts the log expression
+  // to void without blocking the stream-style log with a closing parenthesis.
+  // NOLINTNEXTLINE(misc-unconventional-assign-operator)
+  constexpr void operator=(const IgnoreLogs&) const {}
+};
+
+#define _PW_UNIT_TEST_LOG                     \
+  ::pw::unit_test::internal::ReturnHelper() = \
+      ::pw::unit_test::internal::IgnoreLogs()
+
 }  // namespace internal
 
 #if PW_CXX_STANDARD_IS_SUPPORTED(17)
@@ -542,12 +584,13 @@
                                                                             \
   void class_name::PigweedTestBody()
 
-#define _PW_TEST_ASSERT(expectation)                                           \
-  do {                                                                         \
-    if (!(expectation)) {                                                      \
-      return static_cast<void>(0); /* Prevent using ASSERT in constructors. */ \
-    }                                                                          \
-  } while (0)
+#define _PW_TEST_ASSERT(expectation) \
+  if (!(expectation))                \
+  return _PW_UNIT_TEST_LOG
+
+#define _PW_TEST_EXPECT(expectation) \
+  if (!(expectation))                \
+  _PW_UNIT_TEST_LOG
 
 #define _PW_TEST_BOOL(expr, value)                               \
   ::pw::unit_test::internal::Framework::Get().CurrentTestExpect( \
diff --git a/pw_unit_test/py/pw_unit_test/rpc.py b/pw_unit_test/py/pw_unit_test/rpc.py
index ac1c56a..459071e 100644
--- a/pw_unit_test/py/pw_unit_test/rpc.py
+++ b/pw_unit_test/py/pw_unit_test/rpc.py
@@ -17,7 +17,7 @@
 import abc
 from dataclasses import dataclass
 import logging
-from typing import Iterable
+from typing import Iterable, List, Tuple
 
 from pw_rpc.client import Services
 from pw_rpc.callback_client import OptionalTimeout, UseDefault
@@ -133,13 +133,28 @@
         log('        Actual: %s', expectation.evaluated_expression)
 
 
+@dataclass(frozen=True)
+class TestRecord:
+    """Class for recording test results."""
+
+    passing_tests: Tuple[TestCase, ...]
+    failing_tests: Tuple[TestCase, ...]
+    disabled_tests: Tuple[TestCase, ...]
+
+    def all_tests_passed(self) -> bool:
+        return not self.failing_tests
+
+    def __bool__(self) -> bool:
+        return self.all_tests_passed()
+
+
 def run_tests(
     rpcs: Services,
     report_passed_expectations: bool = False,
     test_suites: Iterable[str] = (),
     event_handlers: Iterable[EventHandler] = (LoggingEventHandler(),),
     timeout_s: OptionalTimeout = UseDefault.VALUE,
-) -> bool:
+) -> TestRecord:
     """Runs unit tests on a device over Pigweed RPC.
 
     Calls each of the provided event handlers as test events occur, and returns
@@ -174,39 +189,53 @@
     for event_handler in event_handlers:
         event_handler.run_all_tests_start()
 
-    all_tests_passed = False
+    passing_tests: List[TestCase] = []
+    failing_tests: List[TestCase] = []
+    disabled_tests: List[TestCase] = []
 
     for response in test_responses:
-        if response.HasField('test_case_start'):
-            raw_test_case = response.test_case_start
-            current_test_case = _test_case(raw_test_case)
-
-        for event_handler in event_handlers:
-            if response.HasField('test_run_start'):
+        if response.HasField('test_run_start'):
+            for event_handler in event_handlers:
                 event_handler.run_all_tests_start()
-            elif response.HasField('test_run_end'):
+        elif response.HasField('test_run_end'):
+            for event_handler in event_handlers:
                 event_handler.run_all_tests_end(
                     response.test_run_end.passed, response.test_run_end.failed
                 )
-                if response.test_run_end.failed == 0:
-                    all_tests_passed = True
-            elif response.HasField('test_case_start'):
+            assert len(passing_tests) == response.test_run_end.passed
+            assert len(failing_tests) == response.test_run_end.failed
+            test_record = TestRecord(
+                passing_tests=tuple(passing_tests),
+                failing_tests=tuple(failing_tests),
+                disabled_tests=tuple(disabled_tests),
+            )
+        elif response.HasField('test_case_start'):
+            raw_test_case = response.test_case_start
+            current_test_case = _test_case(raw_test_case)
+            for event_handler in event_handlers:
                 event_handler.test_case_start(current_test_case)
-            elif response.HasField('test_case_end'):
-                result = TestCaseResult(response.test_case_end)
+        elif response.HasField('test_case_end'):
+            result = TestCaseResult(response.test_case_end)
+            for event_handler in event_handlers:
                 event_handler.test_case_end(current_test_case, result)
-            elif response.HasField('test_case_disabled'):
-                event_handler.test_case_disabled(
-                    _test_case(response.test_case_disabled)
-                )
-            elif response.HasField('test_case_expectation'):
-                raw_expectation = response.test_case_expectation
-                expectation = TestExpectation(
-                    raw_expectation.expression,
-                    raw_expectation.evaluated_expression,
-                    raw_expectation.line_number,
-                    raw_expectation.success,
-                )
+            if result == TestCaseResult.SUCCESS:
+                passing_tests.append(current_test_case)
+            else:
+                failing_tests.append(current_test_case)
+        elif response.HasField('test_case_disabled'):
+            raw_test_case = response.test_case_disabled
+            current_test_case = _test_case(raw_test_case)
+            for event_handler in event_handlers:
+                event_handler.test_case_disabled(current_test_case)
+            disabled_tests.append(current_test_case)
+        elif response.HasField('test_case_expectation'):
+            raw_expectation = response.test_case_expectation
+            expectation = TestExpectation(
+                raw_expectation.expression,
+                raw_expectation.evaluated_expression,
+                raw_expectation.line_number,
+                raw_expectation.success,
+            )
+            for event_handler in event_handlers:
                 event_handler.test_case_expect(current_test_case, expectation)
-
-    return all_tests_passed
+    return test_record
diff --git a/pw_unit_test/test.gni b/pw_unit_test/test.gni
index aae8464..0719e7d 100644
--- a/pw_unit_test/test.gni
+++ b/pw_unit_test/test.gni
@@ -536,6 +536,7 @@
           "unit_tests",
           "action_tests",
           "perf_tests",
+          "fuzz_tests",
         ]
         walk_keys = [ "test_barrier" ]
         output_conversion = "json"
diff --git a/pw_varint/BUILD.bazel b/pw_varint/BUILD.bazel
index 30bbf60..4688787 100644
--- a/pw_varint/BUILD.bazel
+++ b/pw_varint/BUILD.bazel
@@ -26,6 +26,7 @@
     name = "pw_varint",
     srcs = [
         "varint.cc",
+        "varint_c.c",
     ],
     hdrs = [
         "public/pw_varint/varint.h",
@@ -35,6 +36,7 @@
         "//pw_polyfill",
         "//pw_preprocessor",
         "//pw_span",
+        "//third_party/fuchsia:stdcompat",
     ],
 )
 
diff --git a/pw_varint/BUILD.gn b/pw_varint/BUILD.gn
index 973e757..30f5e9a 100644
--- a/pw_varint/BUILD.gn
+++ b/pw_varint/BUILD.gn
@@ -16,6 +16,7 @@
 
 import("$dir_pw_build/target_types.gni")
 import("$dir_pw_docgen/docs.gni")
+import("$dir_pw_fuzzer/fuzz_test.gni")
 import("$dir_pw_unit_test/test.gni")
 
 config("default_config") {
@@ -25,12 +26,19 @@
 pw_source_set("pw_varint") {
   public_configs = [ ":default_config" ]
   public_deps = [
+    "$dir_pw_third_party/fuchsia:stdcompat",
     dir_pw_polyfill,
     dir_pw_preprocessor,
     dir_pw_span,
   ]
-  sources = [ "varint.cc" ]
+  sources = [
+    "varint.cc",
+    "varint_c.c",
+  ]
   public = [ "public/pw_varint/varint.h" ]
+
+  # TODO(b/259746255): Remove this when everything compiles with -Wconversion.
+  configs = [ "$dir_pw_build:conversion_warnings" ]
 }
 
 pw_source_set("stream") {
@@ -54,11 +62,8 @@
   ]
 }
 
-pw_test("varint_test") {
-  deps = [
-    ":pw_varint",
-    "$dir_pw_fuzzer:fuzztest",
-  ]
+pw_fuzz_test("varint_test") {
+  deps = [ ":pw_varint" ]
   sources = [
     "varint_test.cc",
     "varint_test_c.c",
diff --git a/pw_varint/CMakeLists.txt b/pw_varint/CMakeLists.txt
index 3a7f933..55cf742 100644
--- a/pw_varint/CMakeLists.txt
+++ b/pw_varint/CMakeLists.txt
@@ -24,8 +24,10 @@
     pw_polyfill
     pw_preprocessor
     pw_span
+    pw_third_party.fuchsia.stdcompat
   SOURCES
     varint.cc
+    varint_c.c
 )
 
 pw_add_library(pw_varint.stream STATIC
diff --git a/pw_varint/docs.rst b/pw_varint/docs.rst
index b9b465d..d294555 100644
--- a/pw_varint/docs.rst
+++ b/pw_varint/docs.rst
@@ -1,51 +1,70 @@
 .. _module-pw_varint:
 
----------
+=========
 pw_varint
----------
-The ``pw_varint`` module provides functions for encoding and decoding variable
-length integers, or varints. For smaller values, varints require less memory
-than a fixed-size encoding. For example, a 32-bit (4-byte) integer requires 1--5
-bytes when varint-encoded.
+=========
+.. doxygenfile:: pw_varint/varint.h
+   :sections: detaileddescription
 
-`Protocol Buffers <https://developers.google.com/protocol-buffers/docs/encoding#varints>`_
-use a variable-length encoding for integers.
-
+-------------
 Compatibility
-=============
+-------------
 * C
 * C++14 (with :doc:`../pw_polyfill/docs`)
 * `Rust </rustdoc/pw_varint>`_
 
-API
+-------------
+API Reference
+-------------
+
+.. _module-pw_varint-api-c:
+
+C
+=
+.. doxygendefine:: PW_VARINT_MAX_INT32_SIZE_BYTES
+.. doxygendefine:: PW_VARINT_MAX_INT64_SIZE_BYTES
+.. doxygenfunction:: pw_varint_Encode32
+.. doxygenfunction:: pw_varint_Encode64
+.. doxygenfunction:: pw_varint_Decode32
+.. doxygenfunction:: pw_varint_Decode64
+.. doxygenfunction:: pw_varint_ZigZagEncode32
+.. doxygenfunction:: pw_varint_ZigZagEncode64
+.. doxygenfunction:: pw_varint_ZigZagDecode32
+.. doxygenfunction:: pw_varint_ZigZagDecode64
+.. doxygendefine:: PW_VARINT_ENCODED_SIZE_BYTES
+.. doxygenfunction:: pw_varint_EncodedSizeBytes
+.. doxygenenum:: pw_varint_Format
+.. doxygenfunction:: pw_varint_EncodeCustom
+.. doxygenfunction:: pw_varint_DecodeCustom
+
+C++
 ===
-
-.. doxygenfunction:: pw::varint::EncodedSize(uint64_t integer)
-
-.. doxygenfunction:: pw::varint::ZigZagEncodedSize(int64_t integer)
-
+.. doxygenvariable:: pw::varint::kMaxVarint32SizeBytes
+.. doxygenvariable:: pw::varint::kMaxVarint64SizeBytes
+.. doxygenfunction:: pw::varint::ZigZagEncode
+.. doxygenfunction:: pw::varint::ZigZagDecode
+.. doxygenfunction:: pw::varint::EncodedSize
+.. doxygenfunction:: pw::varint::EncodeLittleEndianBase128
+.. doxygenfunction:: pw::varint::Encode(T integer, const span<std::byte> &output)
+.. doxygenfunction:: pw::varint::Decode(const span<const std::byte>& input, int64_t* output)
+.. doxygenfunction:: pw::varint::Decode(const span<const std::byte>& input, uint64_t* output)
 .. doxygenfunction:: pw::varint::MaxValueInBytes(size_t bytes)
+.. doxygenenum:: pw::varint::Format
+.. doxygenfunction:: pw::varint::Encode(uint64_t value, span<std::byte> output, Format format)
+.. doxygenfunction:: pw::varint::Decode(span<const std::byte> input, uint64_t* value, Format format)
 
 Stream API
 ----------
-
 .. doxygenfunction:: pw::varint::Read(stream::Reader& reader, uint64_t* output, size_t max_size)
-
 .. doxygenfunction:: pw::varint::Read(stream::Reader& reader, int64_t* output, size_t max_size)
 
-Dependencies
-============
-* ``pw_span``
-
-Zephyr
-======
-To enable ``pw_varint`` for Zephyr add ``CONFIG_PIGWEED_VARINT=y`` to the
-project's configuration.
-
 Rust
 ====
 ``pw_varint``'s Rust API is documented in our
 `rustdoc API docs </rustdoc/pw_varint>`_.
 
-..
-  TODO(b/280102965): Update above to point to rustdoc API docs
+------
+Zephyr
+------
+To enable ``pw_varint`` for Zephyr add ``CONFIG_PIGWEED_VARINT=y`` to the
+project's configuration.
diff --git a/pw_varint/public/pw_varint/varint.h b/pw_varint/public/pw_varint/varint.h
index 16689f8..9eac767 100644
--- a/pw_varint/public/pw_varint/varint.h
+++ b/pw_varint/public/pw_varint/varint.h
@@ -1,4 +1,4 @@
-// Copyright 2020 The Pigweed Authors
+// Copyright 2023 The Pigweed Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License"); you may not
 // use this file except in compliance with the License. You may obtain a copy of
@@ -13,6 +13,25 @@
 // the License.
 #pragma once
 
+/// @file pw_varint/varint.h
+///
+/// The `pw_varint` module provides functions for encoding and decoding variable
+/// length integers or varints. For smaller values, varints require less memory
+/// than a fixed-size encoding. For example, a 32-bit (4-byte) integer requires
+/// 1–5 bytes when varint-encoded.
+///
+/// `pw_varint` supports custom variable-length encodings with different
+/// terminator bit values and positions (@cpp_enum{pw::varint::Format}).
+/// The basic encoding for unsigned integers is Little Endian Base 128 (LEB128).
+/// ZigZag encoding is also supported, which maps negative integers to positive
+/// integers to improve encoding density for LEB128.
+///
+/// <a
+/// href=https://developers.google.com/protocol-buffers/docs/encoding#varints>
+/// Protocol Buffers</a> and @rstref{HDLC <module-pw_hdlc>} use variable-length
+/// integer encodings for integers.
+
+#include <stdbool.h>
 #include <stddef.h>
 #include <stdint.h>
 
@@ -22,8 +41,112 @@
 extern "C" {
 #endif
 
-// Expose a subset of the varint API for use in C code.
+/// Maximum size of an LEB128-encoded `uint32_t`.
+#define PW_VARINT_MAX_INT32_SIZE_BYTES 5
 
+/// Maximum size of an LEB128-encoded `uint64_t`.
+#define PW_VARINT_MAX_INT64_SIZE_BYTES 10
+
+/// Encodes a 32-bit integer as LEB128.
+///
+/// @returns the number of bytes written
+size_t pw_varint_Encode32(uint32_t integer,
+                          void* output,
+                          size_t output_size_bytes);
+
+/// Encodes a 64-bit integer as LEB128.
+///
+/// @returns the number of bytes written
+size_t pw_varint_Encode64(uint64_t integer,
+                          void* output,
+                          size_t output_size_bytes);
+
+/// Zig-zag encodes an `int32_t`, returning it as a `uint32_t`.
+static inline uint32_t pw_varint_ZigZagEncode32(int32_t n) {
+  return (uint32_t)((uint32_t)n << 1) ^ (uint32_t)(n >> (sizeof(n) * 8 - 1));
+}
+
+/// Zig-zag encodes an `int64_t`, returning it as a `uint64_t`.
+static inline uint64_t pw_varint_ZigZagEncode64(int64_t n) {
+  return (uint64_t)((uint64_t)n << 1) ^ (uint64_t)(n >> (sizeof(n) * 8 - 1));
+}
+
+/// Extracts and encodes 7 bits from the integer. Sets the top bit to indicate
+/// more data is coming, which must be cleared if this was the last byte.
+static inline uint8_t pw_varint_EncodeOneByte32(uint32_t* integer) {
+  const uint8_t bits = (uint8_t)((*integer & 0x7Fu) | 0x80u);
+  *integer >>= 7;
+  return bits;
+}
+
+/// @copydoc pw_varint_EncodeOneByte32
+static inline uint8_t pw_varint_EncodeOneByte64(uint64_t* integer) {
+  const uint8_t bits = (uint8_t)((*integer & 0x7Fu) | 0x80u);
+  *integer >>= 7;
+  return bits;
+}
+
+/// Zig-zag decodes a `uint64_t`, returning it as an `int64_t`.
+static inline int32_t pw_varint_ZigZagDecode32(uint32_t n)
+    PW_NO_SANITIZE("unsigned-integer-overflow") {
+  return (int32_t)((n >> 1) ^ (~(n & 1) + 1));
+}
+
+/// Zig-zag decodes a `uint64_t`, returning it as an `int64_t`.
+static inline int64_t pw_varint_ZigZagDecode64(uint64_t n)
+    PW_NO_SANITIZE("unsigned-integer-overflow") {
+  return (int64_t)((n >> 1) ^ (~(n & 1) + 1));
+}
+
+/// Decodes an LEB128-encoded integer to a `uint32_t`.
+/// @returns the number of bytes read; 0 if decoding failed
+size_t pw_varint_Decode32(const void* input,
+                          size_t input_size_bytes,
+                          uint32_t* output);
+
+/// Decodes an LEB128-encoded integer to a `uint64_t`.
+/// @returns the number of bytes read; 0 if decoding failed
+size_t pw_varint_Decode64(const void* input,
+                          size_t input_size_bytes,
+                          uint64_t* output);
+
+/// Decodes one byte of an LEB128-encoded integer to a `uint32_t`.
+/// @returns true if there is more data to decode (top bit is set).
+static inline bool pw_varint_DecodeOneByte32(uint8_t byte,
+                                             size_t count,
+                                             uint32_t* value) {
+  *value |= (uint32_t)(byte & 0x7fu) << (count * 7);
+  return (byte & 0x80u) != 0u;
+}
+
+/// Decodes one byte of an LEB128-encoded integer to a `uint64_t`.
+/// @returns true if there is more data to decode (top bit is set).
+static inline bool pw_varint_DecodeOneByte64(uint8_t byte,
+                                             size_t count,
+                                             uint64_t* value) {
+  *value |= (uint64_t)(byte & 0x7fu) << (count * 7);
+  return (byte & 0x80u) != 0u;
+}
+
+/// Macro that returns the encoded size of up to a 64-bit integer. This is
+/// inefficient, but is a constant expression if the input is a constant. Use
+/// `pw_varint_EncodedSizeBytes` for runtime encoded size calculation.
+#define PW_VARINT_ENCODED_SIZE_BYTES(value)        \
+  ((unsigned long long)value < (1u << 7)      ? 1u \
+   : (unsigned long long)value < (1u << 14)   ? 2u \
+   : (unsigned long long)value < (1u << 21)   ? 3u \
+   : (unsigned long long)value < (1u << 28)   ? 4u \
+   : (unsigned long long)value < (1llu << 35) ? 5u \
+   : (unsigned long long)value < (1llu << 42) ? 6u \
+   : (unsigned long long)value < (1llu << 49) ? 7u \
+   : (unsigned long long)value < (1llu << 56) ? 8u \
+   : (unsigned long long)value < (1llu << 63) ? 9u \
+                                              : 10u)
+
+/// Returns the size of a `uint64_t` when encoded as a varint (LEB128).
+size_t pw_varint_EncodedSizeBytes(uint64_t integer);
+
+/// Describes a custom varint format.
 typedef enum {
   PW_VARINT_ZERO_TERMINATED_LEAST_SIGNIFICANT = 0,
   PW_VARINT_ZERO_TERMINATED_MOST_SIGNIFICANT = 1,
@@ -31,41 +154,18 @@
   PW_VARINT_ONE_TERMINATED_MOST_SIGNIFICANT = 3,
 } pw_varint_Format;
 
+/// Encodes a `uint64_t` using a custom varint format.
 size_t pw_varint_EncodeCustom(uint64_t integer,
                               void* output,
                               size_t output_size,
                               pw_varint_Format format);
+
+/// Decodes a `uint64_t` using a custom varint format.
 size_t pw_varint_DecodeCustom(const void* input,
                               size_t input_size,
                               uint64_t* output,
                               pw_varint_Format format);
 
-static inline size_t pw_varint_Encode(uint64_t integer,
-                                      void* output,
-                                      size_t output_size) {
-  return pw_varint_EncodeCustom(
-      integer, output, output_size, PW_VARINT_ZERO_TERMINATED_MOST_SIGNIFICANT);
-}
-
-size_t pw_varint_ZigZagEncode(int64_t integer,
-                              void* output,
-                              size_t output_size);
-
-static inline size_t pw_varint_Decode(const void* input,
-                                      size_t input_size,
-                                      uint64_t* output) {
-  return pw_varint_DecodeCustom(
-      input, input_size, output, PW_VARINT_ZERO_TERMINATED_MOST_SIGNIFICANT);
-}
-
-size_t pw_varint_ZigZagDecode(const void* input,
-                              size_t input_size,
-                              int64_t* output);
-
-// Returns the size of an when encoded as a varint.
-size_t pw_varint_EncodedSize(uint64_t integer);
-size_t pw_varint_ZigZagEncodedSize(int64_t integer);
-
 #ifdef __cplusplus
 
 }  // extern "C"
@@ -73,26 +173,34 @@
 #include <limits>
 #include <type_traits>
 
+#include "lib/stdcompat/bit.h"
 #include "pw_polyfill/language_feature_macros.h"
 #include "pw_span/span.h"
 
 namespace pw {
 namespace varint {
 
-// The maximum number of bytes occupied by an encoded varint.
-PW_INLINE_VARIABLE constexpr size_t kMaxVarint32SizeBytes = 5;
-PW_INLINE_VARIABLE constexpr size_t kMaxVarint64SizeBytes = 10;
+/// Maximum size of a varint (LEB128) encoded `uint32_t`.
+PW_INLINE_VARIABLE constexpr size_t kMaxVarint32SizeBytes =
+    PW_VARINT_MAX_INT32_SIZE_BYTES;
 
-// ZigZag encodes a signed integer. This maps small negative numbers to small,
-// unsigned positive numbers, which improves their density for LEB128 encoding.
-//
-// ZigZag encoding works by moving the sign bit from the most-significant bit to
-// the least-significant bit. For the signed k-bit integer n, the formula is
-//
-//   (n << 1) ^ (n >> (k - 1))
-//
-// See the following for a description of ZigZag encoding:
-//   https://developers.google.com/protocol-buffers/docs/encoding#types
+/// Maximum size of a varint (LEB128) encoded `uint64_t`.
+PW_INLINE_VARIABLE constexpr size_t kMaxVarint64SizeBytes =
+    PW_VARINT_MAX_INT64_SIZE_BYTES;
+
+/// ZigZag encodes a signed integer. This maps small negative numbers to small,
+/// unsigned positive numbers, which improves their density for LEB128 encoding.
+///
+/// ZigZag encoding works by moving the sign bit from the most-significant bit
+/// to the least-significant bit. For the signed `k`-bit integer `n`, the
+/// formula is:
+///
+/// @code{.cpp}
+///   (n << 1) ^ (n >> (k - 1))
+/// @endcode
+///
+/// See the following for a description of ZigZag encoding:
+///   https://developers.google.com/protocol-buffers/docs/encoding#types
 template <typename T>
 constexpr std::make_unsigned_t<T> ZigZagEncode(T n) {
   static_assert(std::is_signed<T>(), "Zig-zag encoding is for signed integers");
@@ -101,9 +209,10 @@
          static_cast<U>(n >> (sizeof(T) * 8 - 1));
 }
 
-// ZigZag decodes a signed integer.
-// The calculation is done modulo std::numeric_limits<T>::max()+1, so the
-// unsigned integer overflows are intentional.
+/// ZigZag decodes a signed integer.
+///
+/// The calculation is done modulo `std::numeric_limits<T>::max()+1`, so the
+/// unsigned integer overflows are intentional.
 template <typename T>
 constexpr std::make_signed_t<T> ZigZagDecode(T n)
     PW_NO_SANITIZE("unsigned-integer-overflow") {
@@ -112,88 +221,6 @@
   return static_cast<std::make_signed_t<T>>((n >> 1) ^ (~(n & 1) + 1));
 }
 
-// Encodes a uint64_t with Little-Endian Base 128 (LEB128) encoding.
-inline size_t EncodeLittleEndianBase128(uint64_t integer,
-                                        const span<std::byte>& output) {
-  return pw_varint_Encode(integer, output.data(), output.size());
-}
-
-// Encodes the provided integer using a variable-length encoding and returns the
-// number of bytes written.
-//
-// The encoding is the same as used in protocol buffers. Signed integers are
-// ZigZag encoded to remove leading 1s from small negative numbers, then the
-// resulting number is encoded as Little Endian Base 128 (LEB128). Unsigned
-// integers are encoded directly as LEB128.
-//
-// Returns the number of bytes written or 0 if the result didn't fit in the
-// encoding buffer.
-template <typename T>
-size_t Encode(T integer, const span<std::byte>& output) {
-  if (std::is_signed<T>()) {
-    using Signed =
-        std::conditional_t<std::is_signed<T>::value, T, std::make_signed_t<T>>;
-    return pw_varint_ZigZagEncode(
-        static_cast<Signed>(integer), output.data(), output.size());
-  } else {
-    using Unsigned = std::
-        conditional_t<std::is_signed<T>::value, std::make_unsigned_t<T>, T>;
-    return pw_varint_Encode(
-        static_cast<Unsigned>(integer), output.data(), output.size());
-  }
-}
-
-// Decodes a varint-encoded value. If reading into a signed integer, the value
-// is ZigZag decoded.
-//
-// Returns the number of bytes read from the input if successful. Returns zero
-// if the result does not fit in a int64_t / uint64_t or if the input is
-// exhausted before the number terminates. Reads a maximum of 10 bytes.
-//
-// The following example decodes multiple varints from a buffer:
-//
-//   while (!data.empty()) {
-//     int64_t value;
-//     size_t bytes = Decode(data, &value);
-//
-//     if (bytes == 0u) {
-//       return Status::DataLoss();
-//     }
-//     results.push_back(value);
-//     data = data.subspan(bytes)
-//   }
-//
-inline size_t Decode(const span<const std::byte>& input, int64_t* value) {
-  return pw_varint_ZigZagDecode(input.data(), input.size(), value);
-}
-
-inline size_t Decode(const span<const std::byte>& input, uint64_t* value) {
-  return pw_varint_Decode(input.data(), input.size(), value);
-}
-
-enum class Format {
-  kZeroTerminatedLeastSignificant = PW_VARINT_ZERO_TERMINATED_LEAST_SIGNIFICANT,
-  kZeroTerminatedMostSignificant = PW_VARINT_ZERO_TERMINATED_MOST_SIGNIFICANT,
-  kOneTerminatedLeastSignificant = PW_VARINT_ONE_TERMINATED_LEAST_SIGNIFICANT,
-  kOneTerminatedMostSignificant = PW_VARINT_ONE_TERMINATED_MOST_SIGNIFICANT,
-};
-
-// Encodes a varint in a custom format.
-inline size_t Encode(uint64_t value, span<std::byte> output, Format format) {
-  return pw_varint_EncodeCustom(value,
-                                output.data(),
-                                output.size(),
-                                static_cast<pw_varint_Format>(format));
-}
-
-// Decodes a varint from a custom format.
-inline size_t Decode(span<const std::byte> input,
-                     uint64_t* value,
-                     Format format) {
-  return pw_varint_DecodeCustom(
-      input.data(), input.size(), value, static_cast<pw_varint_Format>(format));
-}
-
 /// @brief Computes the size of an integer when encoded as a varint.
 ///
 /// @param integer The integer whose encoded size is to be computed. `integer`
@@ -204,38 +231,101 @@
           typename = std::enable_if_t<std::is_integral<T>::value ||
                                       std::is_convertible<T, uint64_t>::value>>
 constexpr size_t EncodedSize(T integer) {
-  return integer == 0 ? 1
-                      : (64 -
-                         static_cast<size_t>(
-                             __builtin_clzll(static_cast<uint64_t>(integer))) +
-                         6) /
-                            7;
+  if (integer == 0) {
+    return 1;
+  }
+  return static_cast<size_t>(
+      (64 - cpp20::countl_zero(static_cast<uint64_t>(integer)) + 6) / 7);
 }
 
-/// @brief Computes the size of an integer when encoded as a varint.
-///
-/// @param integer The integer whose encoded size is to be computed. `integer`
-/// can be signed or unsigned.
-///
-/// @returns The size of `integer` when encoded as a varint.
-constexpr size_t EncodedSize(uint64_t integer) {
-  return integer == 0 ? 1
-                      : (64 -
-                         static_cast<size_t>(
-                             __builtin_clzll(static_cast<uint64_t>(integer))) +
-                         6) /
-                            7;
+/// Encodes a `uint64_t` with Little-Endian Base 128 (LEB128) encoding.
+/// @returns the number of bytes written; 0 if the buffer is too small
+inline size_t EncodeLittleEndianBase128(uint64_t integer,
+                                        const span<std::byte>& output) {
+  return pw_varint_Encode64(integer, output.data(), output.size());
 }
 
-/// @brief Returns the size of a signed integer when
-/// [ZigZag](https://protobuf.dev/programming-guides/encoding/#signed-ints)-encoded
-/// as a variable-length integer (varint).
+/// Encodes the provided integer using a variable-length encoding and returns
+/// the number of bytes written.
 ///
-/// @param integer The signed integer that will be ZigZag-encoded as a varint.
+/// The encoding is the same as used in protocol buffers. Signed integers are
+/// ZigZag encoded to remove leading 1s from small negative numbers, then the
+/// resulting number is encoded as Little Endian Base 128 (LEB128). Unsigned
+/// integers are encoded directly as LEB128.
 ///
-/// @returns The size of `integer` when ZigZag-encoded as a varint.
-constexpr size_t ZigZagEncodedSize(int64_t integer) {
-  return EncodedSize(ZigZagEncode(integer));
+/// Returns the number of bytes written or 0 if the result didn't fit in the
+/// encoding buffer.
+template <typename T>
+size_t Encode(T integer, const span<std::byte>& output) {
+  if (std::is_signed<T>()) {
+    using Signed =
+        std::conditional_t<std::is_signed<T>::value, T, std::make_signed_t<T>>;
+    return EncodeLittleEndianBase128(ZigZagEncode(static_cast<Signed>(integer)),
+                                     output);
+  } else {
+    using Unsigned = std::
+        conditional_t<std::is_signed<T>::value, std::make_unsigned_t<T>, T>;
+    return EncodeLittleEndianBase128(static_cast<Unsigned>(integer), output);
+  }
+}
+
+/// Decodes a varint-encoded value. If reading into a signed integer, the value
+/// is ZigZag decoded.
+///
+/// Returns the number of bytes read from the input if successful. Returns zero
+/// if the result does not fit in an `int64_t` / `uint64_t` or if the input is
+/// exhausted before the number terminates. Reads a maximum of 10 bytes.
+///
+/// The following example decodes multiple varints from a buffer:
+///
+/// @code{.cpp}
+///
+///   while (!data.empty()) {
+///     int64_t value;
+///     size_t bytes = Decode(data, &value);
+///
+///     if (bytes == 0u) {
+///       return Status::DataLoss();
+///     }
+///     results.push_back(value);
+///     data = data.subspan(bytes)
+///   }
+///
+/// @endcode
+inline size_t Decode(const span<const std::byte>& input, int64_t* output) {
+  uint64_t value = 0;
+  size_t bytes_read = pw_varint_Decode64(input.data(), input.size(), &value);
+  *output = pw_varint_ZigZagDecode64(value);
+  return bytes_read;
+}
+
+/// @overload
+inline size_t Decode(const span<const std::byte>& input, uint64_t* value) {
+  return pw_varint_Decode64(input.data(), input.size(), value);
+}
+
+/// Describes a custom varint format.
+enum class Format {
+  kZeroTerminatedLeastSignificant = PW_VARINT_ZERO_TERMINATED_LEAST_SIGNIFICANT,
+  kZeroTerminatedMostSignificant = PW_VARINT_ZERO_TERMINATED_MOST_SIGNIFICANT,
+  kOneTerminatedLeastSignificant = PW_VARINT_ONE_TERMINATED_LEAST_SIGNIFICANT,
+  kOneTerminatedMostSignificant = PW_VARINT_ONE_TERMINATED_MOST_SIGNIFICANT,
+};
+
+/// Encodes a varint in a custom format.
+inline size_t Encode(uint64_t value, span<std::byte> output, Format format) {
+  return pw_varint_EncodeCustom(value,
+                                output.data(),
+                                output.size(),
+                                static_cast<pw_varint_Format>(format));
+}
+
+/// Decodes a varint from a custom format.
+inline size_t Decode(span<const std::byte> input,
+                     uint64_t* value,
+                     Format format) {
+  return pw_varint_DecodeCustom(
+      input.data(), input.size(), value, static_cast<pw_varint_Format>(format));
 }
 
 /// @brief Returns the maximum (max) integer value that can be encoded as a
diff --git a/pw_varint/varint.cc b/pw_varint/varint.cc
index 4ae65cd..e10bb73 100644
--- a/pw_varint/varint.cc
+++ b/pw_varint/varint.cc
@@ -122,42 +122,9 @@
   return count;
 }
 
-// TODO(frolv): Remove this deprecated alias.
-extern "C" size_t pw_VarintEncode(uint64_t integer,
-                                  void* output,
-                                  size_t output_size) {
-  return pw_varint_Encode(integer, output, output_size);
-}
-
-extern "C" size_t pw_varint_ZigZagEncode(int64_t integer,
-                                         void* output,
-                                         size_t output_size) {
-  return pw_varint_Encode(ZigZagEncode(integer), output, output_size);
-}
-
-// TODO(frolv): Remove this deprecated alias.
-extern "C" size_t pw_VarintDecode(const void* input,
-                                  size_t input_size,
-                                  uint64_t* output) {
-  return pw_varint_Decode(input, input_size, output);
-}
-
-extern "C" size_t pw_varint_ZigZagDecode(const void* input,
-                                         size_t input_size,
-                                         int64_t* output) {
-  uint64_t value = 0;
-  size_t bytes = pw_varint_Decode(input, input_size, &value);
-  *output = ZigZagDecode(value);
-  return bytes;
-}
-
-extern "C" size_t pw_varint_EncodedSize(uint64_t integer) {
+extern "C" size_t pw_varint_EncodedSizeBytes(uint64_t integer) {
   return EncodedSize(integer);
 }
 
-extern "C" size_t pw_varint_ZigZagEncodedSize(int64_t integer) {
-  return ZigZagEncodedSize(integer);
-}
-
 }  // namespace varint
 }  // namespace pw
diff --git a/pw_varint/varint_c.c b/pw_varint/varint_c.c
new file mode 100644
index 0000000..9194b4e
--- /dev/null
+++ b/pw_varint/varint_c.c
@@ -0,0 +1,77 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_varint/varint.h"
+
+#define VARINT_ENCODE_FUNCTION_BODY(bits)                        \
+  size_t written = 0;                                            \
+  uint8_t* buffer = (uint8_t*)output;                            \
+                                                                 \
+  do {                                                           \
+    if (written >= output_size_bytes) {                          \
+      return 0u;                                                 \
+    }                                                            \
+    buffer[written++] = pw_varint_EncodeOneByte##bits(&integer); \
+  } while (integer != 0u);                                       \
+                                                                 \
+  buffer[written - 1] &= 0x7f;                                   \
+  return written
+
+size_t pw_varint_Encode32(uint32_t integer,
+                          void* output,
+                          size_t output_size_bytes) {
+  VARINT_ENCODE_FUNCTION_BODY(32);
+}
+
+size_t pw_varint_Encode64(uint64_t integer,
+                          void* output,
+                          size_t output_size_bytes) {
+  VARINT_ENCODE_FUNCTION_BODY(64);
+}
+
+#define VARINT_DECODE_FUNCTION_BODY(bits)                                     \
+  uint##bits##_t value = 0;                                                   \
+  size_t count = 0;                                                           \
+  const uint8_t* buffer = (const uint8_t*)(input);                            \
+                                                                              \
+  /* Only read to the end of the buffer or largest possible encoded size. */  \
+  const size_t max_count =                                                    \
+      input_size_bytes < PW_VARINT_MAX_INT##bits##_SIZE_BYTES                 \
+          ? input_size_bytes                                                  \
+          : PW_VARINT_MAX_INT##bits##_SIZE_BYTES;                             \
+                                                                              \
+  bool keep_going;                                                            \
+  do {                                                                        \
+    if (count >= max_count) {                                                 \
+      return 0;                                                               \
+    }                                                                         \
+                                                                              \
+    keep_going = pw_varint_DecodeOneByte##bits(buffer[count], count, &value); \
+    count += 1;                                                               \
+  } while (keep_going);                                                       \
+                                                                              \
+  *output = value;                                                            \
+  return count
+
+size_t pw_varint_Decode32(const void* input,
+                          size_t input_size_bytes,
+                          uint32_t* output) {
+  VARINT_DECODE_FUNCTION_BODY(32);
+}
+
+size_t pw_varint_Decode64(const void* input,
+                          size_t input_size_bytes,
+                          uint64_t* output) {
+  VARINT_DECODE_FUNCTION_BODY(64);
+}
diff --git a/pw_varint/varint_test.cc b/pw_varint/varint_test.cc
index 45676be..02bfd00 100644
--- a/pw_varint/varint_test.cc
+++ b/pw_varint/varint_test.cc
@@ -28,14 +28,20 @@
 extern "C" {
 
 // Functions defined in varint_test.c which call the varint API from C.
-size_t pw_varint_CallEncode(uint64_t integer, void* output, size_t output_size);
-size_t pw_varint_CallZigZagEncode(int64_t integer,
-                                  void* output,
-                                  size_t output_size);
-size_t pw_varint_CallDecode(void* input, size_t input_size, uint64_t* output);
-size_t pw_varint_CallZigZagDecode(void* input,
-                                  size_t input_size,
-                                  int64_t* output);
+size_t pw_varint_CallEncode32(uint32_t integer,
+                              void* output,
+                              size_t output_size_bytes);
+size_t pw_varint_CallEncode64(uint64_t integer,
+                              void* output,
+                              size_t output_size_bytes);
+size_t pw_varint_CallZigZagAndVarintEncode64(int64_t integer,
+                                             void* output,
+                                             size_t output_size_bytes);
+size_t pw_varint_CallDecode32(void* input, size_t input_size, uint32_t* output);
+size_t pw_varint_CallDecode64(void* input, size_t input_size, uint64_t* output);
+size_t pw_varint_CallZigZagAndVarintDecode64(void* input,
+                                             size_t input_size,
+                                             int64_t* output);
 
 }  // extern "C"
 
@@ -65,11 +71,11 @@
 }
 
 TEST_F(VarintWithBuffer, EncodeSizeUnsigned32_SmallSingleByte_C) {
-  ASSERT_EQ(1u, pw_varint_CallEncode(UINT32_C(0), buffer_, sizeof(buffer_)));
+  ASSERT_EQ(1u, pw_varint_CallEncode64(UINT32_C(0), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::byte{0}, buffer_[0]);
-  ASSERT_EQ(1u, pw_varint_CallEncode(UINT32_C(1), buffer_, sizeof(buffer_)));
+  ASSERT_EQ(1u, pw_varint_CallEncode64(UINT32_C(1), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::byte{1}, buffer_[0]);
-  ASSERT_EQ(1u, pw_varint_CallEncode(UINT32_C(2), buffer_, sizeof(buffer_)));
+  ASSERT_EQ(1u, pw_varint_CallEncode64(UINT32_C(2), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::byte{2}, buffer_[0]);
 }
 
@@ -85,13 +91,15 @@
 }
 
 TEST_F(VarintWithBuffer, EncodeSizeUnsigned32_LargeSingleByte_C) {
-  ASSERT_EQ(1u, pw_varint_CallEncode(UINT32_C(63), buffer_, sizeof(buffer_)));
+  ASSERT_EQ(1u, pw_varint_CallEncode64(UINT32_C(63), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::byte{63}, buffer_[0]);
-  ASSERT_EQ(1u, pw_varint_CallEncode(UINT32_C(64), buffer_, sizeof(buffer_)));
+  ASSERT_EQ(1u, pw_varint_CallEncode64(UINT32_C(64), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::byte{64}, buffer_[0]);
-  ASSERT_EQ(1u, pw_varint_CallEncode(UINT32_C(126), buffer_, sizeof(buffer_)));
+  ASSERT_EQ(1u,
+            pw_varint_CallEncode64(UINT32_C(126), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::byte{126}, buffer_[0]);
-  ASSERT_EQ(1u, pw_varint_CallEncode(UINT32_C(127), buffer_, sizeof(buffer_)));
+  ASSERT_EQ(1u,
+            pw_varint_CallEncode64(UINT32_C(127), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::byte{127}, buffer_[0]);
 }
 
@@ -109,20 +117,42 @@
 }
 
 TEST_F(VarintWithBuffer, EncodeSizeUnsigned32_MultiByte_C) {
-  ASSERT_EQ(2u, pw_varint_CallEncode(UINT32_C(128), buffer_, sizeof(buffer_)));
+  ASSERT_EQ(2u,
+            pw_varint_CallEncode64(UINT32_C(128), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::memcmp("\x80\x01", buffer_, 2), 0);
-  ASSERT_EQ(2u, pw_varint_CallEncode(UINT32_C(129), buffer_, sizeof(buffer_)));
+  ASSERT_EQ(2u,
+            pw_varint_CallEncode64(UINT32_C(129), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::memcmp("\x81\x01", buffer_, 2), 0);
 
   ASSERT_EQ(
       5u,
-      pw_varint_CallEncode(
+      pw_varint_CallEncode32(
           std::numeric_limits<uint32_t>::max() - 1, buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::memcmp("\xfe\xff\xff\xff\x0f", buffer_, 5), 0);
 
   ASSERT_EQ(
       5u,
-      pw_varint_CallEncode(
+      pw_varint_CallEncode32(
+          std::numeric_limits<uint32_t>::max(), buffer_, sizeof(buffer_)));
+  EXPECT_EQ(std::memcmp("\xff\xff\xff\xff\x0f", buffer_, 5), 0);
+
+  // Call with the 64-bit function as well
+  ASSERT_EQ(2u,
+            pw_varint_CallEncode64(UINT32_C(128), buffer_, sizeof(buffer_)));
+  EXPECT_EQ(std::memcmp("\x80\x01", buffer_, 2), 0);
+  ASSERT_EQ(2u,
+            pw_varint_CallEncode64(UINT32_C(129), buffer_, sizeof(buffer_)));
+  EXPECT_EQ(std::memcmp("\x81\x01", buffer_, 2), 0);
+
+  ASSERT_EQ(
+      5u,
+      pw_varint_CallEncode64(
+          std::numeric_limits<uint32_t>::max() - 1, buffer_, sizeof(buffer_)));
+  EXPECT_EQ(std::memcmp("\xfe\xff\xff\xff\x0f", buffer_, 5), 0);
+
+  ASSERT_EQ(
+      5u,
+      pw_varint_CallEncode64(
           std::numeric_limits<uint32_t>::max(), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::memcmp("\xff\xff\xff\xff\x0f", buffer_, 5), 0);
 }
@@ -142,19 +172,24 @@
 
 TEST_F(VarintWithBuffer, EncodeSizeSigned32_SmallSingleByte_C) {
   ASSERT_EQ(1u,
-            pw_varint_CallZigZagEncode(INT32_C(0), buffer_, sizeof(buffer_)));
+            pw_varint_CallZigZagAndVarintEncode64(
+                INT32_C(0), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::byte{0}, buffer_[0]);
   ASSERT_EQ(1u,
-            pw_varint_CallZigZagEncode(INT32_C(-1), buffer_, sizeof(buffer_)));
+            pw_varint_CallZigZagAndVarintEncode64(
+                INT32_C(-1), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::byte{1}, buffer_[0]);
   ASSERT_EQ(1u,
-            pw_varint_CallZigZagEncode(INT32_C(1), buffer_, sizeof(buffer_)));
+            pw_varint_CallZigZagAndVarintEncode64(
+                INT32_C(1), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::byte{2}, buffer_[0]);
   ASSERT_EQ(1u,
-            pw_varint_CallZigZagEncode(INT32_C(-2), buffer_, sizeof(buffer_)));
+            pw_varint_CallZigZagAndVarintEncode64(
+                INT32_C(-2), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::byte{3}, buffer_[0]);
   ASSERT_EQ(1u,
-            pw_varint_CallZigZagEncode(INT32_C(2), buffer_, sizeof(buffer_)));
+            pw_varint_CallZigZagAndVarintEncode64(
+                INT32_C(2), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::byte{4}, buffer_[0]);
 }
 
@@ -169,13 +204,16 @@
 
 TEST_F(VarintWithBuffer, EncodeSizeSigned32_LargeSingleByte_C) {
   ASSERT_EQ(1u,
-            pw_varint_CallZigZagEncode(INT32_C(-63), buffer_, sizeof(buffer_)));
+            pw_varint_CallZigZagAndVarintEncode64(
+                INT32_C(-63), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::byte{125}, buffer_[0]);
   ASSERT_EQ(1u,
-            pw_varint_CallZigZagEncode(INT32_C(63), buffer_, sizeof(buffer_)));
+            pw_varint_CallZigZagAndVarintEncode64(
+                INT32_C(63), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::byte{126}, buffer_[0]);
   ASSERT_EQ(1u,
-            pw_varint_CallZigZagEncode(INT32_C(-64), buffer_, sizeof(buffer_)));
+            pw_varint_CallZigZagAndVarintEncode64(
+                INT32_C(-64), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::byte{127}, buffer_[0]);
 }
 
@@ -196,22 +234,25 @@
 
 TEST_F(VarintWithBuffer, EncodeSizeSigned32_MultiByte_C) {
   ASSERT_EQ(2u,
-            pw_varint_CallZigZagEncode(INT32_C(64), buffer_, sizeof(buffer_)));
+            pw_varint_CallZigZagAndVarintEncode64(
+                INT32_C(64), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::memcmp("\x80\x01", buffer_, 2), 0);
   ASSERT_EQ(2u,
-            pw_varint_CallZigZagEncode(INT32_C(-65), buffer_, sizeof(buffer_)));
+            pw_varint_CallZigZagAndVarintEncode64(
+                INT32_C(-65), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::memcmp("\x81\x01", buffer_, 2), 0);
   ASSERT_EQ(2u,
-            pw_varint_CallZigZagEncode(INT32_C(65), buffer_, sizeof(buffer_)));
+            pw_varint_CallZigZagAndVarintEncode64(
+                INT32_C(65), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::memcmp("\x82\x01", buffer_, 2), 0);
 
   ASSERT_EQ(5u,
-            pw_varint_CallZigZagEncode(
+            pw_varint_CallZigZagAndVarintEncode64(
                 std::numeric_limits<int32_t>::min(), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::memcmp("\xff\xff\xff\xff\x0f", buffer_, 5), 0);
 
   ASSERT_EQ(5u,
-            pw_varint_CallZigZagEncode(
+            pw_varint_CallZigZagAndVarintEncode64(
                 std::numeric_limits<int32_t>::max(), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::memcmp("\xfe\xff\xff\xff\x0f", buffer_, 5), 0);
 }
@@ -226,11 +267,11 @@
 }
 
 TEST_F(VarintWithBuffer, EncodeSizeUnsigned64_SmallSingleByte_C) {
-  ASSERT_EQ(1u, pw_varint_CallEncode(UINT64_C(0), buffer_, sizeof(buffer_)));
+  ASSERT_EQ(1u, pw_varint_CallEncode64(UINT64_C(0), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::byte{0}, buffer_[0]);
-  ASSERT_EQ(1u, pw_varint_CallEncode(UINT64_C(1), buffer_, sizeof(buffer_)));
+  ASSERT_EQ(1u, pw_varint_CallEncode64(UINT64_C(1), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::byte{1}, buffer_[0]);
-  ASSERT_EQ(1u, pw_varint_CallEncode(UINT64_C(2), buffer_, sizeof(buffer_)));
+  ASSERT_EQ(1u, pw_varint_CallEncode64(UINT64_C(2), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::byte{2}, buffer_[0]);
 }
 
@@ -246,13 +287,15 @@
 }
 
 TEST_F(VarintWithBuffer, EncodeSizeUnsigned64_LargeSingleByte_C) {
-  ASSERT_EQ(1u, pw_varint_CallEncode(UINT64_C(63), buffer_, sizeof(buffer_)));
+  ASSERT_EQ(1u, pw_varint_CallEncode64(UINT64_C(63), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::byte{63}, buffer_[0]);
-  ASSERT_EQ(1u, pw_varint_CallEncode(UINT64_C(64), buffer_, sizeof(buffer_)));
+  ASSERT_EQ(1u, pw_varint_CallEncode64(UINT64_C(64), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::byte{64}, buffer_[0]);
-  ASSERT_EQ(1u, pw_varint_CallEncode(UINT64_C(126), buffer_, sizeof(buffer_)));
+  ASSERT_EQ(1u,
+            pw_varint_CallEncode64(UINT64_C(126), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::byte{126}, buffer_[0]);
-  ASSERT_EQ(1u, pw_varint_CallEncode(UINT64_C(127), buffer_, sizeof(buffer_)));
+  ASSERT_EQ(1u,
+            pw_varint_CallEncode64(UINT64_C(127), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::byte{127}, buffer_[0]);
 }
 
@@ -278,33 +321,35 @@
 }
 
 TEST_F(VarintWithBuffer, EncodeSizeUnsigned64_MultiByte_C) {
-  ASSERT_EQ(2u, pw_varint_CallEncode(UINT64_C(128), buffer_, sizeof(buffer_)));
+  ASSERT_EQ(2u,
+            pw_varint_CallEncode64(UINT64_C(128), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::memcmp("\x80\x01", buffer_, 2), 0);
-  ASSERT_EQ(2u, pw_varint_CallEncode(UINT64_C(129), buffer_, sizeof(buffer_)));
+  ASSERT_EQ(2u,
+            pw_varint_CallEncode64(UINT64_C(129), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::memcmp("\x81\x01", buffer_, 2), 0);
 
   ASSERT_EQ(
       5u,
-      pw_varint_CallEncode(
+      pw_varint_CallEncode64(
           std::numeric_limits<uint32_t>::max() - 1, buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::memcmp("\xfe\xff\xff\xff\x0f", buffer_, 5), 0);
 
   ASSERT_EQ(
       5u,
-      pw_varint_CallEncode(
+      pw_varint_CallEncode64(
           std::numeric_limits<uint32_t>::max(), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::memcmp("\xff\xff\xff\xff\x0f", buffer_, 5), 0);
 
   ASSERT_EQ(
       10u,
-      pw_varint_CallEncode(
+      pw_varint_CallEncode64(
           std::numeric_limits<uint64_t>::max() - 1, buffer_, sizeof(buffer_)));
   EXPECT_EQ(
       std::memcmp("\xfe\xff\xff\xff\xff\xff\xff\xff\xff\x01", buffer_, 10), 0);
 
   ASSERT_EQ(
       10u,
-      pw_varint_CallEncode(
+      pw_varint_CallEncode64(
           std::numeric_limits<uint64_t>::max(), buffer_, sizeof(buffer_)));
   EXPECT_EQ(
       std::memcmp("\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01", buffer_, 10), 0);
@@ -325,19 +370,24 @@
 
 TEST_F(VarintWithBuffer, EncodeSizeSigned64_SmallSingleByte_C) {
   ASSERT_EQ(1u,
-            pw_varint_CallZigZagEncode(INT64_C(0), buffer_, sizeof(buffer_)));
+            pw_varint_CallZigZagAndVarintEncode64(
+                INT64_C(0), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::byte{0}, buffer_[0]);
   ASSERT_EQ(1u,
-            pw_varint_CallZigZagEncode(INT64_C(-1), buffer_, sizeof(buffer_)));
+            pw_varint_CallZigZagAndVarintEncode64(
+                INT64_C(-1), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::byte{1}, buffer_[0]);
   ASSERT_EQ(1u,
-            pw_varint_CallZigZagEncode(INT64_C(1), buffer_, sizeof(buffer_)));
+            pw_varint_CallZigZagAndVarintEncode64(
+                INT64_C(1), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::byte{2}, buffer_[0]);
   ASSERT_EQ(1u,
-            pw_varint_CallZigZagEncode(INT64_C(-2), buffer_, sizeof(buffer_)));
+            pw_varint_CallZigZagAndVarintEncode64(
+                INT64_C(-2), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::byte{3}, buffer_[0]);
   ASSERT_EQ(1u,
-            pw_varint_CallZigZagEncode(INT64_C(2), buffer_, sizeof(buffer_)));
+            pw_varint_CallZigZagAndVarintEncode64(
+                INT64_C(2), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::byte{4}, buffer_[0]);
 }
 
@@ -352,13 +402,16 @@
 
 TEST_F(VarintWithBuffer, EncodeSizeSigned64_LargeSingleByte_C) {
   ASSERT_EQ(1u,
-            pw_varint_CallZigZagEncode(INT64_C(-63), buffer_, sizeof(buffer_)));
+            pw_varint_CallZigZagAndVarintEncode64(
+                INT64_C(-63), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::byte{125}, buffer_[0]);
   ASSERT_EQ(1u,
-            pw_varint_CallZigZagEncode(INT64_C(63), buffer_, sizeof(buffer_)));
+            pw_varint_CallZigZagAndVarintEncode64(
+                INT64_C(63), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::byte{126}, buffer_[0]);
   ASSERT_EQ(1u,
-            pw_varint_CallZigZagEncode(INT64_C(-64), buffer_, sizeof(buffer_)));
+            pw_varint_CallZigZagAndVarintEncode64(
+                INT64_C(-64), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::byte{127}, buffer_[0]);
 }
 
@@ -391,37 +444,40 @@
 
 TEST_F(VarintWithBuffer, EncodeSizeSigned64_MultiByte_C) {
   ASSERT_EQ(2u,
-            pw_varint_CallZigZagEncode(INT64_C(64), buffer_, sizeof(buffer_)));
+            pw_varint_CallZigZagAndVarintEncode64(
+                INT64_C(64), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::memcmp("\x80\x01", buffer_, 2), 0);
   ASSERT_EQ(2u,
-            pw_varint_CallZigZagEncode(INT64_C(-65), buffer_, sizeof(buffer_)));
+            pw_varint_CallZigZagAndVarintEncode64(
+                INT64_C(-65), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::memcmp("\x81\x01", buffer_, 2), 0);
   ASSERT_EQ(2u,
-            pw_varint_CallZigZagEncode(INT64_C(65), buffer_, sizeof(buffer_)));
+            pw_varint_CallZigZagAndVarintEncode64(
+                INT64_C(65), buffer_, sizeof(buffer_)));
   EXPECT_EQ(std::memcmp("\x82\x01", buffer_, 2), 0);
 
   ASSERT_EQ(5u,
-            pw_varint_CallZigZagEncode(
+            pw_varint_CallZigZagAndVarintEncode64(
                 static_cast<int64_t>(std::numeric_limits<int32_t>::min()),
                 buffer_,
                 sizeof(buffer_)));
   EXPECT_EQ(std::memcmp("\xff\xff\xff\xff\x0f", buffer_, 5), 0);
 
   ASSERT_EQ(5u,
-            pw_varint_CallZigZagEncode(
+            pw_varint_CallZigZagAndVarintEncode64(
                 static_cast<int64_t>(std::numeric_limits<int32_t>::max()),
                 buffer_,
                 sizeof(buffer_)));
   EXPECT_EQ(std::memcmp("\xfe\xff\xff\xff\x0f", buffer_, 5), 0);
 
   ASSERT_EQ(10u,
-            pw_varint_CallZigZagEncode(
+            pw_varint_CallZigZagAndVarintEncode64(
                 std::numeric_limits<int64_t>::min(), buffer_, sizeof(buffer_)));
   EXPECT_EQ(
       std::memcmp("\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01", buffer_, 10), 0);
 
   ASSERT_EQ(10u,
-            pw_varint_CallZigZagEncode(
+            pw_varint_CallZigZagAndVarintEncode64(
                 std::numeric_limits<int64_t>::max(), buffer_, sizeof(buffer_)));
   EXPECT_EQ(
       std::memcmp("\xfe\xff\xff\xff\xff\xff\xff\xff\xff\x01", buffer_, 10), 0);
@@ -481,44 +537,50 @@
 
 FUZZ_TEST(Varint, EncodeDecodeUnsigned32);
 
-void EncodeDecode_C(uint64_t value) {
-  std::byte buffer[10];
-  size_t encoded = pw_varint_CallEncode(value, buffer, sizeof(buffer));
+#define ENCODE_DECODE_C_TEST(bits)                                   \
+  void EncodeDecode##bits##_C(uint##bits##_t value) {                \
+    std::byte buffer[kMaxVarint##bits##SizeBytes];                   \
+    size_t encoded =                                                 \
+        pw_varint_CallEncode##bits(value, buffer, sizeof(buffer));   \
+                                                                     \
+    uint##bits##_t result;                                           \
+    size_t decoded =                                                 \
+        pw_varint_CallDecode##bits(buffer, sizeof(buffer), &result); \
+                                                                     \
+    EXPECT_EQ(encoded, decoded);                                     \
+    ASSERT_EQ(value, result);                                        \
+  }                                                                  \
+                                                                     \
+  TEST(Varint, EncodeDecode##bits##Signed32Incremental_C) {          \
+    int32_t i = std::numeric_limits<int32_t>::min();                 \
+    while (true) {                                                   \
+      EncodeDecode##bits##_C(static_cast<uint##bits##_t>(i));        \
+                                                                     \
+      if (i > std::numeric_limits<int32_t>::max() - kIncrement) {    \
+        break;                                                       \
+      }                                                              \
+                                                                     \
+      i += kIncrement;                                               \
+    }                                                                \
+  }                                                                  \
+                                                                     \
+  TEST(Varint, EncodeDecode##bits##Unsigned32Incremental_C) {        \
+    uint32_t i = 0;                                                  \
+    while (true) {                                                   \
+      EncodeDecode##bits##_C(static_cast<uint##bits##_t>(i));        \
+                                                                     \
+      if (i > std::numeric_limits<uint32_t>::max() - kIncrement) {   \
+        break;                                                       \
+      }                                                              \
+                                                                     \
+      i += kIncrement;                                               \
+    }                                                                \
+  }                                                                  \
+                                                                     \
+  FUZZ_TEST(Varint, EncodeDecode##bits##_C)
 
-  uint64_t result;
-  size_t decoded = pw_varint_CallDecode(buffer, sizeof(buffer), &result);
-
-  EXPECT_EQ(encoded, decoded);
-  ASSERT_EQ(value, result);
-}
-
-TEST(Varint, EncodeDecodeSigned32Incremental_C) {
-  int32_t i = std::numeric_limits<int32_t>::min();
-  while (true) {
-    EncodeDecode_C(static_cast<uint64_t>(i));
-
-    if (i > std::numeric_limits<int32_t>::max() - kIncrement) {
-      break;
-    }
-
-    i += kIncrement;
-  }
-}
-
-TEST(Varint, EncodeDecodeUnsigned32Incremental_C) {
-  uint32_t i = 0;
-  while (true) {
-    EncodeDecode_C(static_cast<uint64_t>(i));
-
-    if (i > std::numeric_limits<uint32_t>::max() - kIncrement) {
-      break;
-    }
-
-    i += kIncrement;
-  }
-}
-
-FUZZ_TEST(Varint, EncodeDecode_C);
+ENCODE_DECODE_C_TEST(32);
+ENCODE_DECODE_C_TEST(64);
 
 template <size_t kStringSize>
 auto MakeBuffer(const char (&data)[kStringSize]) {
@@ -556,32 +618,38 @@
   int64_t value = -1234;
 
   auto buffer = MakeBuffer("\x00");
-  EXPECT_EQ(pw_varint_CallZigZagDecode(buffer.data(), buffer.size(), &value),
+  EXPECT_EQ(pw_varint_CallZigZagAndVarintDecode64(
+                buffer.data(), buffer.size(), &value),
             1u);
   EXPECT_EQ(value, 0);
 
   buffer = MakeBuffer("\x01");
-  EXPECT_EQ(pw_varint_CallZigZagDecode(buffer.data(), buffer.size(), &value),
+  EXPECT_EQ(pw_varint_CallZigZagAndVarintDecode64(
+                buffer.data(), buffer.size(), &value),
             1u);
   EXPECT_EQ(value, -1);
 
   buffer = MakeBuffer("\x02");
-  EXPECT_EQ(pw_varint_CallZigZagDecode(buffer.data(), buffer.size(), &value),
+  EXPECT_EQ(pw_varint_CallZigZagAndVarintDecode64(
+                buffer.data(), buffer.size(), &value),
             1u);
   EXPECT_EQ(value, 1);
 
   buffer = MakeBuffer("\x03");
-  EXPECT_EQ(pw_varint_CallZigZagDecode(buffer.data(), buffer.size(), &value),
+  EXPECT_EQ(pw_varint_CallZigZagAndVarintDecode64(
+                buffer.data(), buffer.size(), &value),
             1u);
   EXPECT_EQ(value, -2);
 
   buffer = MakeBuffer("\x04");
-  EXPECT_EQ(pw_varint_CallZigZagDecode(buffer.data(), buffer.size(), &value),
+  EXPECT_EQ(pw_varint_CallZigZagAndVarintDecode64(
+                buffer.data(), buffer.size(), &value),
             1u);
   EXPECT_EQ(value, 2);
 
   buffer = MakeBuffer("\x04");
-  EXPECT_EQ(pw_varint_CallZigZagDecode(buffer.data(), buffer.size(), &value),
+  EXPECT_EQ(pw_varint_CallZigZagAndVarintDecode64(
+                buffer.data(), buffer.size(), &value),
             1u);
   EXPECT_EQ(value, 2);
 }
@@ -619,37 +687,44 @@
   int64_t value = -1234;
 
   auto buffer2 = MakeBuffer("\x80\x01");
-  EXPECT_EQ(pw_varint_CallZigZagDecode(buffer2.data(), buffer2.size(), &value),
+  EXPECT_EQ(pw_varint_CallZigZagAndVarintDecode64(
+                buffer2.data(), buffer2.size(), &value),
             2u);
   EXPECT_EQ(value, 64);
 
   buffer2 = MakeBuffer("\x81\x01");
-  EXPECT_EQ(pw_varint_CallZigZagDecode(buffer2.data(), buffer2.size(), &value),
+  EXPECT_EQ(pw_varint_CallZigZagAndVarintDecode64(
+                buffer2.data(), buffer2.size(), &value),
             2u);
   EXPECT_EQ(value, -65);
 
   buffer2 = MakeBuffer("\x82\x01");
-  EXPECT_EQ(pw_varint_CallZigZagDecode(buffer2.data(), buffer2.size(), &value),
+  EXPECT_EQ(pw_varint_CallZigZagAndVarintDecode64(
+                buffer2.data(), buffer2.size(), &value),
             2u);
   EXPECT_EQ(value, 65);
 
   auto buffer4 = MakeBuffer("\xff\xff\xff\xff\x0f");
-  EXPECT_EQ(pw_varint_CallZigZagDecode(buffer4.data(), buffer4.size(), &value),
+  EXPECT_EQ(pw_varint_CallZigZagAndVarintDecode64(
+                buffer4.data(), buffer4.size(), &value),
             5u);
   EXPECT_EQ(value, std::numeric_limits<int32_t>::min());
 
   buffer4 = MakeBuffer("\xfe\xff\xff\xff\x0f");
-  EXPECT_EQ(pw_varint_CallZigZagDecode(buffer4.data(), buffer4.size(), &value),
+  EXPECT_EQ(pw_varint_CallZigZagAndVarintDecode64(
+                buffer4.data(), buffer4.size(), &value),
             5u);
   EXPECT_EQ(value, std::numeric_limits<int32_t>::max());
 
   auto buffer8 = MakeBuffer("\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01");
-  EXPECT_EQ(pw_varint_CallZigZagDecode(buffer8.data(), buffer8.size(), &value),
+  EXPECT_EQ(pw_varint_CallZigZagAndVarintDecode64(
+                buffer8.data(), buffer8.size(), &value),
             10u);
   EXPECT_EQ(value, std::numeric_limits<int64_t>::min());
 
   buffer8 = MakeBuffer("\xfe\xff\xff\xff\xff\xff\xff\xff\xff\x01");
-  EXPECT_EQ(pw_varint_CallZigZagDecode(buffer8.data(), buffer8.size(), &value),
+  EXPECT_EQ(pw_varint_CallZigZagAndVarintDecode64(
+                buffer8.data(), buffer8.size(), &value),
             10u);
   EXPECT_EQ(value, std::numeric_limits<int64_t>::max());
 }
@@ -1022,50 +1097,42 @@
   EXPECT_EQ(value, 0u);
 }
 
-TEST(Varint, EncodedSize) {
-  EXPECT_EQ(EncodedSize(uint64_t(0u)), 1u);
-  EXPECT_EQ(EncodedSize(uint64_t(1u)), 1u);
-  EXPECT_EQ(EncodedSize(uint64_t(127u)), 1u);
-  EXPECT_EQ(EncodedSize(uint64_t(128u)), 2u);
-  EXPECT_EQ(EncodedSize(uint64_t(16383u)), 2u);
-  EXPECT_EQ(EncodedSize(uint64_t(16384u)), 3u);
-  EXPECT_EQ(EncodedSize(uint64_t(2097151u)), 3u);
-  EXPECT_EQ(EncodedSize(uint64_t(2097152u)), 4u);
-  EXPECT_EQ(EncodedSize(uint64_t(268435455u)), 4u);
-  EXPECT_EQ(EncodedSize(uint64_t(268435456u)), 5u);
-  EXPECT_EQ(EncodedSize(uint64_t(34359738367u)), 5u);
-  EXPECT_EQ(EncodedSize(uint64_t(34359738368u)), 6u);
-  EXPECT_EQ(EncodedSize(uint64_t(4398046511103u)), 6u);
-  EXPECT_EQ(EncodedSize(uint64_t(4398046511104u)), 7u);
-  EXPECT_EQ(EncodedSize(uint64_t(562949953421311u)), 7u);
-  EXPECT_EQ(EncodedSize(uint64_t(562949953421312u)), 8u);
-  EXPECT_EQ(EncodedSize(uint64_t(72057594037927935u)), 8u);
-  EXPECT_EQ(EncodedSize(uint64_t(72057594037927936u)), 9u);
-  EXPECT_EQ(EncodedSize(uint64_t(9223372036854775807u)), 9u);
-  EXPECT_EQ(EncodedSize(uint64_t(9223372036854775808u)), 10u);
-  EXPECT_EQ(EncodedSize(std::numeric_limits<uint64_t>::max()), 10u);
-  EXPECT_EQ(EncodedSize(std::numeric_limits<int64_t>::max()), 9u);
-  EXPECT_EQ(EncodedSize(int64_t(-1)), 10u);
-  EXPECT_EQ(EncodedSize(std::numeric_limits<int64_t>::min()), 10u);
-}
+#define ENCODED_SIZE_TEST(function)                                           \
+  TEST(Varint, function) {                                                    \
+    EXPECT_EQ(function(uint64_t(0u)), 1u);                                    \
+    EXPECT_EQ(function(uint64_t(1u)), 1u);                                    \
+    EXPECT_EQ(function(uint64_t(127u)), 1u);                                  \
+    EXPECT_EQ(function(uint64_t(128u)), 2u);                                  \
+    EXPECT_EQ(function(uint64_t(16383u)), 2u);                                \
+    EXPECT_EQ(function(uint64_t(16384u)), 3u);                                \
+    EXPECT_EQ(function(uint64_t(2097151u)), 3u);                              \
+    EXPECT_EQ(function(uint64_t(2097152u)), 4u);                              \
+    EXPECT_EQ(function(uint64_t(268435455u)), 4u);                            \
+    EXPECT_EQ(function(uint64_t(268435456u)), 5u);                            \
+    EXPECT_EQ(function(uint64_t(34359738367u)), 5u);                          \
+    EXPECT_EQ(function(uint64_t(34359738368u)), 6u);                          \
+    EXPECT_EQ(function(uint64_t(4398046511103u)), 6u);                        \
+    EXPECT_EQ(function(uint64_t(4398046511104u)), 7u);                        \
+    EXPECT_EQ(function(uint64_t(562949953421311u)), 7u);                      \
+    EXPECT_EQ(function(uint64_t(562949953421312u)), 8u);                      \
+    EXPECT_EQ(function(uint64_t(72057594037927935u)), 8u);                    \
+    EXPECT_EQ(function(uint64_t(72057594037927936u)), 9u);                    \
+    EXPECT_EQ(function(uint64_t(9223372036854775807u)), 9u);                  \
+    EXPECT_EQ(function(uint64_t(9223372036854775808u)), 10u);                 \
+    EXPECT_EQ(function(std::numeric_limits<uint64_t>::max()), 10u);           \
+    EXPECT_EQ(                                                                \
+        static_cast<uint64_t>(function(std::numeric_limits<int64_t>::max())), \
+        9u);                                                                  \
+    EXPECT_EQ(function(uint64_t(-1)), 10u);                                   \
+    EXPECT_EQ(                                                                \
+        function(static_cast<uint64_t>(std::numeric_limits<int64_t>::min())), \
+        10u);                                                                 \
+  }                                                                           \
+  static_assert(true)
 
-TEST(Varint, ZigZagEncodedSize) {
-  EXPECT_EQ(ZigZagEncodedSize(int64_t(0)), 1u);
-  EXPECT_EQ(ZigZagEncodedSize(int64_t(-1)), 1u);
-  EXPECT_EQ(ZigZagEncodedSize(int64_t(1)), 1u);
-  EXPECT_EQ(ZigZagEncodedSize(int64_t(-64)), 1u);
-  EXPECT_EQ(ZigZagEncodedSize(int64_t(-65)), 2u);
-  EXPECT_EQ(ZigZagEncodedSize(int64_t(63)), 1u);
-  EXPECT_EQ(ZigZagEncodedSize(int64_t(64)), 2u);
-  EXPECT_EQ(ZigZagEncodedSize(std::numeric_limits<int8_t>::min()), 2u);
-  EXPECT_EQ(ZigZagEncodedSize(std::numeric_limits<int8_t>::max()), 2u);
-  EXPECT_EQ(ZigZagEncodedSize(std::numeric_limits<int16_t>::min()), 3u);
-  EXPECT_EQ(ZigZagEncodedSize(std::numeric_limits<int16_t>::max()), 3u);
-  EXPECT_EQ(ZigZagEncodedSize(std::numeric_limits<int32_t>::min()), 5u);
-  EXPECT_EQ(ZigZagEncodedSize(std::numeric_limits<int32_t>::max()), 5u);
-  EXPECT_EQ(ZigZagEncodedSize(std::numeric_limits<int64_t>::min()), 10u);
-  EXPECT_EQ(ZigZagEncodedSize(std::numeric_limits<int64_t>::max()), 10u);
-}
+ENCODED_SIZE_TEST(EncodedSize);
+ENCODED_SIZE_TEST(pw_varint_EncodedSizeBytes);
+ENCODED_SIZE_TEST(PW_VARINT_ENCODED_SIZE_BYTES);
 
 constexpr uint64_t CalculateMaxValueInBytes(size_t bytes) {
   uint64_t value = 0;
diff --git a/pw_varint/varint_test_c.c b/pw_varint/varint_test_c.c
index 0e66ac9..355db97 100644
--- a/pw_varint/varint_test_c.c
+++ b/pw_varint/varint_test_c.c
@@ -19,24 +19,42 @@
 
 #include "pw_varint/varint.h"
 
-size_t pw_varint_CallEncode(uint64_t integer,
-                            void* output,
-                            size_t output_size) {
-  return pw_varint_Encode(integer, output, output_size);
+size_t pw_varint_CallEncode32(uint32_t integer,
+                              void* output,
+                              size_t output_size_bytes) {
+  return pw_varint_Encode32(integer, output, output_size_bytes);
 }
 
-size_t pw_varint_CallZigZagEncode(int64_t integer,
-                                  void* output,
-                                  size_t output_size) {
-  return pw_varint_ZigZagEncode(integer, output, output_size);
+size_t pw_varint_CallEncode64(uint64_t integer,
+                              void* output,
+                              size_t output_size_bytes) {
+  return pw_varint_Encode64(integer, output, output_size_bytes);
 }
 
-size_t pw_varint_CallDecode(void* input, size_t input_size, uint64_t* output) {
-  return pw_varint_Decode(input, input_size, output);
+size_t pw_varint_CallZigZagAndVarintEncode64(int64_t integer,
+                                             void* output,
+                                             size_t output_size_bytes) {
+  return pw_varint_Encode64(
+      pw_varint_ZigZagEncode64(integer), output, output_size_bytes);
 }
 
-size_t pw_varint_CallZigZagDecode(void* input,
-                                  size_t input_size,
-                                  int64_t* output) {
-  return pw_varint_ZigZagDecode(input, input_size, output);
+size_t pw_varint_CallDecode32(void* input,
+                              size_t input_size,
+                              uint32_t* output) {
+  return pw_varint_Decode32(input, input_size, output);
+}
+
+size_t pw_varint_CallDecode64(void* input,
+                              size_t input_size,
+                              uint64_t* output) {
+  return pw_varint_Decode64(input, input_size, output);
+}
+
+size_t pw_varint_CallZigZagAndVarintDecode64(void* input,
+                                             size_t input_size,
+                                             int64_t* output) {
+  uint64_t value = 0;
+  const size_t bytes_read = pw_varint_Decode64(input, input_size, &value);
+  *output = pw_varint_ZigZagDecode64(value);
+  return bytes_read;
 }
diff --git a/pw_watch/docs.rst b/pw_watch/docs.rst
index 61103fd..25f6468 100644
--- a/pw_watch/docs.rst
+++ b/pw_watch/docs.rst
@@ -55,7 +55,7 @@
 -----------
 Get started
 -----------
-.. code:: bash
+.. code-block:: bash
 
    cd ~/pigweed
    source activate.sh
diff --git a/pw_web/docs.rst b/pw_web/docs.rst
index 87fef7d..02d9a57 100644
--- a/pw_web/docs.rst
+++ b/pw_web/docs.rst
@@ -16,14 +16,14 @@
 -------------
 If you have a bundler set up, you can install ``pigweedjs`` in your web application by:
 
-.. code:: bash
+.. code-block:: bash
 
    $ npm install --save pigweedjs
 
 
 After installing, you can import modules from ``pigweedjs`` in this way:
 
-.. code:: javascript
+.. code-block:: javascript
 
    import { pw_rpc, pw_tokenizer, Device, WebSerial } from 'pigweedjs';
 
@@ -33,7 +33,7 @@
 If you don't want to set up a bundler, you can also load Pigweed directly in
 your HTML page by:
 
-.. code:: html
+.. code-block:: html
 
    <script src="https://unpkg.com/pigweedjs/dist/index.umd.js"></script>
    <script>
@@ -50,7 +50,7 @@
 ``pigweedjs`` provides a ``Device`` API which simplifies common tasks. Here is
 an example to connect to device and call ``EchoService.Echo`` RPC service.
 
-.. code:: html
+.. code-block:: html
 
    <h1>Hello Pigweed</h1>
    <button onclick="connect()">Connect</button>
@@ -79,7 +79,7 @@
 also uses pw_tokenizer to tokenize strings and save device space. Below is an
 example that streams logs using the ``Device`` API.
 
-.. code:: html
+.. code-block:: html
 
    <h1>Hello Pigweed</h1>
    <button onclick="connect()">Connect</button>
@@ -110,7 +110,7 @@
 The above example requires a token database in CSV format. You can generate one
 from the pw_system's ``.elf`` file by running:
 
-.. code:: bash
+.. code-block:: bash
 
    $ pw_tokenizer/py/pw_tokenizer/database.py create \
    --database db.csv out/stm32f429i_disc1_stm32cube.size_optimized/obj/pw_system/bin/system_example.elf
@@ -154,7 +154,7 @@
 class is also included under ``WebSerial.WebSerialTransport``. Here is an
 example usage:
 
-.. code:: javascript
+.. code-block:: javascript
 
    import { WebSerial, pw_hdlc } from 'pigweedjs';
 
@@ -197,7 +197,7 @@
 React-based web app. Web console includes a log viewer and a REPL that supports
 autocomplete. Here's how to run the web console locally:
 
-.. code:: bash
+.. code-block:: bash
 
    $ cd pw_web/webconsole
    $ npm install
@@ -213,7 +213,7 @@
 The component is composed of the component itself and a log source. Here is a
 simple example app that uses a mock log source:
 
-.. code:: html
+.. code-block:: html
 
    <div id="log-viewer-container"></div>
    <script src="https://unpkg.com/pigweedjs/dist/logging.umd.js"></script>
@@ -236,7 +236,7 @@
 It also comes with an RPC log source with support for detokenization. Here is an
 example app using that:
 
-.. code:: html
+.. code-block:: html
 
    <div id="log-viewer-container"></div>
    <script src="https://unpkg.com/pigweedjs/dist/index.umd.js"></script>
@@ -265,7 +265,7 @@
 just extending the abstract `LogSource` class and emitting the `logEntry` events
 like this:
 
-.. code:: typescript
+.. code-block:: typescript
 
   import { LogSource, LogEntry, Severity } from 'pigweedjs/logging';
 
diff --git a/pw_web/log-viewer/README.md b/pw_web/log-viewer/README.md
index 2cf69d2..10862ee 100644
--- a/pw_web/log-viewer/README.md
+++ b/pw_web/log-viewer/README.md
@@ -2,8 +2,6 @@
 
 An embeddable log-viewing web component that is customizable and extensible for use in various developer contexts.
 
-Visit [go/fxd-pigweed-log-viewer](http://goto.google.com/fxd-pigweed-log-viewer) for more information.
-
 ## Installation
 
 1. Clone the main Pigweed repository:
diff --git a/pw_web/log-viewer/package-lock.json b/pw_web/log-viewer/package-lock.json
index 3e0bc72..5f22943 100644
--- a/pw_web/log-viewer/package-lock.json
+++ b/pw_web/log-viewer/package-lock.json
@@ -9,10 +9,9 @@
             "version": "0.0.0",
             "dependencies": {
                 "@lit-labs/virtualizer": "^2.0.3",
-                "@material/web": "^1.0.0-pre.8",
+                "@material/web": "^1.0.0-pre.16",
                 "date-fns": "^2.30.0",
                 "lit": "^3.0.0-pre.0",
-                "peggy": "^3.0.2",
                 "prettier-plugin-jsdoc": "^0.4.2"
             },
             "devDependencies": {
@@ -49,21 +48,21 @@
             }
         },
         "node_modules/@babel/helper-validator-identifier": {
-            "version": "7.19.1",
-            "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.19.1.tgz",
-            "integrity": "sha512-awrNfaMtnHUr653GgGEs++LlAvW6w+DcPrOliSMXWCKo597CwL5Acf/wWdNkf/tfEQE3mjkeD1YOVZOUV/od1w==",
+            "version": "7.22.5",
+            "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.5.tgz",
+            "integrity": "sha512-aJXu+6lErq8ltp+JhkJUfk1MTGyuA4v7f3pA+BJ5HLfNC6nAQ0Cpi9uOquUj8Hehg0aUiHzWQbOVJGao6ztBAQ==",
             "dev": true,
             "engines": {
                 "node": ">=6.9.0"
             }
         },
         "node_modules/@babel/highlight": {
-            "version": "7.18.6",
-            "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.18.6.tgz",
-            "integrity": "sha512-u7stbOuYjaPezCuLj29hNW1v64M2Md2qupEKP1fHc7WdOA3DgLh37suiSrZYY7haUB7iBeQZ9P1uiRF359do3g==",
+            "version": "7.22.5",
+            "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.5.tgz",
+            "integrity": "sha512-BSKlD1hgnedS5XRnGOljZawtag7H1yPfQp0tdNJCHoH6AZ+Pcm9VvkrK59/Yy593Ypg0zMxH2BxD1VPYUQ7UIw==",
             "dev": true,
             "dependencies": {
-                "@babel/helper-validator-identifier": "^7.18.6",
+                "@babel/helper-validator-identifier": "^7.22.5",
                 "chalk": "^2.0.0",
                 "js-tokens": "^4.0.0"
             },
@@ -671,12 +670,11 @@
             }
         },
         "node_modules/@material/web": {
-            "version": "1.0.0-pre.8",
-            "resolved": "https://registry.npmjs.org/@material/web/-/web-1.0.0-pre.8.tgz",
-            "integrity": "sha512-CafOQw/C23SP1DlJVYDbs2Cm9Ct4QV7RxjxMBiWhaqlhqfDyXVpnZnKlTVzD5dVDKZyUu3vHsWK7Y/N8P66DYQ==",
+            "version": "1.0.0-pre.16",
+            "resolved": "https://registry.npmjs.org/@material/web/-/web-1.0.0-pre.16.tgz",
+            "integrity": "sha512-BDrwtY7WkbGhh2jxupbRdh1/wEiy69h6XzjgCxISJNNQVyn6rbOv1U57+d9J24Z5DvSTKv8NHrNyXT6iwmG2KQ==",
             "dependencies": {
-                "lit": "^2.3.0",
-                "safevalues": "^0.4.3",
+                "lit": "^2.7.4",
                 "tslib": "^2.4.0"
             }
         },
@@ -1508,14 +1506,6 @@
             "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
             "dev": true
         },
-        "node_modules/commander": {
-            "version": "10.0.1",
-            "resolved": "https://registry.npmjs.org/commander/-/commander-10.0.1.tgz",
-            "integrity": "sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==",
-            "engines": {
-                "node": ">=14"
-            }
-        },
         "node_modules/comment-parser": {
             "version": "1.3.1",
             "resolved": "https://registry.npmjs.org/comment-parser/-/comment-parser-1.3.1.tgz",
@@ -4082,21 +4072,6 @@
                 "node": ">=8"
             }
         },
-        "node_modules/peggy": {
-            "version": "3.0.2",
-            "resolved": "https://registry.npmjs.org/peggy/-/peggy-3.0.2.tgz",
-            "integrity": "sha512-n7chtCbEoGYRwZZ0i/O3t1cPr6o+d9Xx4Zwy2LYfzv0vjchMBU0tO+qYYyvZloBPcgRgzYvALzGWHe609JjEpg==",
-            "dependencies": {
-                "commander": "^10.0.0",
-                "source-map-generator": "0.8.0"
-            },
-            "bin": {
-                "peggy": "bin/peggy.js"
-            },
-            "engines": {
-                "node": ">=14"
-            }
-        },
         "node_modules/picocolors": {
             "version": "1.0.0",
             "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz",
@@ -4387,11 +4362,6 @@
                 "url": "https://github.com/sponsors/ljharb"
             }
         },
-        "node_modules/safevalues": {
-            "version": "0.4.3",
-            "resolved": "https://registry.npmjs.org/safevalues/-/safevalues-0.4.3.tgz",
-            "integrity": "sha512-pNCNTkx3xs7G5YJ/9CoeZZVUSPRjH0SEPM0QI5Z1FZRlLBviTFWlNKPs8PTvZvERV0gO7ie/t/Zc0S96JS4Xew=="
-        },
         "node_modules/semver": {
             "version": "7.5.3",
             "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.3.tgz",
@@ -4468,14 +4438,6 @@
                 "url": "https://github.com/chalk/slice-ansi?sponsor=1"
             }
         },
-        "node_modules/source-map-generator": {
-            "version": "0.8.0",
-            "resolved": "https://registry.npmjs.org/source-map-generator/-/source-map-generator-0.8.0.tgz",
-            "integrity": "sha512-psgxdGMwl5MZM9S3FWee4EgsEaIjahYV5AzGnwUvPhWeITz/j6rKpysQHlQ4USdxvINlb8lKfWGIXwfkrgtqkA==",
-            "engines": {
-                "node": ">= 10"
-            }
-        },
         "node_modules/source-map-js": {
             "version": "1.0.2",
             "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.0.2.tgz",
diff --git a/pw_web/log-viewer/package.json b/pw_web/log-viewer/package.json
index adeb9dd..4236d50 100644
--- a/pw_web/log-viewer/package.json
+++ b/pw_web/log-viewer/package.json
@@ -5,16 +5,15 @@
     "type": "module",
     "scripts": {
         "dev": "vite",
-        "build": "peggy src/assets/filters.pegjs --format es && tsc && vite build --base=/asset/5b095937-1e59-52cf-9255-eb8577d1ff83/",
+        "build": "tsc && vite build --base=/asset/5b095937-1e59-52cf-9255-eb8577d1ff83/",
         "preview": "vite preview",
         "lint": "eslint --max-warnings=0 src"
     },
     "dependencies": {
         "@lit-labs/virtualizer": "^2.0.3",
-        "@material/web": "^1.0.0-pre.8",
+        "@material/web": "^1.0.0-pre.16",
         "date-fns": "^2.30.0",
         "lit": "^3.0.0-pre.0",
-        "peggy": "^3.0.2",
         "prettier-plugin-jsdoc": "^0.4.2"
     },
     "devDependencies": {
diff --git a/pw_web/log-viewer/src/assets/filters.pegjs b/pw_web/log-viewer/src/assets/filters.pegjs
deleted file mode 100644
index 869a93e..0000000
--- a/pw_web/log-viewer/src/assets/filters.pegjs
+++ /dev/null
@@ -1,454 +0,0 @@
-// Copyright 2023 The Pigweed Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-//     https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-/**
-# Filter language
-
-This language allows us to filter logs based on a set of conditions. The language is designed to
-map directly to UI chip elements.
-
-## Examples
-
-1. Show logs of severity info or higher for entries where the moniker is `core/ffx-laboratory:hello`
-   or the message contains “hello”.
-
-  ```
-  (moniker:core/ffx-laboratory:hello | message:hello) severity:info
-  ```
-
-2. Show logs of severity error or higher, where the component url contains a package named hello,
-   where the manifest is named hello or where the message contains “hello world” but where the
-   message doesn’t contain “bye”.
-
-  ```
-  (package_name:hello | manifest:hello | message:"hello world") severity:error !message:"bye"
-  ```
-
-3. Show logs from both core/foo and core/bar
-
-  ```
-  moniker:core/foo|core/bar
-
-  # which is the same as:
-  moniker:core/foo|moniker:core/bar
-  ```
-
-4. Show logs where any field contains foo, bar or baz either in the message, url, moniker, etc.
-
-  ```
-  any:foo|bar|baz
-  ```
-
-## Grammar
-
-```
-
-<expression> := <conjunction> | <conjunction> <or> <expression>
-
-<conjunction> := <primary expression> | <primary expression> <and> <conjunction>
-
-<primary expression> := <not> <primary expression>
-    | '(' <expression> ')'
-    | <filter>
-
-<filter> := <category> <core operator> <core operator values>
-    | severity <operator> <severities>
-    | <pid tid category> <core operator> <numbers>
-    | <time category> <time operator> <time value>
-    | <custom key> <operator> <custom key values>
-    | <regex>
-    | <string>
-
-<core operator> := <contains> | <equality>
-<operator> := <contains> | <equality> | <comparison>
-<contains> := ':'
-<equality> := '='
-<comparison> :=  '>' | '<' | '>=' | '<='
-
-<category> := moniker | tag | package_name | manifest | message | any
-<pid tid category> := pid | tid
-<time operator> := '>' | '<'
-<time values> := <number> <time unit>
-<time unit> := s | ms | min
-<custom key values> := <numbers> | <boolean> | <regex> | <strings>
-<core operator values> := <regex> | <strings>
-
-<severity> := trace | debug | info | warn | error | fatal
-<severities> := <severity> | <severity seq and> | <severity seq or>
-<severity seq and> := <severity> '&' <severity seq and>
-<severity seq or> := <severity> '|' <severity seq or>
-
-<boolean> := true | false
-
-<regex> := '/' <chars with spaces> '/'
-
-<string> := <chars> | "<chars with spaces>"
-<strings> := <string> | <string seq and> | <string seq or>
-<string seq and> := <string> '&' <string seq and>
-<string seq or> := <string> '|' <string seq or>
-
-<chars> := any sequence of characters without space
-<chars with spaces> := any sequence of characters with spaces
-
-<number> := any sequence of numbers, including negative, exp, etc
-<numbers>  := <number> | <number seq and> | <number seq or>
-<number seq and> := <number> '&' <number seq and>
-<number seq or> := <number> '|' <number seq or>
-
-<or> := '|' | or
-<not> := not | '!'
-<and> := and | '&' | ε   # just writing a space will be interpreted as AND in the top level
-```
-
-The severities, logical operators, categories are case insensitive. When doing a `:` (contains)
-operation, the value being matched against will be treated as case insensitive.
-
-The following categories are supported:
-
-1. Moniker: identifies the component that emitted the log.
-
-2. Tag: a log entry might contain one or more tags in its metadata.
-
-3. Package name: The name of the package present in the corresponding section of the url with
-    which the component was launched. Supported operations:
-
-4. Manifest: The name of the manifest present in the corresponding section of the url with which
-    the component was launched.
-
-5. Message: The log message.
-
-6. Severity: The severity of the log.
-
-7. Pid and Tid: the process and thread ids that emitted the log.
-
-8. Custom key: A structured field contained in the payload of a structured log.
-
-9. Any: Matches a value in any of the previous sections.
-
-All categories support `=`(equals) and `:` (contains). The `:` operator will do substring search,
-equality when dealing with numbers of booleans or a minimum severity operation when applied to
-`severity`. Custom keys and severities also support `>`, `<`, `>=`, `<=`, `=`.
-*/
-
-{
-  function buildExpression(args) {
-    if (args.rhs.items.length === 1) {
-      return new options.Filter({
-        category: args.category.toLowerCase(),
-        subCategory: args.subCategory,
-        operator: args.operator,
-        value: args.rhs.items[0]
-      });
-    }
-    const items = args.rhs.items.map((value) => {
-      return new options.Filter({
-        category: args.category.toLowerCase(),
-        subCategory: args.subCategory,
-        operator: args.operator,
-        value,
-      })
-    });
-    switch (args.rhs.reducedExpression) {
-      case 'or':
-        return new options.OrExpression(items);
-      case 'and':
-        return new options.AndExpression(items);
-    }
-  }
-}
-
-Expression
-  = head:ConjunctionExpression tail:(_ Or _ ConjunctionExpression)* {
-    if (tail.length === 0) {
-      return head;
-    }
-    let items = tail.reduce((result, element) => {
-      return result.concat([element[3]]);
-    }, [head]);
-    return new options.OrExpression(items);
-  }
-
-ConjunctionExpression
-  = head:PrimaryExpression tail:(_ (And _)? PrimaryExpression)* {
-    if (tail.length === 0) {
-      return head;
-    }
-    let items = tail.reduce((result, element) => {
-      return result.concat(element[2]);
-    }, [head]);
-    return new options.AndExpression(items);
-  }
-
-PrimaryExpression
-  = Not _ expression:PrimaryExpression {
-    return new options.NotExpression(expression);
-  }
-  / "(" expression:Expression ")" { return expression; }
-  / filter:Filter { return filter; }
-
-Filter
-  = "severity"i operator:Operator severities:Severities {
-    return buildExpression({
-      category: 'severity',
-      subCategory: undefined,
-      operator: operator,
-      rhs: severities,
-    });
-  }
-  / category:Category operator:CoreOperator strings:CoreOperatorValues {
-    return buildExpression({
-      category,
-      subCategory: undefined,
-      operator: operator,
-      rhs: strings,
-    });
-  }
-  / category:"time" operator:TimeOperator strings:TimeNumbers {
-    return buildExpression({
-      category,
-      subCategory: undefined,
-      operator: operator,
-      rhs: strings,
-    });
-  }
-  / category:PidTidCategory operator:CoreOperator numbers:Numbers {
-    return buildExpression({
-      category,
-      subCategory: undefined,
-      operator: operator,
-      rhs: numbers,
-    });
-  }
-  / customKey:CustomKey operator:Operator values:CustomKeyValues {
-    return buildExpression({
-      category: 'custom',
-      subCategory: customKey,
-      operator: operator,
-      rhs: values,
-    });
-  }
-  / regex:Regex {
-    return buildExpression({
-      category: 'any',
-      subCategory: undefined,
-      operator: 'contains',
-      rhs: { items: [regex] },
-    });
-  }
-  / !ReservedWord string:String {
-    return buildExpression({
-      category: 'any',
-      subCategory: undefined,
-      operator: 'contains',
-      rhs: { items: [string] },
-    });
-  };
-
-// NOTE: the returned types must match the ones in the Operator type in filter.ts.
-CoreOperator
-  = ":" { return 'contains'; }
-  / "=" { return 'equal'; }
-
-// NOTE: the returned types must match the ones in the Operator type in filter.ts.
-Operator
-  = ":" { return 'contains'; }
-  / "=" { return 'equal'; }
-  / ">=" { return 'greaterEq'; }
-  / ">" { return 'greater'; }
-  / "<=" { return 'lessEq'; }
-  / "<" { return 'less'; }
-
-// TimeOperator only supports > and <
-TimeOperator
-  = ">" { return 'greater'; }
-  / "<" { return 'less'; }
-
-// NOTE: the returned types must match the ones in the Category type in filter.ts.
-Category
-  = "any"i
-  / "custom"i
-  / "manifest"i
-  / "message"i
-  / "time"i
-  / "moniker"i
-  / "package-name"i
-  / "tag"i;
-
-PidTidCategory
-  = "pid"i
-  / "tid"i;
-
-CustomKeyValues
-  = Numbers
-  / b:Boolean { return { items: [b] }; }
-  / r:Regex { return { items: [r] }; }
-  / Strings
-
-CoreOperatorValues
-  = r:Regex { return { items: [r] }; }
-  / Strings
-
-// NOTE: the returned types must match the ones in the Severity type in filter.ts.
-Severity
-  = "trace"i
-  / "debug"i
-  / "info"i
-  / "warning"i { return "warn"; }
-  / "warn"i
-  / "error"i
-  / "fatal"i
-
-Boolean
-  = "true"i { return true; }
-  / "false"i { return false; }
-
-Regex = "/" chars:RegexChar* "/" {
-  return new RegExp(chars.join(""));
-}
-
-RegexChar
-  = [^/\\]
-  / "\\/" { return "/"; }
-  / "\\\\" { return "\\"; }
-
-Strings
-  = head:String tail:(ShorthandedStringSequence)? {
-    if (tail) {
-      tail.items.unshift(head);
-      return tail;
-    }
-    return { items: [head] };
-  }
-
-ShorthandedStringSequence
-  = items:("|" String)+ {
-    return {
-      reducedExpression: 'or',
-      items: items.map((item) => item[1]),
-    };
-  }
-  / items:("&" String)+ {
-    return {
-      reducedExpression: 'and',
-      items: items.map((item) => item[1]),
-    };
-  }
-
-CustomKey = [^:\\|&\\(\\)><=" ]+ { return text(); }
-
-Numbers
-  = head:Number tail:(ShorthandedNumberSequence)? {
-    if (tail) {
-      tail.items.unshift(head);
-      return tail;
-    }
-    return { items: [head] };
-  }
-
-TimeNumbers
-  = head:TimeNumber {
-    return { items: [head] };
-  }
-
-ShorthandedNumberSequence
-  = items:("|" Number)+ {
-    return {
-      reducedExpression: 'or',
-      items: items.map((item) => item[1]),
-    };
-  }
-  / items:("&" Number)+ {
-    return {
-      reducedExpression: 'and',
-      items: items.map((item) => item[1]),
-    };
-  }
-
-Severities
-  = head:Severity tail:(ShorthandedSeveritySequence)? {
-    if (tail) {
-      tail.items.unshift(head);
-      return tail;
-    }
-    return { items: [head] };
-  }
-
-ShorthandedSeveritySequence
-  = items:("|" Severity)+ {
-    return {
-      reducedExpression: 'or',
-      items: items.map((item) => item[1]),
-    };
-  }
-  / items:("&" Severity)+ {
-    return {
-      reducedExpression: 'and',
-      items: items.map((item) => item[1]),
-    };
-  }
-
-Number
-  = [0-9]+ {
-      return parseInt(text(), 10);
-}
-
-// Time number in nanoseconds.
-TimeNumber
-  = [0-9]+ "s" {
-    return parseInt(text(), 10) * 1e9;
-  }
-  / [0-9]+ "min" {
-    return parseInt(text(), 10) * 1e9 * 60;
-  }
-  / [0-9]+ "ms" {
-    return parseInt(text(), 10) * 1e6;
-  }
-
-String
-  = '"' chars:CharWithinQuotes* '"' {
-    return chars.join('');
-  }
-  /  chars:CharNotInQuotes+ { return chars.join(''); }
-
-CharWithinQuotes
-  = [^"\\]
-  / '\\"' { return '"'; }
-  / "\\\\" { return "\\"; }
-
-CharNotInQuotes
-  = [^"\\|&\\(\\): ]
-  / "\\" char:(
-      '"'
-    / '\\'
-    / '('
-    / ')'
-    / ':'
-    / ' '
-    / '|'
-    / '&'
-  ) {
-    return char;
-  }
-
-Not = "!" / "not"i
-And = "&" / "and"i
-Or = "|" / "or"i
-
-ReservedWord
-  = "and"i
-  / "or"i
-  / "not"i
-
-// Whitespace
-_ = [ \t\n\r]*
diff --git a/pw_web/log-viewer/src/components/log-list/log-list.styles.ts b/pw_web/log-viewer/src/components/log-list/log-list.styles.ts
index 8b6fee6..8b9013c 100644
--- a/pw_web/log-viewer/src/components/log-list/log-list.styles.ts
+++ b/pw_web/log-viewer/src/components/log-list/log-list.styles.ts
@@ -26,13 +26,14 @@
     font-family: 'Roboto Mono', monospace;
     font-size: 1rem;
     height: 100%;
+    overflow: hidden;
     position: relative;
   }
 
   .table-container {
+    display: grid;
     height: 100%;
-    overflow: auto;
-    padding-bottom: 3rem;
+    overflow: scroll;
     scroll-behavior: auto;
     width: 100%;
   }
@@ -118,7 +119,8 @@
   }
 
   th[hidden],
-  td[hidden] {
+  td[hidden],
+  .jump-to-bottom-btn[hidden] {
     display: none;
   }
 
@@ -134,6 +136,16 @@
     align-items: flex-start;
   }
 
+  .jump-to-bottom-btn {
+    --md-filled-button-container-elevation: 4;
+    --md-filled-button-hover-container-elevation: 4;
+    bottom: 2rem;
+    font-family: 'Google Sans', sans-serif;
+    left: 50%;
+    position: absolute;
+    transform: translate(-50%);
+  }
+
   .resize-handle {
     background-color: var(--sys-log-viewer-color-table-cell-outline);
     bottom: 0;
@@ -179,24 +191,27 @@
       'wght' 400,
       'GRAD' 200,
       'opsz' 58;
+    font-size: 1.25rem;
     user-select: none;
   }
 
   .overflow-indicator {
-    height: 100%;
     pointer-events: none;
     position: absolute;
-    top: 0;
     width: 8rem;
   }
 
-  .right-indicator {
+  .bottom-indicator {
+    align-self: flex-end;
     background: linear-gradient(
-      to right,
+      to bottom,
       transparent,
       var(--sys-log-viewer-color-overflow-indicator)
     );
-    right: 0;
+    height: 8rem;
+    pointer-events: none;
+    position: absolute;
+    width: calc(100% - 1rem);
   }
 
   .left-indicator {
@@ -205,7 +220,18 @@
       transparent,
       var(--sys-log-viewer-color-overflow-indicator)
     );
-    left: 0;
+    height: calc(100% - 1rem);
+    justify-self: flex-start;
+  }
+
+  .right-indicator {
+    background: linear-gradient(
+      to right,
+      transparent,
+      var(--sys-log-viewer-color-overflow-indicator)
+    );
+    height: calc(100% - 1rem);
+    justify-self: flex-end;
   }
 
   mark {
@@ -214,4 +240,40 @@
     color: var(--md-sys-color-on-primary-container);
     outline: 1px solid var(--sys-log-viewer-color-table-mark);
   }
+
+  ::-webkit-scrollbar {
+    -webkit-appearance: auto;
+  }
+
+  ::-webkit-scrollbar-corner {
+    background: var(--md-sys-color-surface-container-low);
+  }
+
+  ::-webkit-scrollbar-thumb {
+    min-height: 3rem;
+  }
+
+  ::-webkit-scrollbar-thumb:horizontal {
+    border-radius: 20px;
+    box-shadow: inset 0 0 2rem 2rem var(--md-sys-color-outline-variant);
+    border: inset 3px transparent;
+    border-top: inset 4px transparent;
+  }
+
+  ::-webkit-scrollbar-thumb:vertical {
+    border-radius: 20px;
+    box-shadow: inset 0 0 2rem 2rem var(--md-sys-color-outline-variant);
+    border: inset 3px transparent;
+    border-left: inset 4px transparent;
+  }
+
+  ::-webkit-scrollbar-track:horizontal {
+    box-shadow: inset 0 0 2rem 2rem var(--md-sys-color-surface-container-low);
+    border-top: solid 1px var(--md-sys-color-outline-variant);
+  }
+
+  ::-webkit-scrollbar-track:vertical {
+    box-shadow: inset 0 0 2rem 2rem var(--md-sys-color-surface-container-low);
+    border-left: solid 1px var(--md-sys-color-outline-variant);
+  }
 `;
diff --git a/pw_web/log-viewer/src/components/log-list/log-list.ts b/pw_web/log-viewer/src/components/log-list/log-list.ts
index 2f0343f..f6a39df 100644
--- a/pw_web/log-viewer/src/components/log-list/log-list.ts
+++ b/pw_web/log-viewer/src/components/log-list/log-list.ts
@@ -60,10 +60,14 @@
   @state()
   private _isOverflowingToRight = false;
 
-  /** A number reprecenting the scroll percentage in the horizontal direction. */
+  /** A number representing the scroll percentage in the horizontal direction. */
   @state()
   private _scrollPercentageLeft = 0;
 
+  /** A number representing visibility of vertical scroll indicator. */
+  @state()
+  private _scrollDownOpacity = 0;
+
   /**
    * Indicates whether to automatically scroll the table container to the
    * bottom when new log entries are added.
@@ -71,6 +75,7 @@
   @state()
   private _autoscrollIsEnabled = true;
 
+  @query('.jump-to-bottom-btn') private _jumpBottomBtn!: HTMLButtonElement;
   @query('.table-container') private _tableContainer!: HTMLDivElement;
   @query('table') private _table!: HTMLTableElement;
   @query('tbody') private _tableBody!: HTMLTableSectionElement;
@@ -115,7 +120,7 @@
 
     if (changedProperties.has('logs')) {
       this.setFieldNames(this.logs);
-      this.scrollTableToBottom();
+      this.handleTableScroll();
     }
 
     if (changedProperties.has('colsHidden')) {
@@ -151,19 +156,21 @@
   /** Called when the Lit virtualizer updates its range of entries. */
   private onRangeChanged = () => {
     this.updateGridTemplateColumns();
-    this.scrollTableToBottom();
+    if (this._autoscrollIsEnabled) {
+      this.scrollTableToBottom();
+    }
   };
 
-  /** Scrolls to the bottom of the table container if autoscroll is enabled. */
+  /** Scrolls to the bottom of the table container. */
   private scrollTableToBottom() {
-    if (this._autoscrollIsEnabled) {
-      const container = this._tableContainer;
+    const container = this._tableContainer;
 
-      // TODO(b/289101398): Refactor `setTimeout` usage
-      setTimeout(() => {
-        container.scrollTop = container.scrollHeight;
-      }, 0); // Complete any rendering tasks before scrolling
-    }
+    // TODO(b/289101398): Refactor `setTimeout` usage
+    setTimeout(() => {
+      container.scrollTop = container.scrollHeight;
+      this._jumpBottomBtn.hidden = true;
+      this._scrollDownOpacity = 0;
+    }, 0); // Complete any rendering tasks before scrolling
   }
 
   /** Clears the `gridTemplateColumns` value for all rows in the table. */
@@ -270,19 +277,25 @@
     const container = this._tableContainer;
     const containerWidth = container.offsetWidth;
     const scrollLeft = container.scrollLeft;
+    const scrollY =
+      container.scrollHeight - container.scrollTop - container.clientHeight;
     const maxScrollLeft = container.scrollWidth - containerWidth;
+    const rowHeight = this._tableRows[0].offsetHeight;
 
     this._scrollPercentageLeft = scrollLeft / maxScrollLeft || 0;
 
-    if (
-      container.scrollHeight - container.scrollTop <=
-      container.offsetHeight
-    ) {
+    if (Math.abs(scrollY) <= 1) {
       this._autoscrollIsEnabled = true;
+      this.requestUpdate();
       return;
     }
-    this._autoscrollIsEnabled = false;
-    this.requestUpdate();
+
+    if (Math.round(scrollY - rowHeight) >= 1) {
+      this._autoscrollIsEnabled = false;
+      this._jumpBottomBtn.hidden = false;
+      this._scrollDownOpacity = 1;
+      this.requestUpdate();
+    }
   };
 
   /**
@@ -343,7 +356,7 @@
     for (let i = 0; i < totalColumns; i++) {
       if (i === columnIndex) {
         gridTemplateColumns += `${newWidth}px `;
-        return;
+        continue;
       }
       const otherColumnHeader = this._table.querySelector(
         `th:nth-child(${i + 1})`,
@@ -378,9 +391,17 @@
             })}
           </tbody>
         </table>
-
         ${this.overflowIndicators()}
       </div>
+      <md-filled-button
+        class="jump-to-bottom-btn"
+        title="Jump to Bottom"
+        @click="${this.scrollTableToBottom}"
+        trailing-icon
+      >
+        <md-icon slot="icon" aria-hidden="true">arrow_downward</md-icon>
+        Jump to Bottom
+      </md-filled-button>
     `;
   }
 
@@ -483,6 +504,11 @@
 
   private overflowIndicators = () => html`
     <div
+      class="bottom-indicator"
+      style="opacity: ${this._scrollDownOpacity}"
+    ></div>
+
+    <div
       class="overflow-indicator left-indicator"
       style="opacity: ${this._scrollPercentageLeft}"
       ?hidden="${!this._isOverflowingToRight}"
diff --git a/pw_web/log-viewer/src/components/log-view-controls/log-view-controls.styles.ts b/pw_web/log-viewer/src/components/log-view-controls/log-view-controls.styles.ts
index 8ba7fbf..951407d 100644
--- a/pw_web/log-viewer/src/components/log-view-controls/log-view-controls.styles.ts
+++ b/pw_web/log-viewer/src/components/log-view-controls/log-view-controls.styles.ts
@@ -25,6 +25,7 @@
     height: 3rem;
     justify-content: space-between;
     padding: 0 1rem;
+    --md-list-item-leading-icon-size: 1.5rem;
   }
 
   :host > * {
@@ -66,16 +67,25 @@
     width: 100%;
   }
 
-  input {
+  .input-facade {
+    align-items: center;
     background-color: var(--sys-log-viewer-color-controls-input-bg);
-    border: none;
-    border-radius: 1.5rem;
-    font-family: 'Google Sans';
-    height: 1.75rem;
-    max-width: 20rem;
-    padding: 0 1rem;
-    width: 100%;
     border: 1px solid var(--sys-log-viewer-color-controls-input-outline);
+    border-radius: 1.5rem;
+    cursor: text;
+    display: inline-flex;
+    font-family: 'Google Sans';
+    font-size: 14px;
+    height: 1rem;
+    line-height: 1;
+    max-width: 100%;
+    min-width: 20rem;
+    padding: 0.5rem 1rem;
+    width: fit-content;
+  }
+
+  input[type='text'] {
+    display: none;
   }
 
   input::placeholder {
diff --git a/pw_web/log-viewer/src/components/log-view-controls/log-view-controls.ts b/pw_web/log-viewer/src/components/log-view-controls/log-view-controls.ts
index 11d0dc6..2e9ca9e 100644
--- a/pw_web/log-viewer/src/components/log-view-controls/log-view-controls.ts
+++ b/pw_web/log-viewer/src/components/log-view-controls/log-view-controls.ts
@@ -46,9 +46,6 @@
   @property({ type: Boolean })
   hideCloseButton = false;
 
-  @property({ type: String })
-  _searchText = '';
-
   @property({ type: Array })
   colsHidden: (boolean | undefined)[] = [];
 
@@ -60,13 +57,18 @@
   _state: State;
 
   @state()
-  _viewTitle = '';
+  _viewTitle = 'Log View';
+
+  @state()
+  _settingsMenuOpen = false;
 
   @query('.field-menu') _fieldMenu!: HTMLMenuElement;
 
-  @query('.search-input') _searchInput!: HTMLInputElement;
+  @query('#search-field') _searchField!: HTMLInputElement;
 
-  @queryAll('.item-checkboxeses') _itemCheckboxes!: HTMLCollection[];
+  @query('.input-facade') _inputFacade!: HTMLDivElement;
+
+  @queryAll('.item-checkboxes') _itemCheckboxes!: HTMLCollection[];
 
   private firstCheckboxLoad = false;
 
@@ -76,23 +78,29 @@
   /** The delay (in ms) used for debouncing search input. */
   private readonly INPUT_DEBOUNCE_DELAY = 50;
 
+  @query('.settings-menu-button') settingsMenuButtonEl!: HTMLElement;
+
   constructor() {
     super();
     this._state = this._stateStore.getState();
   }
 
   protected firstUpdated(): void {
+    let searchText = '';
     if (this._state !== null) {
       const viewConfigArr = this._state.logViewConfig;
       for (const i in viewConfigArr) {
         if (viewConfigArr[i].viewID === this.viewId) {
-          this._searchText = viewConfigArr[i].search as string;
-          this._viewTitle = viewConfigArr[i].viewTitle as string;
+          searchText = viewConfigArr[i].search as string;
+          this._viewTitle = viewConfigArr[i].viewTitle
+            ? viewConfigArr[i].viewTitle
+            : this._viewTitle;
         }
       }
     }
-    this._searchInput.value = this._searchText;
-    this._searchInput.dispatchEvent(new CustomEvent('input'));
+
+    this._inputFacade.textContent = searchText;
+    this._inputFacade.dispatchEvent(new CustomEvent('input'));
   }
 
   protected updated(): void {
@@ -118,8 +126,10 @@
       clearTimeout(this._inputDebounceTimer);
     }
 
-    const inputElement = event.target as HTMLInputElement;
-    const inputValue = inputElement.value;
+    const inputFacade = event.target as HTMLDivElement;
+    this.markKeysInText(inputFacade);
+    this._searchField.value = inputFacade.textContent || '';
+    const inputValue = this._searchField.value;
 
     this._inputDebounceTimer = window.setTimeout(() => {
       const customEvent = new CustomEvent('input-change', {
@@ -132,6 +142,26 @@
     }, this.INPUT_DEBOUNCE_DELAY);
   };
 
+  private markKeysInText(target: HTMLElement) {
+    const pattern = /\b(\w+):(?=\w)/;
+    const textContent = target.textContent || '';
+    const conditions = textContent.split(/\s+/);
+    const wordsBeforeColons: string[] = [];
+
+    for (const condition of conditions) {
+      const match = condition.match(pattern);
+      if (match) {
+        wordsBeforeColons.push(match[0]);
+      }
+    }
+  }
+
+  private handleKeydown = (event: KeyboardEvent) => {
+    if (event.key === 'Enter' || event.key === 'Cmd') {
+      event.preventDefault();
+    }
+  };
+
   /**
    * Dispatches a custom event for clearing logs. This event includes a
    * `timestamp` object indicating the date/time in which the 'clear-logs'
@@ -152,7 +182,7 @@
   /**
    * Dispatches a custom event for toggling wrapping.
    */
-  private handleWrapToggle(event: Event) {
+  private handleWrapToggle() {
     const wrapToggle = new CustomEvent('wrap-toggle', {
       bubbles: true,
       composed: true,
@@ -200,61 +230,86 @@
   }
 
   /**
-   * Opens and closes the column visibility dropdown menu.
+   * Dispatches a custom event for downloading a logs file. This event includes
+   * a `format` string indicating the format of the file to be downloaded and a
+   * `viewTitle` string which passes the title of the current view for naming
+   * the file.
    *
    * @param {Event} event - The click event object.
    */
-  private toggleColumnVisibilityMenu(event: Event) {
+  private handleDownloadLogs() {
+    const downloadLogs = new CustomEvent('download-logs', {
+      bubbles: true,
+      composed: true,
+      detail: {
+        format: 'plaintext',
+        viewTitle: this._viewTitle,
+      },
+    });
+
+    this.dispatchEvent(downloadLogs);
+  }
+
+  /**
+   * Opens and closes the column visibility dropdown menu.
+   */
+  private toggleColumnVisibilityMenu() {
     this._fieldMenu.hidden = !this._fieldMenu.hidden;
   }
 
+  /**
+   * Opens and closes the Settings menu.
+   */
+  private toggleSettingsMenu() {
+    this._settingsMenuOpen = !this._settingsMenuOpen;
+  }
+
   render() {
     return html`
       <p class="host-name"> ${this._viewTitle}</p>
 
       <div class="input-container">
-        <input class="search-input" placeholder="Search" type="text" @input=${
+        <div class="input-facade" contenteditable="plaintext-only" @input="${
           this.handleInput
-        }></input>
+        }" @keydown="${this.handleKeydown}"></div>
+        <input id="search-field" type="text"></input>
       </div>
 
       <div class="actions-container">
         <span class="action-button" hidden>
-          <md-standard-icon-button>
+          <md-icon-button>
             <md-icon>pause_circle</md-icon>
-          </md-standard-icon-button>
+          </md-icon-button>
         </span>
 
         <span class="action-button" hidden>
-          <md-standard-icon-button>
+          <md-icon-button>
             <md-icon>wrap_text</md-icon>
-          </md-standard-icon-button>
+          </md-icon-button>
         </span>
 
         <span class="action-button" title="Clear logs">
-          <md-standard-icon-button @click=${this.handleClearLogsClick}>
+          <md-icon-button @click=${this.handleClearLogsClick}>
             <md-icon>delete_sweep</md-icon>
-          </md-standard-icon-button>
+          </md-icon-button>
         </span>
 
         <span class="action-button" title="Toggle Line Wrapping">
-          <md-standard-icon-button @click=${this.handleWrapToggle} toggle>
+          <md-icon-button @click=${this.handleWrapToggle} toggle>
             <md-icon>wrap_text</md-icon>
-          </md-standard-icon-button>
+          </md-icon-button>
         </span>
 
-        <span class='field-toggle' title="Toggle fields">
-          <md-standard-icon-button @click=${
-            this.toggleColumnVisibilityMenu
-          } toggle>
+        <span class='action-button field-toggle' title="Toggle fields">
+          <md-icon-button @click=${this.toggleColumnVisibilityMenu} toggle>
             <md-icon>view_column</md-icon>
-          </md-standard-icon-button>
+          </md-icon-button>
           <menu class='field-menu' hidden>
             ${Array.from(this.fieldKeys).map(
               (field) => html`
                 <li class="field-menu-item">
                   <input
-                    class="item-checkboxeses"
+                    class="item-checkboxes"
                     @click=${this.handleColumnToggle}
                     checked
                     type="checkbox"
@@ -268,18 +323,39 @@
           </menu>
         </span>
 
+        <span class="action-button" title="Toggle fields">
+          <md-icon-button @click=${
+            this.toggleSettingsMenu
+          } class="settings-menu-button">
+            <md-icon >more_vert</md-icon>
+          </md-icon-button>
+
+          <md-menu quick fixed
+            ?open=${this._settingsMenuOpen}
+            .anchor=${this.settingsMenuButtonEl}
+            @closed=${() => {
+              this._settingsMenuOpen = false;
+            }}>
+            <md-menu-item headline="Download logs (.txt)" @click=${
+              this.handleDownloadLogs
+            } role="button">
+              <md-icon slot="start" data-variant="icon">download</md-icon>
+            </md-menu-item>
+          </md-menu>
+        </span>
+
         <span class="action-button" title="Close view" ?hidden=${
           this.hideCloseButton
         }>
-          <md-standard-icon-button @click=${this.handleCloseViewClick}>
+          <md-icon-button @click=${this.handleCloseViewClick}>
             <md-icon>close</md-icon>
-          </md-standard-icon-button>
+          </md-icon-button>
         </span>
 
         <span class="action-button" hidden>
-          <md-standard-icon-button>
+          <md-icon-button>
             <md-icon>more_horiz</md-icon>
-          </md-standard-icon-button>
+          </md-icon-button>
         </span>
       </div>
     `;
diff --git a/pw_web/log-viewer/src/components/log-view/log-view.ts b/pw_web/log-viewer/src/components/log-view/log-view.ts
index 5d670af..8bdbc0d 100644
--- a/pw_web/log-viewer/src/components/log-view/log-view.ts
+++ b/pw_web/log-viewer/src/components/log-view/log-view.ts
@@ -16,17 +16,14 @@
 import { customElement, property, query, state } from 'lit/decorators.js';
 import { styles } from './log-view.styles';
 import { LogList } from '../log-list/log-list';
-import {
-  FieldData,
-  LogColumnState,
-  LogEntry,
-  State,
-} from '../../shared/interfaces';
+import { LogColumnState, LogEntry, State } from '../../shared/interfaces';
 import { LocalStorageState, StateStore } from '../../shared/state';
+import { LogFilter } from '../../utils/log-filter/log-filter';
 import '../log-list/log-list';
 import '../log-view-controls/log-view-controls';
+import { titleCaseToKebabCase } from '../../utils/strings';
 
-type LogFilter = (logEntry: LogEntry) => boolean;
+type FilterFunction = (logEntry: LogEntry) => boolean;
 
 /**
  * A component that filters and displays incoming log entries in an encapsulated
@@ -71,14 +68,14 @@
 
   /** A function used for filtering rows that contain a certain substring. */
   @state()
-  private _stringFilter: LogFilter = () => true;
+  private _stringFilter: FilterFunction = () => true;
 
   /**
    * A function used for filtering rows that contain a timestamp within a
    * certain window.
    */
   @state()
-  private _timeFilter: LogFilter = () => true;
+  private _timeFilter: FilterFunction = () => true;
 
   /** A string representing the value contained in the search field. */
   @state()
@@ -91,17 +88,23 @@
   @state()
   _state: State;
 
-  colsHidden: (boolean | undefined)[] = [];
+  @state()
+  _colsHidden: (boolean | undefined)[] = [];
 
   @query('log-list') _logList!: LogList;
 
+  private _debounceTimeout: NodeJS.Timeout | null = null;
+
+  /** The amount of time, in ms, before the filter expression is executed. */
+  private readonly FILTER_DELAY = 100;
+
   constructor() {
     super();
     this._state = this._stateStore.getState();
   }
 
   protected firstUpdated(): void {
-    this.colsHidden = [];
+    this._colsHidden = [];
 
     if (this._state) {
       const viewConfigArr = this._state.logViewConfig;
@@ -110,9 +113,9 @@
       if (index !== -1) {
         viewConfigArr[index].search = this.searchText;
         viewConfigArr[index].columns.map((i: LogColumnState) => {
-          this.colsHidden.push(i.hidden);
+          this._colsHidden.push(i.hidden);
         });
-        this.colsHidden.unshift(undefined);
+        this._colsHidden.unshift(undefined);
       }
     }
   }
@@ -133,24 +136,41 @@
    *   to update the filter.
    */
   private updateFilter(event: CustomEvent) {
+    this.searchText = event.detail.inputValue;
+    const viewConfigArr = this._state.logViewConfig;
+    const index = viewConfigArr.findIndex((i) => this.id === i.viewID);
+
     switch (event.type) {
       case 'input-change':
-        this.searchText = event.detail.inputValue;
+        if (this._debounceTimeout) {
+          clearTimeout(this._debounceTimeout);
+        }
+
+        if (index !== -1) {
+          viewConfigArr[index].search = this.searchText;
+          this._state = { logViewConfig: viewConfigArr };
+          this._stateStore.setState({ logViewConfig: viewConfigArr });
+        }
 
         if (!this.searchText) {
           this._stringFilter = () => true;
           return;
         }
 
-        this._stringFilter = (logEntry: LogEntry) =>
-          logEntry.fields
-            .filter(
-              // Exclude severity field, since its text is omitted from the table
-              (field: FieldData) => field.key !== 'severity',
-            )
-            .some((field: FieldData) =>
-              new RegExp(this.searchText, 'i').test(field.value.toString()),
-            );
+        // Run the filter after the timeout delay
+        this._debounceTimeout = setTimeout(() => {
+          const filters = LogFilter.parseSearchQuery(this.searchText).map(
+            (condition) => LogFilter.createFilterFunction(condition),
+          );
+          this._stringFilter =
+            filters.length > 0
+              ? (logEntry: LogEntry) =>
+                  filters.some((filter) => filter(logEntry))
+              : () => true;
+
+          this.filterLogs();
+          this.requestUpdate();
+        }, this.FILTER_DELAY);
         break;
       case 'clear-logs':
         this._timeFilter = (logEntry) =>
@@ -160,14 +180,6 @@
         break;
     }
 
-    const viewConfigArr = this._state.logViewConfig;
-    const index = viewConfigArr.findIndex((i) => this.id === i.viewID);
-    if (index !== -1) {
-      viewConfigArr[index].search = this.searchText;
-      this._state = { logViewConfig: viewConfigArr };
-      this._stateStore.setState({ logViewConfig: viewConfigArr });
-    }
-
     this.filterLogs();
     this.requestUpdate();
   }
@@ -204,16 +216,16 @@
     const viewConfigArr = this._state.logViewConfig;
     let colIndex = -1;
 
-    this.colsHidden = [];
+    this._colsHidden = [];
     const index = viewConfigArr
       .map((i) => {
         return i.viewID;
       })
       .indexOf(this.id);
     viewConfigArr[index].columns.map((i: LogColumnState) => {
-      this.colsHidden.push(i.hidden);
+      this._colsHidden.push(i.hidden);
     });
-    this.colsHidden.unshift(undefined);
+    this._colsHidden.unshift(undefined);
 
     this._fieldKeys.forEach((field: string, i: number) => {
       if (field == event.detail.field) {
@@ -222,8 +234,8 @@
       }
     });
 
-    this.colsHidden[colIndex + 1] = !event.detail.isChecked; // Exclude first column (severity)
-    this._logList.colsHidden = [...this.colsHidden];
+    this._colsHidden[colIndex + 1] = !event.detail.isChecked; // Exclude first column (severity)
+    this._logList.colsHidden = [...this._colsHidden];
 
     this._state = { logViewConfig: viewConfigArr };
     this._stateStore.setState({ logViewConfig: viewConfigArr });
@@ -251,9 +263,50 @@
     );
   }
 
+  /**
+   * Generates a log file in the specified format and initiates its download.
+   *
+   * @param {CustomEvent} event - The click event.
+   */
+  private downloadLogs(event: CustomEvent) {
+    const headers = this.logs[0]?.fields.map((field) => field.key) || [];
+    const maxWidths = headers.map((header) => header.length);
+    const viewTitle = event.detail.viewTitle;
+    const fileName = viewTitle ? titleCaseToKebabCase(viewTitle) : 'logs';
+
+    this.logs.forEach((log) => {
+      log.fields.forEach((field, columnIndex) => {
+        maxWidths[columnIndex] = Math.max(
+          maxWidths[columnIndex],
+          field.value.toString().length,
+        );
+      });
+    });
+
+    const headerRow = headers
+      .map((header, columnIndex) => header.padEnd(maxWidths[columnIndex]))
+      .join('\t');
+    const separator = '';
+    const logRows = this.logs.map((log) => {
+      const values = log.fields.map((field, columnIndex) =>
+        field.value.toString().padEnd(maxWidths[columnIndex]),
+      );
+      return values.join('\t');
+    });
+
+    const formattedLogs = [headerRow, separator, ...logRows].join('\n');
+    const blob = new Blob([formattedLogs], { type: 'text/plain' });
+    const downloadLink = document.createElement('a');
+    downloadLink.href = URL.createObjectURL(blob);
+    downloadLink.download = `${fileName}.txt`;
+    downloadLink.click();
+
+    URL.revokeObjectURL(downloadLink.href);
+  }
+
   render() {
     return html` <log-view-controls
-        .colsHidden=${[...this.colsHidden]}
+        .colsHidden=${[...this._colsHidden]}
         .viewId=${this.id}
         .fieldKeys=${this._fieldKeys}
         .hideCloseButton=${!this.isOneOfMany}
@@ -262,12 +315,13 @@
         @clear-logs="${this.updateFilter}"
         @column-toggle="${this.toggleColumns}"
         @wrap-toggle="${this.toggleWrapping}"
+        @download-logs="${this.downloadLogs}"
         role="toolbar"
       >
       </log-view-controls>
 
       <log-list
-        .colsHidden=${[...this.colsHidden]}
+        .colsHidden=${[...this._colsHidden]}
         .lineWrap=${this._lineWrap}
         .viewId=${this.id}
         .logs=${this._filteredLogs}
diff --git a/pw_web/log-viewer/src/components/log-viewer.styles.ts b/pw_web/log-viewer/src/components/log-viewer.styles.ts
index bae4dd5..620a2c0 100644
--- a/pw_web/log-viewer/src/components/log-viewer.styles.ts
+++ b/pw_web/log-viewer/src/components/log-viewer.styles.ts
@@ -23,7 +23,7 @@
     display: flex;
     flex-direction: column;
     gap: 2rem;
-    height: 100%;
+    height: var(--sys-log-viewer-height);
   }
 
   button {
diff --git a/pw_web/log-viewer/src/createLogViewer.ts b/pw_web/log-viewer/src/createLogViewer.ts
index 9219808..d43a908 100644
--- a/pw_web/log-viewer/src/createLogViewer.ts
+++ b/pw_web/log-viewer/src/createLogViewer.ts
@@ -23,8 +23,10 @@
 import '@material/web/field/outlined-field.js';
 import '@material/web/textfield/outlined-text-field.js';
 import '@material/web/textfield/filled-text-field.js';
-import '@material/web/iconbutton/standard-icon-button.js';
 import '@material/web/icon/icon.js';
+import '@material/web/iconbutton/icon-button.js';
+import '@material/web/menu/menu.js';
+import '@material/web/menu/menu-item.js';
 
 export function createLogViewer(
   logSource: LogSource,
@@ -34,12 +36,19 @@
   const logViewer = new RootComponent(state);
   const logs: LogEntry[] = [];
   root.appendChild(logViewer);
+  let lastUpdateTimeoutId: NodeJS.Timeout;
 
   // Define an event listener for the 'logEntry' event
   const logEntryListener = (logEntry: LogEntry) => {
     logs.push(logEntry);
     logViewer.logs = logs;
-    logViewer.requestUpdate('logs', []);
+    if (lastUpdateTimeoutId) {
+      clearTimeout(lastUpdateTimeoutId);
+    }
+    // Call requestUpdate at most once every 100 milliseconds.
+    lastUpdateTimeoutId = setTimeout(() => {
+      logViewer.requestUpdate('logs', []);
+    }, 100);
   };
 
   // Add the event listener to the LogSource instance
diff --git a/pw_web/log-viewer/src/custom/json-log-source.ts b/pw_web/log-viewer/src/custom/json-log-source.ts
new file mode 100644
index 0000000..8914b1b
--- /dev/null
+++ b/pw_web/log-viewer/src/custom/json-log-source.ts
@@ -0,0 +1,131 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+import { LogSource } from '../log-source';
+import { LogEntry, FieldData, Severity } from '../shared/interfaces';
+
+import log_data from './log_data.json';
+
+interface LevelToSeverity {
+  [level: number]: Severity;
+}
+
+export class JsonLogSource extends LogSource {
+  private intervalId: NodeJS.Timeout | null = null;
+  private logIndex: number = 0;
+  private previousLogTime: number = 0;
+
+  private logLevelToSeverity: LevelToSeverity = {
+    10: Severity.DEBUG,
+    20: Severity.INFO,
+    21: Severity.INFO,
+    30: Severity.WARNING,
+    40: Severity.ERROR,
+    50: Severity.CRITICAL,
+    70: Severity.CRITICAL,
+  };
+
+  private nonAdditionalDataFields = [
+    '_hosttime',
+    'levelname',
+    'levelno',
+    'args',
+    'fields',
+    'message',
+    'time',
+  ];
+
+  constructor() {
+    super();
+  }
+
+  start(): void {
+    this.updateLogTime();
+
+    const getInterval = () => {
+      // Get the current log time
+      const next_log_time = Number(log_data[this.logIndex].time);
+      const wait_ms = 1000 * (next_log_time - this.previousLogTime);
+
+      this.updateLogTime();
+      return Math.round(wait_ms);
+    };
+
+    const readLogEntry = () => {
+      const logEntry = this.readLogEntryFromJson();
+      this.emitEvent('logEntry', logEntry);
+
+      const nextInterval = getInterval();
+      setTimeout(readLogEntry, nextInterval);
+    };
+
+    readLogEntry();
+  }
+
+  stop(): void {
+    if (this.intervalId) {
+      clearInterval(this.intervalId);
+      this.intervalId = null;
+    }
+  }
+
+  private updateLogTime(): void {
+    this.previousLogTime = Number(log_data[this.logIndex].time);
+  }
+
+  private updateLogIndex(): void {
+    this.logIndex += 1;
+    if (this.logIndex >= log_data.length) {
+      this.logIndex = 0;
+    }
+  }
+
+  readLogEntryFromJson(): LogEntry {
+    const data = log_data[this.logIndex];
+
+    const host_log_time = new Date(0); // Date set to epoch seconds 0
+    const host_log_epoch_seconds = Number(data.time);
+    host_log_time.setUTCSeconds(Math.trunc(host_log_epoch_seconds));
+    const host_log_epoch_milliseconds = Math.trunc(
+      1000 * (host_log_epoch_seconds - Math.trunc(host_log_epoch_seconds)),
+    );
+    host_log_time.setUTCMilliseconds(host_log_epoch_milliseconds);
+
+    const fields: Array<FieldData> = [
+      { key: 'severity', value: this.logLevelToSeverity[data.levelno] },
+      { key: 'time', value: host_log_time },
+    ];
+
+    Object.keys(data.fields).forEach((columnName) => {
+      if (this.nonAdditionalDataFields.indexOf(columnName) === -1) {
+        // @ts-ignore
+        fields.push({ key: columnName, value: data.fields[columnName] });
+      }
+    });
+
+    fields.push({ key: 'message', value: data.message });
+    fields.push({ key: 'py_file', value: data.py_file || '' });
+    fields.push({ key: 'py_logger', value: data.py_logger || '' });
+
+    const logEntry: LogEntry = {
+      severity: this.logLevelToSeverity[data.levelno],
+      timestamp: new Date(),
+      fields: fields,
+    };
+
+    this.updateLogIndex();
+
+    return logEntry;
+  }
+}
diff --git a/pw_web/log-viewer/src/custom/log_data.json b/pw_web/log-viewer/src/custom/log_data.json
new file mode 100644
index 0000000..28d9af1
--- /dev/null
+++ b/pw_web/log-viewer/src/custom/log_data.json
@@ -0,0 +1,5 @@
+[
+  {"message": "Sample log_data.json", "levelno": 30, "levelname": "WRN", "time": "1692302986.4599075", "time_string": "2023-08-17T13:09:46", "fields": {"module": "pigweedjs", "timestamp": "1.0"}},
+  {"message": "Log message 1", "levelno": 20, "levelname": "INF", "time": "1692302986.4599075", "time_string": "2023-08-17T13:09:46", "fields": {"module": "device", "file": "sample_file.cc", "timestamp": "1.0"}},
+  {"message": "Log message 2", "levelno": 20, "levelname": "INF", "time": "1692303000.1080465", "time_string": "2023-08-17T13:10:00", "fields": {"module": "device", "file": "sample_file.cc", "timestamp": "14.0"}}
+]
diff --git a/pw_web/log-viewer/src/custom/mock-log-source.ts b/pw_web/log-viewer/src/custom/mock-log-source.ts
index 5408388..22cbec6 100644
--- a/pw_web/log-viewer/src/custom/mock-log-source.ts
+++ b/pw_web/log-viewer/src/custom/mock-log-source.ts
@@ -89,7 +89,7 @@
       'Database connection lost. Attempting to reconnect.',
       'User authentication failed. Invalid credentials provided.',
       'System reboot initiated. Please wait for the system to come back online.',
-      'File not found. The requested file does not exist.',
+      'File not found. (The requested file does not exist).',
       'Data corruption detected. Initiating recovery process.',
       'Network congestion detected. Traffic is high, please try again later.',
       'Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nullam condimentum auctor justo, sit amet condimentum nibh facilisis non. Quisque in quam a urna dignissim cursus. Suspendisse egestas nisl sed massa dictum dictum. In tincidunt arcu nec odio eleifend, vel pharetra justo iaculis. Vivamus quis tellus ac velit vehicula consequat. Nam eu felis sed risus hendrerit faucibus ac id lacus. Vestibulum tincidunt tellus in ex feugiat interdum. Nulla sit amet luctus neque. Mauris et aliquet nunc, vel finibus massa. Curabitur laoreet eleifend nibh eget luctus. Fusce sodales augue nec purus faucibus, vel tristique enim vehicula. Aenean eu magna eros. Fusce accumsan dignissim dui auctor scelerisque. Proin ultricies nunc vel tincidunt facilisis.',
diff --git a/pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h b/pw_web/log-viewer/src/events/download-logs.ts
similarity index 65%
copy from pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h
copy to pw_web/log-viewer/src/events/download-logs.ts
index af31532..fc9b54a 100644
--- a/pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h
+++ b/pw_web/log-viewer/src/events/download-logs.ts
@@ -1,4 +1,4 @@
-// Copyright 2020 The Pigweed Authors
+// Copyright 2023 The Pigweed Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License"); you may not
 // use this file except in compliance with the License. You may obtain a copy of
@@ -11,9 +11,18 @@
 // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 // License for the specific language governing permissions and limitations under
 // the License.
-#pragma once
 
-// The HDLC address to which to write Base64-encoded tokenized logs.
-#ifndef PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS
-#define PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS 1
-#endif  // PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS
+interface DownloadLogsEvent extends CustomEvent {
+  detail: {
+    format: string;
+    viewTitle: string;
+  };
+}
+
+declare global {
+  interface GlobalEventHandlersEventMap {
+    'download-logs': DownloadLogsEvent;
+  }
+}
+
+export default DownloadLogsEvent;
diff --git a/pw_web/log-viewer/src/index.css b/pw_web/log-viewer/src/index.css
index 0cd5b6a..ac1a286 100644
--- a/pw_web/log-viewer/src/index.css
+++ b/pw_web/log-viewer/src/index.css
@@ -40,6 +40,7 @@
     --md-icon-button-unselected-hover-icon-color: var(--md-sys-color-on-primary-container);
 
     /* Log View */
+    --sys-log-viewer-height: 100vh;
     --sys-log-viewer-view-outline-width: 1px;
     --sys-log-viewer-view-corner-radius: 0.5rem;
 }
diff --git a/pw_web/log-viewer/src/index.ts b/pw_web/log-viewer/src/index.ts
index 159a294..b503995 100644
--- a/pw_web/log-viewer/src/index.ts
+++ b/pw_web/log-viewer/src/index.ts
@@ -12,26 +12,17 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
-import { MockLogSource } from './custom/mock-log-source';
+import { JsonLogSource } from './custom/json-log-source';
 import { createLogViewer } from './createLogViewer';
 
-const logSource = new MockLogSource();
+const logSource = new JsonLogSource();
 const containerEl = document.querySelector(
   '#log-viewer-container',
 ) as HTMLElement;
 
-let unsubscribe: () => void;
-
 if (containerEl) {
-  unsubscribe = createLogViewer(logSource, containerEl);
+  createLogViewer(logSource, containerEl);
 }
 
-const TIMEOUT_DURATION = 60_000; // ms
 // Start reading log data
 logSource.start();
-
-// Stop reading log data once timeout duration has elapsed
-setTimeout(() => {
-  logSource.stop();
-  unsubscribe();
-}, TIMEOUT_DURATION);
diff --git a/pw_web/log-viewer/src/utils/log-filter/log-filter.models.ts b/pw_web/log-viewer/src/utils/log-filter/log-filter.models.ts
new file mode 100644
index 0000000..2fabd7b
--- /dev/null
+++ b/pw_web/log-viewer/src/utils/log-filter/log-filter.models.ts
@@ -0,0 +1,61 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+export enum ConditionType {
+  StringSearch,
+  ColumnSearch,
+  ExactPhraseSearch,
+  AndExpression,
+  OrExpression,
+  NotExpression,
+}
+
+export type StringSearchCondition = {
+  type: ConditionType.StringSearch;
+  searchString: string;
+};
+
+export type ColumnSearchCondition = {
+  type: ConditionType.ColumnSearch;
+  column: string;
+  value?: string;
+};
+
+export type ExactPhraseSearchCondition = {
+  type: ConditionType.ExactPhraseSearch;
+  exactPhrase: string;
+};
+
+export type AndExpressionCondition = {
+  type: ConditionType.AndExpression;
+  expressions: FilterCondition[];
+};
+
+export type OrExpressionCondition = {
+  type: ConditionType.OrExpression;
+  expressions: FilterCondition[];
+};
+
+export type NotExpressionCondition = {
+  type: ConditionType.NotExpression;
+  expression: FilterCondition;
+};
+
+export type FilterCondition =
+  | ColumnSearchCondition
+  | StringSearchCondition
+  | ExactPhraseSearchCondition
+  | AndExpressionCondition
+  | OrExpressionCondition
+  | NotExpressionCondition;
diff --git a/pw_web/log-viewer/src/utils/log-filter/log-filter.ts b/pw_web/log-viewer/src/utils/log-filter/log-filter.ts
new file mode 100644
index 0000000..344bba7
--- /dev/null
+++ b/pw_web/log-viewer/src/utils/log-filter/log-filter.ts
@@ -0,0 +1,254 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+import { LogEntry } from '../../shared/interfaces';
+import { FilterCondition, ConditionType } from './log-filter.models';
+
+export class LogFilter {
+  /**
+   * Generates a structured representation of filter conditions which can be
+   * used to filter log entries.
+   *
+   * @param {string} searchQuery - The search query string provided.
+   * @returns {function[]} An array of filter functions, each representing a
+   *   set of conditions grouped by logical operators, for filtering log
+   *   entries.
+   */
+  static parseSearchQuery(searchQuery: string): FilterCondition[] {
+    const filters: FilterCondition[] = [];
+    const orGroups = searchQuery.split(/\s*\|\s*/);
+
+    for (let i = 0; i < orGroups.length; i++) {
+      let orGroup = orGroups[i];
+
+      if (orGroup.includes('(') && !orGroup.includes(')')) {
+        let j = i + 1;
+        while (j < orGroups.length && !orGroups[j].includes(')')) {
+          orGroup += ` | ${orGroups[j]}`;
+          j++;
+        }
+
+        if (j < orGroups.length) {
+          orGroup += ` | ${orGroups[j]}`;
+          i = j;
+        }
+      }
+
+      const andConditions = orGroup.match(
+        /\([^()]*\)|"[^"]+"|[^\s:]+:[^\s]+|[^\s]+/g,
+      );
+
+      const andFilters: FilterCondition[] = [];
+
+      if (andConditions) {
+        for (const condition of andConditions) {
+          if (condition.startsWith('(') && condition.endsWith(')')) {
+            const nestedConditions = condition.slice(1, -1).trim();
+            andFilters.push(...this.parseSearchQuery(nestedConditions));
+          } else if (condition.startsWith('"') && condition.endsWith('"')) {
+            const exactPhrase = condition.slice(1, -1).trim();
+            andFilters.push({
+              type: ConditionType.ExactPhraseSearch,
+              exactPhrase,
+            });
+          } else if (condition.startsWith('!')) {
+            const column = condition.slice(1, condition.indexOf(':'));
+            const value = condition.slice(condition.indexOf(':') + 1);
+            andFilters.push({
+              type: ConditionType.NotExpression,
+              expression: {
+                type: ConditionType.ColumnSearch,
+                column,
+                value,
+              },
+            });
+          } else if (condition.endsWith(':')) {
+            const column = condition.slice(0, condition.indexOf(':'));
+            andFilters.push({
+              type: ConditionType.ColumnSearch,
+              column,
+            });
+          } else if (condition.includes(':')) {
+            const column = condition.slice(0, condition.indexOf(':'));
+            const value = condition.slice(condition.indexOf(':') + 1);
+            andFilters.push({
+              type: ConditionType.ColumnSearch,
+              column,
+              value,
+            });
+          } else {
+            andFilters.push({
+              type: ConditionType.StringSearch,
+              searchString: condition,
+            });
+          }
+        }
+      }
+
+      if (andFilters.length > 0) {
+        if (andFilters.length === 1) {
+          filters.push(andFilters[0]);
+        } else {
+          filters.push({
+            type: ConditionType.AndExpression,
+            expressions: andFilters,
+          });
+        }
+      }
+    }
+
+    if (filters.length === 0) {
+      filters.push({
+        type: ConditionType.StringSearch,
+        searchString: '',
+      });
+    }
+
+    if (filters.length > 1) {
+      return [
+        {
+          type: ConditionType.OrExpression,
+          expressions: filters,
+        },
+      ];
+    }
+
+    return filters;
+  }
+
+  /**
+   * Takes a condition node, which represents a specific filter condition, and
+   * recursively generates a filter function that can be applied to log
+   * entries.
+   *
+   * @param {FilterCondition} condition - A filter condition to convert to a
+   *   function.
+   * @returns {function} A function for filtering log entries based on the
+   *   input condition and its logical operators.
+   */
+  static createFilterFunction(
+    condition: FilterCondition,
+  ): (logEntry: LogEntry) => boolean {
+    switch (condition.type) {
+      case ConditionType.StringSearch:
+        return (logEntry) =>
+          this.checkStringInColumns(logEntry, condition.searchString);
+      case ConditionType.ExactPhraseSearch:
+        return (logEntry) =>
+          this.checkExactPhraseInColumns(logEntry, condition.exactPhrase);
+      case ConditionType.ColumnSearch:
+        return (logEntry) =>
+          this.checkColumn(logEntry, condition.column, condition.value);
+      case ConditionType.NotExpression: {
+        const innerFilter = this.createFilterFunction(condition.expression);
+        return (logEntry) => !innerFilter(logEntry);
+      }
+      case ConditionType.AndExpression: {
+        const andFilters = condition.expressions.map((expr) =>
+          this.createFilterFunction(expr),
+        );
+        return (logEntry) => andFilters.every((filter) => filter(logEntry));
+      }
+      case ConditionType.OrExpression: {
+        const orFilters = condition.expressions.map((expr) =>
+          this.createFilterFunction(expr),
+        );
+        return (logEntry) => orFilters.some((filter) => filter(logEntry));
+      }
+      default:
+        // Return a filter that matches all entries
+        return () => true;
+    }
+  }
+
+  /**
+   * Checks if the column exists in a log entry and then performs a value
+   * search on the column's value.
+   *
+   * @param {LogEntry} logEntry - The log entry to be searched.
+   * @param {string} column - The name of the column (log entry field) to be
+   *   checked for filtering.
+   * @param {string} value - An optional string that represents the value used
+   *   for filtering.
+   * @returns {boolean} True if the specified column exists in the log entry,
+   *   or if a value is provided, returns true if the value matches a
+   *   substring of the column's value (case-insensitive).
+   */
+  private static checkColumn(
+    logEntry: LogEntry,
+    column: string,
+    value?: string,
+  ): boolean {
+    const fieldData = logEntry.fields.find((field) => field.key === column);
+    if (!fieldData) return false;
+
+    if (value === undefined) {
+      return true;
+    }
+
+    const searchRegex = new RegExp(value, 'i');
+    return searchRegex.test(fieldData.value.toString());
+  }
+
+  /**
+   * Checks if the provided search string exists in any of the log entry
+   * columns (excluding `severity`).
+   *
+   * @param {LogEntry} logEntry - The log entry to be searched.
+   * @param {string} searchString - The search string to be matched against
+   *   the log entry fields.
+   * @returns {boolean} True if the search string is found in any of the log
+   *   entry fields, otherwise false.
+   */
+  private static checkStringInColumns(
+    logEntry: LogEntry,
+    searchString: string,
+  ): boolean {
+    const escapedSearchString = this.escapeRegEx(searchString);
+    const columnsToSearch = logEntry.fields.filter(
+      (field) => field.key !== 'severity',
+    );
+    return columnsToSearch.some((field) =>
+      new RegExp(escapedSearchString, 'i').test(field.value.toString()),
+    );
+  }
+
+  /**
+   * Checks if the exact phrase exists in any of the log entry columns
+   * (excluding `severity`).
+   *
+   * @param {LogEntry} logEntry - The log entry to be searched.
+   * @param {string} exactPhrase - The exact phrase to search for within the
+   *   log entry columns.
+   * @returns {boolean} True if the exact phrase is found in any column,
+   *   otherwise false.
+   */
+  private static checkExactPhraseInColumns(
+    logEntry: LogEntry,
+    exactPhrase: string,
+  ): boolean {
+    const escapedExactPhrase = this.escapeRegEx(exactPhrase);
+    const searchRegex = new RegExp(escapedExactPhrase, 'i');
+    const columnsToSearch = logEntry.fields.filter(
+      (field) => field.key !== 'severity',
+    );
+    return columnsToSearch.some((field) =>
+      searchRegex.test(field.value.toString()),
+    );
+  }
+
+  private static escapeRegEx(text: string) {
+    return text.replace(/[-[\]{}()*+?.,\\^$|#\s]/g, '\\$&');
+  }
+}
diff --git a/pw_web/log-viewer/src/utils/log-filter/log-filter_test.ts b/pw_web/log-viewer/src/utils/log-filter/log-filter_test.ts
new file mode 100644
index 0000000..63faa39
--- /dev/null
+++ b/pw_web/log-viewer/src/utils/log-filter/log-filter_test.ts
@@ -0,0 +1,173 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+import { LogFilter } from './log-filter';
+import { Severity, LogEntry } from '../../shared/interfaces';
+import { describe, expect, test } from '@jest/globals';
+import testData from './test-data';
+
+describe('LogFilter', () => {
+  describe('parseSearchQuery()', () => {
+    describe('parses search queries correctly', () => {
+      testData.forEach(({ query, expected }) => {
+        test(`parses "${query}" correctly`, () => {
+          const filters = LogFilter.parseSearchQuery(query);
+          expect(filters).toEqual(expected);
+        });
+      });
+    });
+  });
+
+  describe('createFilterFunction()', () => {
+    describe('filters log entries correctly', () => {
+      const logEntry1: LogEntry = {
+        timestamp: new Date(),
+        severity: Severity.INFO,
+        fields: [
+          { key: 'source', value: 'application' },
+          {
+            key: 'message',
+            value: 'Request processed successfully!',
+          },
+        ],
+      };
+
+      const logEntry2: LogEntry = {
+        timestamp: new Date(),
+        severity: Severity.WARNING,
+        fields: [
+          { key: 'source', value: 'database' },
+          {
+            key: 'message',
+            value: 'Database connection lost. Attempting to reconnect.',
+          },
+        ],
+      };
+
+      const logEntry3: LogEntry = {
+        timestamp: new Date(),
+        severity: Severity.ERROR,
+        fields: [
+          { key: 'source', value: 'network' },
+          {
+            key: 'message',
+            value:
+              'An unexpected error occurred while performing the operation.',
+          },
+        ],
+      };
+
+      const logEntries = [logEntry1, logEntry2, logEntry3];
+
+      test('should filter by simple string search', () => {
+        const searchQuery = 'error';
+        const filters = LogFilter.parseSearchQuery(searchQuery).map((node) =>
+          LogFilter.createFilterFunction(node),
+        );
+        expect(filters.length).toBe(1);
+        expect(logEntries.filter(filters[0])).toEqual([logEntry3]);
+      });
+
+      test('should filter by column-specific search', () => {
+        const searchQuery = 'source:database';
+        const filters = LogFilter.parseSearchQuery(searchQuery).map((node) =>
+          LogFilter.createFilterFunction(node),
+        );
+        expect(filters.length).toBe(1);
+        expect(logEntries.filter(filters[0])).toEqual([logEntry2]);
+      });
+
+      test('should filter by exact phrase', () => {
+        const searchQuery = '"Request processed successfully!"';
+        const filters = LogFilter.parseSearchQuery(searchQuery).map((node) =>
+          LogFilter.createFilterFunction(node),
+        );
+        expect(filters.length).toBe(1);
+        expect(logEntries.filter(filters[0])).toEqual([logEntry1]);
+      });
+
+      test('should filter by column presence', () => {
+        const searchQuery = 'source:';
+        const filters = LogFilter.parseSearchQuery(searchQuery).map((node) =>
+          LogFilter.createFilterFunction(node),
+        );
+        expect(filters.length).toBe(1);
+        expect(logEntries.filter(filters[0])).toEqual([
+          logEntry1,
+          logEntry2,
+          logEntry3,
+        ]);
+      });
+
+      test('should handle AND expressions', () => {
+        const searchQuery = 'source:network message:error';
+        const filters = LogFilter.parseSearchQuery(searchQuery).map((node) =>
+          LogFilter.createFilterFunction(node),
+        );
+        expect(filters.length).toBe(1);
+        expect(logEntries.filter(filters[0])).toEqual([logEntry3]);
+      });
+
+      test('should handle OR expressions', () => {
+        const searchQuery = 'source:database | source:network';
+        const filters = LogFilter.parseSearchQuery(searchQuery).map((node) =>
+          LogFilter.createFilterFunction(node),
+        );
+        expect(filters.length).toBe(1);
+        expect(logEntries.filter(filters[0])).toEqual([logEntry2, logEntry3]);
+      });
+
+      test('should handle NOT expressions', () => {
+        const searchQuery = '!source:database';
+        const filters = LogFilter.parseSearchQuery(searchQuery).map((node) =>
+          LogFilter.createFilterFunction(node),
+        );
+        expect(filters.length).toBe(1);
+        expect(logEntries.filter(filters[0])).toEqual([logEntry1, logEntry3]);
+      });
+
+      test('should handle a combination of AND and OR expressions', () => {
+        const searchQuery = '(source:database | source:network) message:error';
+        const filters = LogFilter.parseSearchQuery(searchQuery).map((node) =>
+          LogFilter.createFilterFunction(node),
+        );
+        expect(filters.length).toBe(1);
+        expect(logEntries.filter(filters[0])).toEqual([logEntry3]);
+      });
+
+      test('should handle a combination of AND, OR, and NOT expressions', () => {
+        const searchQuery =
+          '(source:application | source:database) !message:request';
+        const filters = LogFilter.parseSearchQuery(searchQuery).map((node) =>
+          LogFilter.createFilterFunction(node),
+        );
+        expect(filters.length).toBe(1);
+        expect(logEntries.filter(filters[0])).toEqual([logEntry2]);
+      });
+
+      test('should handle an empty query', () => {
+        const searchQuery = '';
+        const filters = LogFilter.parseSearchQuery(searchQuery).map((node) =>
+          LogFilter.createFilterFunction(node),
+        );
+        expect(filters.length).toBe(1);
+        expect(logEntries.filter(filters[0])).toEqual([
+          logEntry1,
+          logEntry2,
+          logEntry3,
+        ]);
+      });
+    });
+  });
+});
diff --git a/pw_web/log-viewer/src/utils/log-filter/test-data.ts b/pw_web/log-viewer/src/utils/log-filter/test-data.ts
new file mode 100644
index 0000000..e7fd7c1
--- /dev/null
+++ b/pw_web/log-viewer/src/utils/log-filter/test-data.ts
@@ -0,0 +1,361 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+import { ConditionType } from './log-filter.models';
+
+const testData = [
+  {
+    query: 'error',
+    expected: [
+      {
+        type: ConditionType.StringSearch,
+        searchString: 'error',
+      },
+    ],
+  },
+  {
+    query: 'source:database',
+    expected: [
+      {
+        type: ConditionType.ColumnSearch,
+        column: 'source',
+        value: 'database',
+      },
+    ],
+  },
+  {
+    query: '"Request processed successfully!"',
+    expected: [
+      {
+        type: ConditionType.ExactPhraseSearch,
+        exactPhrase: 'Request processed successfully!',
+      },
+    ],
+  },
+  {
+    query: 'source:',
+    expected: [
+      {
+        type: ConditionType.ColumnSearch,
+        column: 'source',
+      },
+    ],
+  },
+  {
+    query: 'source:network message:error',
+    expected: [
+      {
+        type: ConditionType.AndExpression,
+        expressions: [
+          {
+            type: ConditionType.ColumnSearch,
+            column: 'source',
+            value: 'network',
+          },
+          {
+            type: ConditionType.ColumnSearch,
+            column: 'message',
+            value: 'error',
+          },
+        ],
+      },
+    ],
+  },
+  {
+    query: 'source:database | source:network',
+    expected: [
+      {
+        type: ConditionType.OrExpression,
+        expressions: [
+          {
+            type: ConditionType.ColumnSearch,
+            column: 'source',
+            value: 'database',
+          },
+          {
+            type: ConditionType.ColumnSearch,
+            column: 'source',
+            value: 'network',
+          },
+        ],
+      },
+    ],
+  },
+  {
+    query: '!source:database',
+    expected: [
+      {
+        type: ConditionType.NotExpression,
+        expression: {
+          type: ConditionType.ColumnSearch,
+          column: 'source',
+          value: 'database',
+        },
+      },
+    ],
+  },
+  {
+    query: 'message:error (source:database | source:network)',
+    expected: [
+      {
+        type: ConditionType.AndExpression,
+        expressions: [
+          {
+            type: ConditionType.ColumnSearch,
+            column: 'message',
+            value: 'error',
+          },
+          {
+            type: ConditionType.OrExpression,
+            expressions: [
+              {
+                type: ConditionType.ColumnSearch,
+                column: 'source',
+                value: 'database',
+              },
+              {
+                type: ConditionType.ColumnSearch,
+                column: 'source',
+                value: 'network',
+              },
+            ],
+          },
+        ],
+      },
+    ],
+  },
+  {
+    query: '(source:database | source:network) message:error',
+    expected: [
+      {
+        type: ConditionType.AndExpression,
+        expressions: [
+          {
+            type: ConditionType.OrExpression,
+            expressions: [
+              {
+                type: ConditionType.ColumnSearch,
+                column: 'source',
+                value: 'database',
+              },
+              {
+                type: ConditionType.ColumnSearch,
+                column: 'source',
+                value: 'network',
+              },
+            ],
+          },
+          {
+            type: ConditionType.ColumnSearch,
+            column: 'message',
+            value: 'error',
+          },
+        ],
+      },
+    ],
+  },
+  {
+    query: '(source:application | source:database) !message:request',
+    expected: [
+      {
+        type: ConditionType.AndExpression,
+        expressions: [
+          {
+            type: ConditionType.OrExpression,
+            expressions: [
+              {
+                type: ConditionType.ColumnSearch,
+                column: 'source',
+                value: 'application',
+              },
+              {
+                type: ConditionType.ColumnSearch,
+                column: 'source',
+                value: 'database',
+              },
+            ],
+          },
+          {
+            type: ConditionType.NotExpression,
+            expression: {
+              type: ConditionType.ColumnSearch,
+              column: 'message',
+              value: 'request',
+            },
+          },
+        ],
+      },
+    ],
+  },
+  {
+    query: '',
+    expected: [
+      {
+        type: ConditionType.StringSearch,
+        searchString: '',
+      },
+    ],
+  },
+  {
+    // Note: AND takes priority over OR in evaluation.
+    query: 'source:database message:error | source:network message:error',
+    expected: [
+      {
+        type: ConditionType.OrExpression,
+        expressions: [
+          {
+            type: ConditionType.AndExpression,
+            expressions: [
+              {
+                type: ConditionType.ColumnSearch,
+                column: 'source',
+                value: 'database',
+              },
+              {
+                type: ConditionType.ColumnSearch,
+                column: 'message',
+                value: 'error',
+              },
+            ],
+          },
+          {
+            type: ConditionType.AndExpression,
+            expressions: [
+              {
+                type: ConditionType.ColumnSearch,
+                column: 'source',
+                value: 'network',
+              },
+              {
+                type: ConditionType.ColumnSearch,
+                column: 'message',
+                value: 'error',
+              },
+            ],
+          },
+        ],
+      },
+    ],
+  },
+  {
+    query: 'source:database | error',
+    expected: [
+      {
+        type: ConditionType.OrExpression,
+        expressions: [
+          {
+            type: ConditionType.ColumnSearch,
+            column: 'source',
+            value: 'database',
+          },
+          {
+            type: ConditionType.StringSearch,
+            searchString: 'error',
+          },
+        ],
+      },
+    ],
+  },
+  {
+    query: 'source:application request',
+    expected: [
+      {
+        type: ConditionType.AndExpression,
+        expressions: [
+          {
+            type: ConditionType.ColumnSearch,
+            column: 'source',
+            value: 'application',
+          },
+          {
+            type: ConditionType.StringSearch,
+            searchString: 'request',
+          },
+        ],
+      },
+    ],
+  },
+
+  {
+    query: 'source: application request',
+    expected: [
+      {
+        type: ConditionType.AndExpression,
+        expressions: [
+          {
+            type: ConditionType.ColumnSearch,
+            column: 'source',
+          },
+          {
+            type: ConditionType.StringSearch,
+            searchString: 'application',
+          },
+          {
+            type: ConditionType.StringSearch,
+            searchString: 'request',
+          },
+        ],
+      },
+    ],
+  },
+  {
+    query: 'source:network | (source:database lorem)',
+    expected: [
+      {
+        type: ConditionType.OrExpression,
+        expressions: [
+          {
+            type: ConditionType.ColumnSearch,
+            column: 'source',
+            value: 'network',
+          },
+          {
+            type: ConditionType.AndExpression,
+            expressions: [
+              {
+                type: ConditionType.ColumnSearch,
+                column: 'source',
+                value: 'database',
+              },
+              {
+                type: ConditionType.StringSearch,
+                searchString: 'lorem',
+              },
+            ],
+          },
+        ],
+      },
+    ],
+  },
+  {
+    query: '"unexpected error" "the operation"',
+    expected: [
+      {
+        type: ConditionType.AndExpression,
+        expressions: [
+          {
+            type: ConditionType.ExactPhraseSearch,
+            exactPhrase: 'unexpected error',
+          },
+          {
+            type: ConditionType.ExactPhraseSearch,
+            exactPhrase: 'the operation',
+          },
+        ],
+      },
+    ],
+  },
+];
+
+export default testData;
diff --git a/pw_web/log-viewer/src/utils/strings.ts b/pw_web/log-viewer/src/utils/strings.ts
new file mode 100644
index 0000000..0214c02
--- /dev/null
+++ b/pw_web/log-viewer/src/utils/strings.ts
@@ -0,0 +1,22 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+export function titleCaseToKebabCase(input: string) {
+  return input
+    .toLowerCase()
+    .replace(/[^\w\s-]/g, '') // Remove special characters (keeps word chars, spaces, and hyphens)
+    .replace(/\s+/g, '-') // Replace spaces with hyphens
+    .replace(/-+/g, '-') // Replace consecutive hyphens with a single hyphen
+    .trim(); // Remove leading and trailing spaces
+}
diff --git a/pw_work_queue/docs.rst b/pw_work_queue/docs.rst
index 7ae6efa..5cb02f6 100644
--- a/pw_work_queue/docs.rst
+++ b/pw_work_queue/docs.rst
@@ -6,99 +6,37 @@
 The ``pw_work_queue`` module contains utilities for deferring work to be
 executed by another thread.
 
-.. Warning::
-  This module is still under construction, the API is not yet stable.
+.. warning::
 
----------
-WorkQueue
----------
-The ``pw::work_queue::WorkQueue`` class enables threads and interrupts to
-enqueue work as a ``pw::work_queue::WorkItem`` for execution by the work queue.
+   This module is still under construction; the API is not yet stable.
 
-The entire API is thread and interrupt safe.
-
-Queue Sizing
-============
-The number of outstanding work requests is limited based on the
-``pw::work_queue::WorkQueue``'s internal queue size. This must be set
-appropriately for the application by the user.
-
-The queue size is set trough either through the size of the ``queue_storage``
-buffer passed into the constructor or by using the templated
-``pw::work_queue::WorkQueueWithBuffer`` helper.
-
-.. Note:: While the queue is full, the queue will not accept further work.
-
-Cooperative Thread Cancellation
-===============================
-The class is a ``pw::thread::ThreadCore``, meaning it should be executed as a
-single thread. In order to facilitate clean shutdown, it provides a
-``RequestStop()`` API for cooperative cancellation which should be invoked
-before joining the thread.
-
-.. Note:: Once stop has been requested the queue will no longer accept further
-          work.
-
-C++
-===
-.. cpp:class:: pw::work_queue::WorkQueue
-
-  .. cpp:function:: Status PushWork(WorkItem work_item)
-
-     Enqueues a work_item for execution by the work queue thread.
-
-     Returns:
-
-     * **Ok** - Success, entry was enqueued for execution.
-     * **FailedPrecondition** - the work queue is shutting down, entries are no
-       longer permitted.
-     * **ResourceExhausted** - internal work queue is full, entry was not
-       enqueued.
-
-  .. cpp:function:: void CheckPushWork(WorkItem work_item)
-
-     Queue work for execution. Crash if the work cannot be queued due to a
-     full queue or a stopped worker thread.
-
-     This call is recommended where possible since it saves error handling code
-     at the callsite; and in many practical cases, it is a bug if the work
-     queue is full (and so a crash is useful to detect the problem).
-
-     **Precondition:** The queue must not overflow, i.e. be full.
-
-     **Precondition:** The queue must not have been requested to stop, i.e. it
-     must not be in the process of shutting down.
-
-  .. cpp:function:: void RequestStop()
-
-     Locks the queue to prevent further work enqueing, finishes outstanding
-     work, then shuts down the worker thread.
-
-     The WorkQueue cannot be resumed after stopping as the ThreadCore thread
-     returns and may be joined. It must be reconstructed for re-use after
-     the thread has been joined.
-
+-------
 Example
 -------
 
 .. code-block:: cpp
 
-  #include "pw_thread/detached_thread.h"
-  #include "pw_work_queue/work_queue.h"
+   #include "pw_thread/detached_thread.h"
+   #include "pw_work_queue/work_queue.h"
 
-  pw::work_queue::WorkQueueWithBuffer<10> work_queue;
+   pw::work_queue::WorkQueueWithBuffer<10> work_queue;
 
-  pw::thread::Options& WorkQueueThreadOptions();
-  void SomeLongRunningProcessing();
+   pw::thread::Options& WorkQueueThreadOptions();
+   void SomeLongRunningProcessing();
 
-  void SomeInterruptHandler() {
-    // Instead of executing the long running processing task in the interrupt,
-    // the work_queue executes it on the interrupt's behalf.
-    work_queue.CheckPushWork(SomeLongRunningProcessing);
-  }
+   void SomeInterruptHandler() {
+       // Instead of executing the long running processing task in the interrupt,
+       // the work_queue executes it on the interrupt's behalf.
+       work_queue.CheckPushWork(SomeLongRunningProcessing);
+   }
 
-  int main() {
-    // Start up the work_queue as a detached thread which runs forever.
-    pw::thread::DetachedThread(WorkQueueThreadOptions(), work_queue);
-  }
+   int main() {
+       // Start up the work_queue as a detached thread which runs forever.
+       pw::thread::DetachedThread(WorkQueueThreadOptions(), work_queue);
+   }
 
+-------------
+API reference
+-------------
+.. doxygennamespace:: pw::work_queue
+   :members:
diff --git a/pw_work_queue/public/pw_work_queue/work_queue.h b/pw_work_queue/public/pw_work_queue/work_queue.h
index 8d6691a..de1fe25 100644
--- a/pw_work_queue/public/pw_work_queue/work_queue.h
+++ b/pw_work_queue/public/pw_work_queue/work_queue.h
@@ -31,47 +31,70 @@
 
 using WorkItem = Function<void()>;
 
-// The WorkQueue class enables threads and interrupts to enqueue work as a
-// pw::work_queue::WorkItem for execution by the work queue.
-//
-// The entire API is thread and interrupt safe.
+/// Enables threads and interrupts to enqueue work as a
+/// `pw::work_queue::WorkItem` for execution by the work queue.
+///
+/// **Queue sizing**: The number of outstanding work requests is limited
+/// based on the internal queue size. The queue size is set through either
+/// the size of the `queue_storage` buffer passed into the constructor or by
+/// using the templated `pw::work_queue::WorkQueueWithBuffer` helper. When the
+/// queue is full, the queue will not accept further work.
+///
+/// **Cooperative thread cancellation**: The class is a
+/// `pw::thread::ThreadCore`, meaning it should be executed as a single thread.
+/// To facilitate clean shutdown, it provides a `RequestStop()` method for
+/// cooperative cancellation which should be invoked before joining the thread.
+/// Once a stop has been requested the queue will no longer accept further work.
+///
+/// The entire API is thread-safe and interrupt-safe.
 class WorkQueue : public thread::ThreadCore {
  public:
-  // Note: the ThreadNotification prevents this from being constexpr.
+  /// @param[in] queue The work entries to enqueue.
+  ///
+  /// @param[in] queue_capacity The internal queue size which limits the number
+  /// of outstanding work requests.
+  ///
+  /// @note The `ThreadNotification` prevents this from being `constexpr`.
   WorkQueue(InlineQueue<WorkItem>& queue, size_t queue_capacity)
       : stop_requested_(false), queue_(queue) {
     min_queue_remaining_.Set(static_cast<uint32_t>(queue_capacity));
   }
 
-  // Enqueues a work_item for execution by the work queue thread.
-  //
-  // Returns:
-  // Ok - Success, entry was enqueued for execution.
-  // FailedPrecondition - the work queue is shutting down, entries are no
-  //     longer permitted.
-  // ResourceExhausted - internal work queue is full, entry was not enqueued.
+  /// Enqueues a `work_item` for execution by the work queue thread.
+  ///
+  /// @param[in] work_item The entry to enqueue.
+  ///
+  /// @returns
+  /// * @pw_status{OK} - Success. Entry was enqueued for execution.
+  /// * @pw_status{FAILED_PRECONDITION} - The work queue is shutting down.
+  ///   Entries are no longer permitted.
+  /// * @pw_status{RESOURCE_EXHAUSTED} - Internal work queue is full.
+  ///   Entry was not enqueued.
   Status PushWork(WorkItem&& work_item) PW_LOCKS_EXCLUDED(lock_) {
     return InternalPushWork(std::move(work_item));
   }
 
-  // Queue work for execution. Crash if the work cannot be queued due to a
-  // full queue or a stopped worker thread.
-  //
-  // This call is recommended where possible since it saves error handling code
-  // at the callsite; and in many practical cases, it is a bug if the work
-  // queue is full (and so a crash is useful to detect the problem).
-  //
-  // Precondition: The queue must not overflow, i.e. be full.
-  // Precondition: The queue must not have been requested to stop, i.e. it must
-  //     not be in the process of shutting down.
+  /// Queues work for execution. Crashes if the work cannot be queued due to a
+  /// full queue or a stopped worker thread.
+  ///
+  /// This call is recommended where possible since it saves error handling code
+  /// at the callsite; and in many practical cases, it is a bug if the work
+  /// queue is full (and so a crash is useful to detect the problem).
+  ///
+  /// @param[in] work_item The entry to enqueue.
+  ///
+  /// @pre
+  /// * The queue must not overflow, i.e. be full.
+  /// * The queue must not have been requested to stop, i.e. it must
+  ///   not be in the process of shutting down.
   void CheckPushWork(WorkItem&& work_item) PW_LOCKS_EXCLUDED(lock_);
 
-  // Locks the queue to prevent further work enqueing, finishes outstanding
-  // work, then shuts down the worker thread.
-  //
-  // The WorkQueue cannot be resumed after stopping as the ThreadCore thread
-  // returns and may be joined. It must be reconstructed for re-use after
-  // the thread has been joined.
+  /// Locks the queue to prevent further work enqueuing, finishes outstanding
+  /// work, then shuts down the worker thread.
+  ///
+  /// The `WorkQueue` cannot be resumed after stopping because the `ThreadCore`
+  /// thread returns and may be joined. The `WorkQueue` must be reconstructed
+  /// for re-use after the thread has been joined.
   void RequestStop() PW_LOCKS_EXCLUDED(lock_);
 
  private:
diff --git a/seed/0000-index.rst b/seed/0000-index.rst
index 6a8d278..179719f 100644
--- a/seed/0000-index.rst
+++ b/seed/0000-index.rst
@@ -12,9 +12,11 @@
   0002-template
   0101-pigweed.json
   0102-module-docs
-  0103: pw_protobuf Object Model<https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/133971>
+  0103: pw_protobuf: Past, present, and future<https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/133971>
   0104: display support<https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/150793>
   0105: Nested Tokens and Tokenized Args <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/154190>
   0106: Project Template <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/155430>
-  0107: Communications <http://pigweed-review.googlesource.com/c/pigweed/pigweed/+/157090>
-  0108: Emulators Frontend <http://pigweed-review.googlesource.com/c/pigweed/pigweed/+/158190>
+  0107-communications
+  0108-pw_emu-emulators-frontend
+  0109: Communication Buffers <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168357>
+  0110: Memory Allocation Interfaces <https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/168772>
diff --git a/seed/0001-the-seed-process.rst b/seed/0001-the-seed-process.rst
index ec28f1b..c7e340f 100644
--- a/seed/0001-the-seed-process.rst
+++ b/seed/0001-the-seed-process.rst
@@ -93,25 +93,45 @@
 #. If you haven't already, clone the Pigweed repository and set it up locally,
    following the :ref:`docs-getting-started` guide.
 
-#. Copy the `SEED template <0002-template>`_ to create the RST file for your
-   SEED. As you don't yet have a SEED number, use XXXX as a placeholder,
-   followed by the lowercase title of the proposal, with words separated by
+#. Claim a number for your SEED. This should be the next sequential number
+   listed within the `SEED index`_'s ``toctree`` table. (We will use 5309 for
+   our example.)
+
+   .. _SEED index: https://cs.opensource.google/pigweed/pigweed/+/main:seed/0000-index.rst
+
+#. Create a new RST file for your SEED. Name the file with the number of your
+   SEED followed by the lowercase title of the proposal, with words separated by
    hyphens.
 
    .. code-block:: sh
 
-      cp seed/0002-template.rst seed/XXXX-pw_rpc-over-smoke-signals.rst
+      touch seed/5309-pw_rpc-over-smoke-signals.rst
 
-#. Push up the template to Gerrit, marking it as a Work-In-Progress change.
-   From here, you may fill the template out with the content of your proposal
-   at your convenience.
+   Include your document in the GN build by modifying ``seed/BUILD.gn``:
 
-#. At any point, you may claim a SEED number by opening the
-   `SEED index`_ and taking the next available number by inserting
-   a row into the ``toctree`` table. Link the entry to the WIP change for your
-   SEED.
+   .. code-block::
 
-   .. _SEED index: https://cs.opensource.google/pigweed/pigweed/+/main:seed/0000-index.rst
+      # Insert your dependency to the doc group at the top of the file.
+      pw_doc_group("docs") {
+        group_deps = [
+          ":0001",
+          ...
+          ":5308",
+          ":5309",
+        ]
+      }
+
+      # Define a doc group target for your SEED.
+      pw_doc_group("5309") {
+        sources = [ "5309-pw_rpc-over-smoke-signals.rst" ]
+      }
+
+#. Push up your document to Gerrit, marking it as a Work-In-Progress change,
+   following the :ref:`docs-contributing` guide.
+
+#. Update the ``toctree`` in the SEED index adding a row for your SEED. The
+   entry should be labeled with the title of your SEED and link to your work
+   in progress change.
 
    .. code-block:: rst
 
@@ -130,13 +150,26 @@
       git add seed/0000-index.rst
       git commit -m "SEED-5309: Claim SEED number"
 
-#. Push up a changelist (CL) to Gerrit following the :ref:`docs-contributing`
-   guide and add GWSQ as a reviewer. Set ``Pigweed-Auto-Submit`` to +1.
+#. Push a separate change to Gerrit with your SEED index modification and add
+   GWSQ as a reviewer. Set ``Pigweed-Auto-Submit`` to +1.
 
    .. image:: 0001-the-seed-process/seed-index-gerrit.png
 
-#. Once your CL has been reviewed and submitted, the SEED number belongs to you.
-   Update your document's template and filename with this number.
+#. Fill out your proposal document, using the :ref:`SEED template<seed-0002>` as
+   a guide.
+
+   If your SEED requires additional resources such as images, place them within
+   a subdirectory named identically to your document without the ``.rst``
+   extension. These should be listed as ``inputs`` in your SEED's GN doc group
+   target.
+
+   .. code-block::
+
+      seed/
+        ...
+        5309-pw_rpc-over-smoke-signals.rst
+        5309-pw_rpc-over-smoke-signals/
+          state-diagram.svg
 
 #. When you feel you have enough substantive content in your proposal to be
    reviewed, push it up to Gerrit and switch the change from WIP to Active.
@@ -207,15 +240,19 @@
 - The SEED remains open for as long as necessary. Internally, Pigweed's review
   committee will regularly meet to consider active SEEDs and determine when to
   advance to them the next stage.
+- Open SEEDs are assigned owners in the core Pigweed team, who are primarily
+  responsible for engaging with the author to move the SEED through its review
+  process.
 
 :bdg-warning:`Last Call` **A tentative decision has been reached, but
 commenters may raise final objections.**
 
 - A tentative decision on the SEED has been made. The decision is issued at the
-  best judgement of Pigweed's review committee when they feel there has been
-  sufficient discussion on the tradeoffs of the proposal to do so.
-- Transition is triggered manually by a member of the Pigweed team, with a
-  comment on the likely outcome of the SEED (acceptance / rejection).
+  best judgement of the SEED's owner within the Pigweed team when they feel
+  there has been sufficient discussion on the tradeoffs of the proposal to do
+  so.
+- Transition is triggered manually by its owner, with a comment on the likely
+  outcome of the SEED (acceptance / rejection).
 - On entering Last Call, the visibility of the SEED is widely boosted through
   Pigweed's communication channels (Discord, mailing list, Pigweed Live, etc.)
   to solicit any strong objections from stakeholders.
diff --git a/seed/0101-pigweed.json.rst b/seed/0101-pigweed.json.rst
index 271f0b1..0e207a6 100644
--- a/seed/0101-pigweed.json.rst
+++ b/seed/0101-pigweed.json.rst
@@ -27,26 +27,26 @@
 
 .. code-block::
 
-  {
-    "root_variable": "<PROJNAME>_ROOT",
-    "cipd_package_files": ["tools/default.json"],
-    "virtualenv": {
-      "gn_args": ["dir_pw_third_party_stm32cube=\"\""],
-      "gn_root": ".",
-      "gn_targets": [":python.install"]
-    },
-    "optional_submodules": ["vendor/shhh-secret"],
-    "gni_file": "build_overrides/pigweed_environment.gni"
-  }
+   {
+     "root_variable": "<PROJNAME>_ROOT",
+     "cipd_package_files": ["tools/default.json"],
+     "virtualenv": {
+       "gn_args": ["dir_pw_third_party_stm32cube=\"\""],
+       "gn_root": ".",
+       "gn_targets": [":python.install"]
+     },
+     "optional_submodules": ["vendor/shhh-secret"],
+     "gni_file": "build_overrides/pigweed_environment.gni"
+   }
 
 The plugins to the ``pw`` command-line utility are configured in ``PW_PLUGINS``,
 which looks like this:
 
 .. code-block::
 
-  # <name> <Python module> <function>
-  console pw_console.__main__ main
-  format pw_presubmit.format_code _pigweed_upstream_main
+   # <name> <Python module> <function>
+   console pw_console.__main__ main
+   format pw_presubmit.format_code _pigweed_upstream_main
 
 In addition, changes have been proposed to configure some of the behavior of
 ``pw format`` and the formatting steps of ``pw presubmit`` from config files,
@@ -130,54 +130,54 @@
 
 .. code-block::
 
-  {
-    "pw": {
-      "pw_cli": {
-        "plugins": {
-          "console": {
-            "module": "pw_console.__main__",
-            "function": "main"
-          },
-          "format": {
-            "module": "pw_presubmit.format_code",
-            "function": "_pigweed_upstream_main"
-          }
-        }
-      },
-      "pw_env_setup": {
-        "root_variable": "<PROJNAME>_ROOT",
-        "rosetta": "allow",
-        "gni_file": "build_overrides/pigweed_environment.gni",
-        "cipd": {
-          "package_files": [
-            "tools/default.json"
-          ]
-        },
-        "virtualenv": {
-          "gn_args": [
-            "dir_pw_third_party_stm32cube=\"\""
-          ],
-          "gn_targets": [
-            "python.install"
-          ],
-          "gn_root": "."
-        },
-        "submodules": {
-          "optional": [
-            "vendor/shhh-secret"
-          ]
-        }
-      },
-      "pw_presubmit": {
-        "format": {
-          "python": {
-            "formatter": "black",
-            "black_path": "pyink"
-          }
-        }
-      }
-    }
-  }
+   {
+     "pw": {
+       "pw_cli": {
+         "plugins": {
+           "console": {
+             "module": "pw_console.__main__",
+             "function": "main"
+           },
+           "format": {
+             "module": "pw_presubmit.format_code",
+             "function": "_pigweed_upstream_main"
+           }
+         }
+       },
+       "pw_env_setup": {
+         "root_variable": "<PROJNAME>_ROOT",
+         "rosetta": "allow",
+         "gni_file": "build_overrides/pigweed_environment.gni",
+         "cipd": {
+           "package_files": [
+             "tools/default.json"
+           ]
+         },
+         "virtualenv": {
+           "gn_args": [
+             "dir_pw_third_party_stm32cube=\"\""
+           ],
+           "gn_targets": [
+             "python.install"
+           ],
+           "gn_root": "."
+         },
+         "submodules": {
+           "optional": [
+             "vendor/shhh-secret"
+           ]
+         }
+       },
+       "pw_presubmit": {
+         "format": {
+           "python": {
+             "formatter": "black",
+             "black_path": "pyink"
+           }
+         }
+       }
+     }
+   }
 
 Some teams will resist a new file at the root of their checkout, but this seed
 won't be adding any files, it'll be combining at least one top-level file, maybe
diff --git a/seed/0107-communications.rst b/seed/0107-communications.rst
new file mode 100644
index 0000000..a79a130
--- /dev/null
+++ b/seed/0107-communications.rst
@@ -0,0 +1,638 @@
+.. _seed-0107:
+
+============================
+0107: Pigweed Communications
+============================
+.. seed::
+   :number: 107
+   :name: Communications
+   :status: Accepted
+   :proposal_date: 2023-07-19
+   :cl: 157090
+
+-------
+Summary
+-------
+Pigweed does not currently offer an end-to-end solution for network
+communications. This SEED proposes that Pigweed adopt a new sockets API as its
+primary networking abstraction. The sockets API will be backed by a new,
+lightweight embedded-focused network protocol stack, inspired by the Internet
+protocol suite (TCP/IP). It will also support full TCP/IP via an open source
+embedded TCP/IP stack or OS sockets. The new communications APIs will support
+asynchronous use and zero-copy transmission.
+
+This work is comprised of the following subareas:
+
+- `Sockets API`_
+- `Network protocol stack`_
+- `Async`_ API pattern
+- `Buffer management`_ system
+
+The Pigweed team will revisit :ref:`pw_rpc <module-pw_rpc>` after deploying the
+sockets API and network protocol stack.
+
+----------
+Background
+----------
+Pigweed's primary communications system is :ref:`pw_rpc <module-pw_rpc>`. pw_rpc
+makes it possible to call functions and exchange data with a remote system.
+Requests and responses are encoded as protobufs. pw_rpc was initially deployed
+to a system with its own network protocol stack, so the Pigweed team did not
+invest in building a network stack of its own.
+
+The TCP/IP model, as described in `RFC 1122
+<https://datatracker.ietf.org/doc/html/rfc1122>`_, organizes communications
+systems and protocols into four layers: Application, Transport, Internet (or
+Network), and Link (or Network access). Pigweed's current communications
+offerings fit into the TCP/IP model as follows:
+
++-----------------------+-----------------------------+
+| TCP/IP Model          | Pigweed Modules             |
++=======================+=============================+
+| Application           | | :ref:`module-pw_transfer` |
+|                       | | :ref:`module-pw_rpc`      |
++-----------------------+-----------------------------+
+| Transport             | | :ref:`module-pw_router`   |
++-----------------------+ | :ref:`module-pw_hdlc`     |
+| Internet / Network    |                             |
++-----------------------+                             |
+| Link / Network access |                             |
++-----------------------+-----------------------------+
+
+Notably, Pigweed provides little functionality below the application layer. The
+pw_router and pw_hdlc modules only implement a subset of features needed at
+their layer in the communications stack.
+
+Challenges deploying pw_rpc
+===========================
+pw_rpc is an application-layer communications module. It relies on a network
+to send packets between endpoints and doesn't provide any networking features
+itself. When initially developing pw_rpc, the Pigweed team focused its limited
+resources solely on this application-layer feature, which made it possible to
+deploy pw_rpc quickly to systems with existing networks.
+
+pw_rpc has been deployed to many projects with great results. However, since
+Pigweed does not provide a network stack, deploying pw_rpc to systems without
+existing stacks can be challenging. These systems have to develop their own
+solutions to transmit and route pw_rpc packets.
+
+As an example, one project based its network communications on Pigweed's
+:ref:`module-pw_hdlc` module. It used HDLC in a way more similar to IP,
+providing network-level addressing and features like quality-of-service. Source
+and destination addresses and ports were packed into the HDLC address field to
+facilitate routing and multiplexing. The :ref:`module-pw_router` module was
+developed to support static routing tables for HDLC frames through nodes in the
+system, and the :ref:`pw_transfer RPC service <module-pw_transfer>` was
+developed to provide reliable delivery of data.
+
+Learning from custom network stacks
+-----------------------------------
+Teams want to use Pigweed to build cool devices. Their goal isn't to build a
+network protocol stack, but they need one to use features like pw_rpc and
+pw_transfer. Given this, teams have little incentive to make the enormous time
+investment to develop a robust, reusable network stack. The practical approach
+is to assemble the minimum viable network stack from what's available.
+
+The Pigweed team has seen a few teams create custom network stacks for pw_rpc.
+While these projects were successful, their network stacks were not their
+primary focus. As a result, they had some shortcomings, including the following:
+
+- **Byte stuffing memory overhead** -- HDLC is a low-level protocol. It uses
+  `byte stuffing
+  <https://en.wikipedia.org/wiki/High-Level_Data_Link_Control#Asynchronous_framing>`_
+  to ensure frame integrity across unreliable links. Byte stuffing makes sense
+  on the wire, but not in memory. Storing byte stuffed frames requires double
+  the memory to account for worst-case byte stuffing. Some projects use HDLC
+  frames as network layer packets, so they are buffered in memory for routing,
+  which requires more memory than necessary.
+- **HDLC protocol overhead** -- HDLC's frame recovery and integrity features are
+  not needed across all links. For example, these features are unnecessary for
+  Bluetooth. However, when projects use HDLC for both the network and link
+  layers, it has to be used across all links.
+- **pw_transfer at the application layer** -- :ref:`pw_transfer
+  <module-pw_transfer>` supports reliable data transfers with :ref:`pw_rpc
+  <module-pw_rpc>`. It required significant investment to develop, but since it
+  is layered on top of pw_rpc, it has additional overhead and limited
+  reusability.
+- **Custom routing** -- Some network nodes have multiple routes between them.
+  Projects have had to write custom, non-portable logic to handle routing.
+- **pw_rpc channel IDs in routing** -- Some projects used pw_rpc channel IDs as
+  network addresses. Channel IDs were assigned for the whole network ahead of
+  time. This has several downsides:
+
+  - Requires nodes to have knowledge of the global channel ID assignments
+    and routes between them, which can be difficult to keep in sync.
+  - Implies that all traffic is pw_rpc packets.
+  - Requires decoding pw_rpc packets at lower levels of the network stack.
+  - Complicates runtime assignment of channel IDs.
+
+- **Flow control** -- Projects' communications stacks have not supported flow
+  control. The network layer simply has to drop packets it cannot process.
+  There is no mechanism to tell the producer to slow down or wait for the
+  receiver to be ready.
+- **Accounting for the MTU** -- HDLC and pw_rpc have variable overheads, so it
+  is difficult to know how much memory to allocate for RPC payloads. If packets
+  are not sized properly with respect to the maximum transmission unit (MTU),
+  packets may be silently dropped.
+
+Problem summary
+===============
+These are the key issues of Pigweed's communications offerings based on the
+team's experiences deploying pw_rpc.
+
+**No cohesive full stack solution**
+
+Pigweed only provides a handful of communications modules. They were not
+designed to work together, and there is not enough to assemble a functioning
+network stack. Some projects have to create bespoke network protocols with
+limited reusability.
+
+**Layering violations**
+
+pw_transfer runs on top of pw_rpc instead of the transport layer, which adds
+overhead and prevents its use independent of pw_rpc. Using pw_rpc channels for
+routing ties the network to pw_rpc. Projects often use pw_hdlc for multiple
+network layers, which brings the encoding's overhead higher up the stack and
+across links that do not need it.
+
+**Inefficiency**
+
+Reliable data transfer requires pw_transfer, which runs on top of pw_rpc. This
+adds additional overhead and requires more CPU-intensive decoding operations.
+Using pw_rpc channel IDs in lower layers of the network requires expensive
+varint decodes, even when the packets are bound for other nodes.
+
+**Missing features**
+
+Each project has to develop its own version of common features, including:
+
+- **Addressing** -- There are no standard addressing schemes available to
+  Pigweed users.
+- **Routing** -- Projects must implement their own logic for routing packets,
+  which can be complex.
+- **Flow control** -- There is no way for the receiver to signal that it is ready
+  for more data or that it cannot receive any more, either at the protocol or
+  API level anywhere in the stack. Flow control is a crucial feature for
+  realistic networks with limited resources.
+- **Connections** -- Connections ensure the recipient is listening to
+  transmissions, and detect when the other end is no longer communicating.
+  pw_transfer maintains a connection, but it sits atop pw_rpc, so it cannot be
+  elsewhere.
+- **Quality of service (QoS)** -- Projects have developed basic QoS features in
+  HDLC, but there is no support in upstream Pigweed. Every project has to
+  develop its own custom implementation.
+
+-----
+Goals
+-----
+This SEED proposes a new communications system for Pigweed with the following
+goals:
+
+- **Practical end-to-end solution** -- Pigweed provides a full suite of APIs
+  and protocols that support simple and complex networking use cases.
+- **Robust, stable, and reliable** -- Pigweed communications "just work", even
+  under high load. The networking stack is thoroughly tested in both single and
+  multithreaded environments, with functional, load, fuzz, and performance
+  testing. Projects can easily test their own deployments with Pigweed tooling.
+- **Cohesive, yet modular** -- The network stack is holistically designed, but
+  modular. It is organized into layers that can be exchanged and configured
+  independently. Layering simplifies the stack, decouples protocol
+  implementations, and maximizes flexibility within a cohesive system.
+- **Efficient & performant** -- Pigweed’s network stack minimizes code size and
+  CPU usage. It provides for high throughput, low latency data transmission.
+  Memory allocation is configurable and adaptable to a project’s needs.
+- **Usable & easy to learn** -- Pigweed’s communications systems are backed by
+  thorough and up-to-date documentation. Getting started is easy using
+  Pigweed's tutorials and examples.
+
+--------
+Proposal
+--------
+Pigweed will unify its communications systems under a common sockets API. This
+entails the following:
+
+- **Sockets API** -- Pigweed will introduce a `sockets
+  API`_ to serve as its common networking interface.
+- **Lightweight protocol stack** -- Pigweed will provide a custom,
+  :ref:`lightweight network protocol stack <seed-0107-network-stack>` inspired
+  by IPv6, with UDP, TCP, and SCTP-like transport protocols.
+- **TCP/IP integration** -- Pigweed will offer sockets implementations for OS
+  sockets and an existing `embedded TCP/IP stack`_.
+- **Async** -- Pigweed will establish a new pattern for `async`_ programming and
+  use it in its networking APIs.
+- **Zero copy** -- Pigweed will develop a new `buffer management`_ system to
+  enable zero-copy networking.
+
+These features fit into the TCP/IP model as follows:
+
++-------------------------------------+-------------------------------------+
+| TCP/IP Model                        | Future Pigweed Comms Stack          |
++=====================================+=====================================+
+| Application                         | | *Various modules including*       |
+|                                     | | *pw_rpc and pw_transfer.*         |
+|                                     |                                     |
+|                                     |                                     |
+|                                     |                                     |
++-------------------------------------+-------------------------------------+
+| .. rst-class:: pw-text-center-align | .. rst-class:: pw-text-center-align |
+|                                     |                                     |
+|    **OS Sockets**                   |    **Pigweed Sockets**              |
++-------------------------------------+-------------------------------------+
+| Transport                           | | UDP-like unreliable protocol      |
+|                                     | | TCP-like reliable protocol        |
+|                                     | | SCTP-like reliable protocol       |
++-------------------------------------+-------------------------------------+
+| Network / Internet                  | | IPv6-like protocol                |
++-------------------------------------+-------------------------------------+
+| Network access / Link               | | HDLC                              |
+|                                     | | others                            |
++-------------------------------------+-------------------------------------+
+
+Sockets API
+===========
+The new sockets API will become the primary networking abstraction in Pigweed.
+The API will support the following:
+
+- Creating sockets for bidirectional communications with other nodes in the
+  network.
+- Opening and closing connections for connection-oriented socket types.
+- Sending and receiving data, optionally :ref:`asynchronously
+  <seed-0107-async>`.
+- Reporting errors.
+
+The sockets API will support runtime polymorphism. In C++, it will be a virtual
+interface.
+
+**Rationale**
+
+A network socket represents a bidirectional communications channel with another
+node, which could be local or across the Internet. Network sockets form the API
+between an application and the network.
+
+Sockets are a proven, well-understood concept. Socket APIs such as Berkeley /
+POSIX sockets are familiar to anyone with Internet programming experience.
+
+Sockets APIs hide the details of the network protocol stack. A socket provides
+well-defined semantics for a communications channel, but applications do not
+need to know how data is sent and received. The same API can be used to exchange
+data with another process on the same machine or with a device across the world.
+
+.. admonition:: Sockets SEED
+
+   The Pigweed sockets API will be explored in an upcoming SEED.
+
+Socket types
+------------
+Pigweed's sockets API will support the following sockets types.
+
+.. list-table::
+   :header-rows: 1
+
+   * - Berkeley socket type
+     - Internet protocol
+     - Payload type
+     - Connection-oriented
+     - Guaranteed, ordered delivery
+     - Description
+   * - ``SOCK_DGRAM``
+     - UDP
+     - Datagram
+     - ❌
+     - ❌
+     - Unreliable datagram
+   * - ``SOCK_STREAM``
+     - TCP
+     - Byte stream
+     - ✅
+     - ✅
+     - Reliable byte stream
+   * - ``SOCK_SEQPACKET``
+     - SCTP
+     - Datagram
+     - ✅
+     - ✅
+     - Reliable datagram
+
+Raw sockets (``SOCK_RAW``) may be supported in the future if required.
+``SOCK_CONN_DGRAM`` (unreliable connection-oriented datagram) sockets are
+uncommon and will not be supported.
+
+The socket's semantics will be expressed in the sockets API, e.g. with a
+different interface or class for each type. Instances of the connection-oriented
+socket types will be generated from a "listener" object.
+
+Pigweed's sockets API will draw inspiration from modern type safe APIs like
+Rust's `std::net sockets <https://doc.rust-lang.org/std/net/index.html>`_,
+rather than traditional APIs like POSIX sockets or Winsock. Pigweed sockets will
+map trivially to these APIs and implementations will be provided upstream.
+
+Using the sockets API
+---------------------
+The Pigweed sockets API will provide the interface between applications and the
+network. Any application can open a socket to communicate across the network.
+A future revision of ``pw_rpc`` will use the sockets API in place of its current
+``Channel`` API.
+
+The sockets API will support both synchronous and :ref:`asynchronous
+<seed-0107-async>` use. The synchronous API may be built using the async API.
+It will also support :ref:`zero-copy <seed-0107-buffers>` data transmission.
+
+Addressing
+----------
+The Pigweed sockets API will be aware of addresses. Addresses are used to refer
+to nodes in a network, including the socket's own node. With TCP/IP, the socket
+address includes an IP address and a port number.
+
+The POSIX sockets API supports different domains through address family
+constants such as ``AF_INET``, ``AF_INET6``, and ``AF_UNIX``. Addresses in these
+families are specified or accessed in various socket operations. Because the
+address format is not specified by the API, working with addresses is not type
+safe.
+
+Pigweed sockets will approach addressing differently, but details are yet to be
+determined. Possible approaches include:
+
+- Use IPv6 addresses exclusively. Systems with other addressing schemes map
+  these into IPv6 for use with Pigweed APIs.
+- Provide a polymorphic address class so sockets can work with addresses
+  generically.
+- Avoid addresses in the base sockets API. Instead, use implementation specific
+  derived classes to access addresses.
+
+Network protocol stack
+======================
+The sockets API will be backed by a network protocol stack. Pigweed will provide
+sockets implementations for following network protocol stacks:
+
+* Third party embedded TCP/IP stack, most likely `lwIP
+  <https://savannah.nongnu.org/projects/lwip/>`_.
+* Operating system TCP/IP stack via POSIX sockets or `Winsock
+  <https://learn.microsoft.com/en-us/windows/win32/winsock/windows-sockets-start-page-2>`_.
+* Custom :ref:`lightweight network protocol stack <seed-0107-network-stack>`.
+
+Embedded TCP/IP stack
+---------------------
+Pigweed will provide a sockets implementation for an embedded TCP/IP stack such
+as `lwIP <https://savannah.nongnu.org/projects/lwip/>`_.
+
+The sockets integration will be structured to avoid unnecessary dependencies on
+network stack features. For example, if a system is using IPv6 exclusively, the
+integration won't require IPv4 support, and the TCP/IP stack can be configured
+without it.
+
+**Rationale**
+
+The Internet protocol suite, or TCP/IP, is informed by decades of research and
+practical experience. It is much more than IP, TCP, and UDP; it's an alphabet
+soup of protocols that address a myriad of use cases and challenges.
+Implementing a functional TCP/IP stack is no small task. At time of writing,
+lwIP has about as many lines of C as Pigweed has C++ (excluding tests).
+
+The Pigweed team does not plan to implement a full TCP/IP stack. This is a major
+undertaking, and there are already established open source embedded TCP/IP
+stacks. Projects needing the full power of TCP/IP can use an embedded stack like
+`lwIP <https://savannah.nongnu.org/projects/lwip/>`_.
+
+Choosing between embedded TCP/IP and :ref:`Pigweed's stack <seed-0107-network-stack>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+lwIP's `website <https://savannah.nongnu.org/projects/lwip/>`_ states that it
+requires tens of KB of RAM and about 40 KB of ROM. Using lwIP means using the
+same TCP/IP protocols that run the Internet. These protocols are feature rich,
+but have more overhead than is necessary for local communications within a small
+embedded system.
+
+Projects that can afford the resource requirements and protocol overhead of
+TCP/IP should use it. These projects can set up a local IPv4 or IPv6 network
+and use it for communications behind the Pigweed sockets API. Projects that
+cannot afford full TCP/IP can opt for Pigweed's :ref:`custom protocol stack
+<seed-0107-network-stack>`. Pigweed's custom stack will not have the depth of
+features and tooling that TCP/IP does, but will be sufficient for many systems.
+
+TCP/IP socket types
+^^^^^^^^^^^^^^^^^^^
+With an embedded TCP/IP stack, the Pigweed sockets API will be implemented as
+follows:
+
+- Unreliable datagram (``SOCK_DGRAM``) -- UDP
+- Reliable byte stream (``SOCK_STREAM``) -- TCP
+- Reliable datagram (``SOCK_SEQPACKET``) -- Lightweight framing over TCP. This
+  will be semantically similar to `SCTP
+  <https://datatracker.ietf.org/doc/html/rfc9260>`_, but integrations will not
+  use SCTP since it is not widely supported.
+
+.. _seed-0107-network-stack:
+
+Pigweed's custom network protocol stack
+---------------------------------------
+Pigweed will develop a custom, lightweight network protocol stack.
+
+This new protocol stack will be designed for small devices with relatively
+simple networks. It will scale to several interconnected cores that interface
+with a few external devices (e.g. over USB or Bluetooth). Depending on project
+requirements, it may or may not support dynamic network host configuration (e.g.
+DHCP or SLAAC).
+
+Pigweed's network protocol stack will be a strict subset of TCP/IP. This will
+include minimal, reduced overhead versions of UDP, TCP, and IPv6. Portions of
+other protocols such as ICMPv6 may be implemented as required.
+
+**Rationale**
+
+TCP/IP is too large and complex for some embedded systems. Systems for which
+TCP/IP is unnecessary can use Pigweed's lightweight embedded network protocol
+stack.
+
+Transport layer
+^^^^^^^^^^^^^^^
+Pigweed will provide transport layer protocols that implement the semantics of
+``SOCK_DGRAM``, ``SOCK_STREAM``, and ``SOCK_SEQPACKET``-like sockets.
+
+- ``SOCK_DGRAM``-like sockets will be backed by a UDP-like protocol. This will
+  add source and destination ports to the IP-style packets for multiplexing on
+  top of the network layer.
+- ``SOCK_STREAM``-like sockets will be backed by a TCP-like protocol that uses
+  network layer packets to implement a reliable byte stream. It will be based on
+  TCP, but will not implement all of its features. The :ref:`module-pw_transfer`
+  module may serve as a starting point for the new protocol implementation.
+- ``SOCK_SEQPACKET``-like sockets will be implemented with a simple
+  message-oriented protocol on top of the TCP-like protocol. Applications like
+  pw_rpc will use ``SOCK_SEQPACKET`` sockets.
+
+Network layer
+^^^^^^^^^^^^^
+Pigweed will create a new network-layer protocol closely based on IPv6. Details
+are still to be determined, but the protocol is intended to be a strict subset
+of IPv6 and related protocols (e.g. ICMP, NDP) as needed. If a need arises, it
+is met by implementing the associated IP suite protocol. Packets will use a
+compressed version of an IPv6 header (e.g. omit fields, use smaller addresses).
+
+This protocol will provide:
+
+- Unreliable packet delivery between source and destination.
+- Routing based on the source and destination addresses.
+- Quality of service (e.g. via the traffic class field).
+
+Packets may be routed at this layer independently of the link layer. Wire format
+details stay on the wire.
+
+Network access / link layer
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Pigweed's network stack will interact with the link layer through a generic
+interface. This will allow Pigweed to send network packets with any protocol
+over any physical interface.
+
+Pigweed already provides minimal support for one link layer protocol, HDLC.
+Other protocols (e.g. COBS, PPP) may be implemented. Some hardware interfaces
+(e.g. Bluetooth, USB) may not require an additional link-layer protocol.
+
+Language support
+----------------
+Pigweed today is primarily C++, but it supports Rust, C, Python, TypeScript, and
+Java to varying extents.
+
+Pigweed’s communications stack will be developed in either C++ or Rust to start,
+but it will be ported to all supported languages in time. The stack may have C
+APIs to facilitate interoperability between C++ and Rust.
+
+.. admonition:: Network protocol stack SEED
+
+   Pigweed's network protocol stack will be explored in an upcoming SEED.
+
+.. _seed-0107-async:
+
+Async
+=====
+Pigweed will develop a model for asynchronous programming and use it in its
+networking APIs, including sockets. Sockets will also support synchronous
+operations, but these may be implemented in terms of the asynchronous API.
+
+The Pigweed async model has not been designed yet. The :ref:`pw_async
+<module-pw_async>` module has a task dispatcher, but the pattern for async APIs
+has not been established. Further exploration is needed, but C++20 coroutines
+may be used for Pigweed async APIs where supported.
+
+**Rationale**
+
+Synchronous APIs require the thread to block while an operation completes. The
+thread and its resources cannot be used by the system until the task completes.
+Async APIs allow a single thread to handle multiple simultaneous tasks. The
+thread advances tasks until they need to wait for an external operation to
+complete, then switches to another task to avoid blocking.
+
+Threads are expensive in embedded systems. Each thread requires significant
+memory for its stack and kernel structures for bookkeeping. They occupy this
+memory all the time, even when they are not running. Furthermore, context
+switches between threads can take significant CPU time.
+
+Asynchronous programming avoids these downsides. Many asynchronous tasks run
+on a single thread. Fewer threads are needed, and the resources of one thread
+are shared by multiple tasks. Since asynchronous systems run within one thread,
+no thread context switches occur.
+
+Networking involves many asynchronous tasks. For example, waiting for data to be
+sent through a network interface, for a connection request, or for data to
+arrive on one or more interfaces are all operations that benefit from
+asynchronous APIs. Network protocols themselves are heavily asynchronous.
+
+.. admonition:: Async SEED
+
+   Pigweed's async pattern will be explored in an upcoming SEED.
+
+.. _seed-0107-buffers:
+
+Buffer management
+=================
+Pigweed's networking APIs will support zero-copy data transmission. Applications
+will be able to request a buffer from a socket. When one is available, they fill
+it with data for transmission.
+
+Pigweed will develop a general purpose module for allocating and managing
+buffers. This will be used to implement zero-copy features for Pigweed's
+networking stack.
+
+As an example, zero-copy buffer allocation could work as follows:
+
+- The user requests a buffer from a socket.
+- The network protocol layer under the socket requests a buffer from the next
+  lower layer.
+- The bottom protocol layer allocates a buffer.
+- Each layer reserves part of the buffer for its headers or footers.
+- The remaining buffer is provided to the user to populate with their payload.
+- When the user is done, the buffer is released. Each layer of the network stack
+  processes the buffer as necessary.
+- Finally, at the lowest layer, the final buffer is sent over the hardware
+  interface.
+
+Zero-copy APIs will be :ref:`asynchronous <seed-0107-async>`.
+
+**Rationale**
+
+Networking involves transmitting large amounts of data. Copying network traffic
+can result in substantial CPU usage, particularly in nodes that route traffic to
+other nodes.
+
+A buffer management system that minimizes copying saves precious CPU cycles and
+power on constrained systems.
+
+.. admonition:: Buffer management SEED
+
+   Pigweed's buffer management system will be explored in an upcoming SEED.
+
+Vectored I/O
+------------
+Vectored or scatter/gather I/O allows users to serialize data from multiple
+buffers into a single output stream, or vice versa. For Pigweed's networking
+APIs, this could be used to, for example, store a packet header in one buffer
+and packet contents in one or more other buffers. These isolated chunks are
+serialized in order to the network interface.
+
+Vectored I/O minimizes copying, but is complex. Additionally, simple DMA engines
+may only operate on a single DMA buffer. Thus, vectored I/O could require
+either:
+
+- a copy into the DMA engine's buffer, which costs CPU time and memory, or
+- multiple, small DMAs, which involves extra interrupts and CPU time.
+
+Vectored I/O may be supported in Pigweed's communications stack, depending on
+project requirements.
+
+----------
+Next steps
+----------
+Pigweed's communications revamp will proceed loosely as follows:
+
+* Write SEEDs to explore existing solutions, distill requirements, and propose
+  new Pigweed features for these areas:
+
+  - Sockets API
+  - Async pattern
+  - Buffer management
+  - Network protocol stack
+
+* Implement the Sockets API.
+
+  - Document, integrate, and deploy the async programming pattern for Pigweed.
+  - Develop and test Pigweed's buffer management system.
+  - Use these features in the sockets API. If necessary, the synchronous,
+    copying API could be implemented first.
+
+* Deploy the sockets API for TCP/IP.
+
+  - Implement and unit test sockets for TCP/IP with POSIX and Winsock sockets.
+  - Implement and unit test sockets for an embedded TCP/IP stack.
+
+* Develop a test suite for Pigweed network communications.
+
+  - Create integration tests for networks with multiple nodes that cover basic
+    operation, high load, and packet loss.
+  - Write performance tests against the sockets API to measure network stack
+    performance.
+
+* Develop Pigweed's lightweight network protocol stack.
+
+  - Test the lightweight network protocol stack on hardware and in a simulated
+    environment.
+  - Write fuzz tests for the protocol stack.
+  - Write performance tests for the protocol stack.
+
+* Revisit other communications systems, including pw_rpc and pw_transfer.
diff --git a/seed/0108-pw_emu-emulators-frontend.rst b/seed/0108-pw_emu-emulators-frontend.rst
new file mode 100644
index 0000000..187410c
--- /dev/null
+++ b/seed/0108-pw_emu-emulators-frontend.rst
@@ -0,0 +1,685 @@
+.. role:: python(code)
+   :language: python
+   :class: highlight
+
+.. _seed-0108:
+
+========================
+0108: Emulators Frontend
+========================
+.. seed::
+   :number: 108
+   :name: Emulators Frontend
+   :status: Accepted
+   :proposal_date: 2023-06-24
+   :cl: 158190
+
+-------
+Summary
+-------
+This SEED proposes a new Pigweed module that allows users to define emulator
+targets, start, control and interact with multiple running emulator instances,
+either through a command line interface or programmatically through Python APIs.
+
+-----------
+Definitions
+-----------
+An **emulator** is a program that allows users to run unmodified images compiled
+for a :ref:`target <docs-targets>` on the host machine. The **host** is the machine that
+runs the Pigweed environment.
+
+An emulated **machine** or **board** is an emulator abstraction that typically
+has a correspondence in the real world - a product, an evaluation board, a
+hardware platform.
+
+An emulated machine can be extended / tweaked through runtime configuration
+options: add sensors on an i2c bus, connect a disk drive to a disk controller,
+etc.
+
+An emulator may use an **object model**, a hierarchical arrangement of emulator
+**objects** which are emulated devices (e.g. SPI controller) or internal
+emulator structures.
+
+An object can be accessed through an **object path** and can have
+**properties**. Device properties controls how the device is emulated
+(e.g. enables or disables certain features, defines memory sizes, etc.).
+
+A **channel** is a communication abstraction between the emulator and host
+processes. Examples of channels that an emulator can expose to the host:
+
+* An emulated UART could be exposed on the host as a `PTY
+  <https://en.wikipedia.org/wiki/Pseudoterminal>`_ or a socket.
+
+* A flash device could be exposed on the host as a file.
+
+* A network device could be exposed on the host as a tun/tap interface.
+
+* A remote gdb interface could be exposed to the host as a socket.
+
+A **monitor** is a control channel that allows the user to interactively or
+programmatically control the emulator: pause execution, inspect the emulator
+internal state, connect new devices, etc.
+
+----------
+Motivation
+----------
+While it is already possible to use emulators directly, there is a significant
+learning curve for using a specific emulator. Even for the same emulator each
+emulated machine (board) has its own peculiarities and it often requires tweaks
+to customize it to a specific project's needs through command line options or
+scripts (either native emulator scripts, if supported, or through helper shell
+scripts).
+
+Once started, the user is responsible for managing the emulator life-cycle,
+potentially for multiple instances. They also have to interact with it through
+various channels (monitor, debugger, serial ports) that require some level of
+host resource management. Especially in the case of using multiple emulator
+instances, manually managing host resources is burdensome.
+
+A frequent example is the default debugger ``localhost:1234`` port that can
+conflict with multiple emulator instances or with other debuggers running on the
+host. Another example: serials exposed over PTY have the pts number in
+``/dev/pts/`` allocated dynamically and it requires the user to retrieve it
+somehow.
+
+This gets even more complex when using different operating systems where some
+type of host resources are not available (e.g. no PTYs on Windows) or with
+limited functionality (e.g. UNIX sockets are supported on Windows > Win10 but
+only for stream sockets and there is no Python support available yet).
+
+Using emulators in CI is also difficult, in part because host resource
+management is getting more complicated due to scaling (e.g. more chances of TCP
+port conflicts) and restrictions in the execution environment. But also because
+of a lack of high level APIs to control emulators and access their channels.
+
+--------
+Proposal
+--------
+Add a new Pigweed module that:
+
+* Allows users to define emulation :ref:`targets <docs-targets>` that
+  encapsulate the emulated machine configuration, the tools configuration and
+  the host channels configuration.
+
+* Provides a command line interface that manages multiple emulator instances and
+  provides interactive access to the emulator's host channels.
+
+* Provides a Python API to control emulator instances and access the emulator's
+  host channels.
+
+* Supports multiple emulators, QEMU and renode as a starting point.
+
+* Expose channels for gdb, monitor and user selected devices through
+  configurable host resources, sockets and PTYs as a starting point.
+
+The following sections will add more details about the configuration, the
+command line interface, the API for controlling and accessing emulators and the
+API for adding support for more emulators.
+
+
+Configuration
+=============
+The emulators configuration is part of the Pigweed root configuration file
+(``pigweed.json``) and resides in the ``pw:pw_emu`` namespace.
+
+Projects can define emulation targets in the Pigweed root configuration file and
+can also import predefined targets from other files. The pw_emu module provides
+a set of targets as examples and to promote reusability.
+
+For example, the following top level ``pigweed.json`` configuration includes a
+target fragment from the ``pw_emu/qemu-lm3s6965evb.json`` file:
+
+.. code-block::
+
+   {
+     "pw": {
+       "pw_emu": {
+         "target_files": [
+           "pw_emu/qemu-lm3s6965evb.json"
+         ]
+       }
+     }
+   }
+
+
+``pw_emu/qemu-lm3s6965evb.json`` defines the ``qemu-lm3s6965evb`` target
+that uses qemu as the emulator and lm3s6965evb as the machine, with the
+``serial0`` chardev exposed as ``serial0``:
+
+.. code-block::
+
+   {
+     "targets": {
+       "qemu-lm3s6965evb": {
+         "gdb": "arm-none-eabi-gdb",
+         "qemu": {
+           "executable": "qemu-system-arm",
+           "machine": "lm3s6965evb",
+           "channels": {
+             "chardevs": {
+               "serial0": {
+                 "id": "serial0"
+               }
+             }
+           }
+         }
+       }
+     }
+   }
+
+This target emulates an lm3s6965evb board and is compatible with the
+:ref:`target-lm3s6965evb-qemu` Pigweed build target.
+
+The configuration defines a ``serial0`` channel to be the QEMU **chardev** with
+the ``serial0`` id. The default type of the channel is used, which is TCP and
+which is supported by all platforms. The user can change the type by adding a
+``type`` key set to the desired type (e.g. ``pty``).
+
+The following configuration fragment defines a target that uses renode:
+
+.. code-block::
+
+   {
+     "targets": {
+       "renode-stm32f4_discovery": {
+         "gdb": "arm-none-eabi-gdb",
+         "renode": {
+           "executable": "renode",
+           "machine": "platforms/boards/stm32f4_discovery-kit.repl",
+           "channels": {
+             "terminals": {
+               "serial0": {
+                 "device-path": "sysbus.uart0",
+                 "type": "pty"
+               }
+             }
+           }
+         }
+       }
+     }
+   }
+
+Note that ``machine`` is used to identify which renode script to use to load the
+platform description from, and ``terminals`` to define which UART devices to
+expose to the host. Also note the ``serial0`` channel is set to be exposed as a
+PTY on the host.
+
+The following channel types are currently supported:
+
+* ``pty``: supported on Mac and Linux; renode only supports PTYs for
+  ``terminals`` channels.
+
+* ``tcp``: supported on all platforms and for all channels; it is also the
+  default type if no channel type is configured.
+
+The channel configuration can be set at multiple levels: emulator, target, or
+specific channel. The specific channel configuration takes precedence, then the
+target channel configuration, then the emulator channel configuration.
+
+The following expressions are replaced in the configuration strings:
+
+* ``$pw_emu_wdir{relative-path}``: replaces statement with an absolute path by
+  concatenating the emulator's working directory with the given relative path.
+
+* ``$pw_emu_channel_port{channel-name}``: replaces the statement with the port
+  number for the given channel name; the channel type should be ``tcp``.
+
+* ``$pw_emu_channel_host{channel-name}``: replaces the statement with the host
+  for the given channel name; the channel type should be ``tcp``.
+
+* ``$pw_emu_channel_path{channel-name}``: replaces the statement with the path
+  for the given channel name; the channel type should be ``pty``.
+
+Besides running QEMU and renode as the main emulator, the target configuration
+allows users to start other programs before or after starting the main emulator
+process. This allows extending the emulated target with simulation or emulation
+outside of the main emulator. For example, for BLE emulation the main emulator
+could emulate just the serial port while the HCI emulation is done in an
+external program (e.g. `bumble <https://google.github.io/bumble>`_, `netsim
+<https://android.googlesource.com/platform/tools/netsim>`_).
+
+
+Command line interface
+======================
+The command line interface enables users to control emulator instances and
+access their channels interactively.
+
+.. code-block:: text
+
+   usage: pw emu [-h] [-i STRING] [-w WDIR] {command} ...
+
+   Pigweed Emulators Frontend
+
+    start               Launch the emulator and start executing, unless pause
+                        is set.
+    restart             Restart the emulator and start executing, unless pause
+                        is set.
+    run                 Start the emulator and connect the terminal to a
+                        channel. Stop the emulator when exiting the terminal
+    stop                Stop the emulator
+    load                Load an executable image via gdb. If pause is not set
+                        start executing it.
+    reset               Perform a software reset.
+    gdb                 Start a gdb interactive session
+    prop-ls             List emulator object properties.
+    prop-get            Show the emulator's object properties.
+    prop-set            Set the emulator's object properties.
+    gdb-cmds            Run gdb commands in batch mode.
+    term                Connect with an interactive terminal to an emulator
+                        channel
+
+   optional arguments:
+    -h, --help            show this help message and exit
+    -i STRING, --instance STRING
+                          instance to use (default: default)
+    -w WDIR, --wdir WDIR  path to working directory (default: None)
+
+   commands usage:
+       usage: pw emu start [-h] [--file FILE] [--runner {None,qemu,renode}]
+                     [--args ARGS] [--pause] [--debug] [--foreground]
+                           {qemu-lm3s6965evb,qemu-stm32vldiscovery,qemu-netduinoplus2}
+        usage: pw emu restart [-h] [--file FILE] [--runner {None,qemu,renode}]
+                      [--args ARGS] [--pause] [--debug] [--foreground]
+                      {qemu-lm3s6965evb,qemu-stm32vldiscovery,qemu-netduinoplus2}
+        usage: pw emu stop [-h]
+        usage: pw emu run [-h] [--args ARGS] [--channel CHANNEL]
+                      {qemu-lm3s6965evb,qemu-stm32vldiscovery,qemu-netduinoplus2} FILE
+        usage: pw emu load [-h] [--pause] FILE
+        usage: pw emu reset [-h]
+        usage: pw emu gdb [-h] [--executable FILE]
+        usage: pw emu prop-ls [-h] path
+        usage: pw emu prop-get [-h] path property
+        usage: pw emu prop-set [-h] path property value
+        usage: pw emu gdb-cmds [-h] [--pause] [--executable FILE] gdb-command [gdb-command ...]
+        usage: pw emu term [-h] channel
+
+For example, the ``run`` command is useful for quickly running ELF binaries on an
+emulated target and seeing / interacting with a serial channel. It starts an
+emulator, loads an image, connects to a channel and starts executing.
+
+.. code-block::
+
+   $ pw emu run qemu-netduinoplus2 out/stm32f429i_disc1_debug/obj/pw_snapshot/test/cpp_compile_test.elf
+
+   --- Miniterm on serial0 ---
+   --- Quit: Ctrl+] | Menu: Ctrl+T | Help: Ctrl+T followed by Ctrl+H ---
+   INF  [==========] Running all tests.
+   INF  [ RUN      ] Status.CompileTest
+   INF  [       OK ] Status.CompileTest
+   INF  [==========] Done running all tests.
+   INF  [  PASSED  ] 1 test(s).
+   --- exit ---
+
+Multiple emulator instances can be run and each emulator instance is identified
+by its working directory. The default working directory for ``pw emu`` is
+``$PW_PROJECT_ROOT/.pw_emu/<instance-id>`` where ``<instance-id>`` is a command
+line option that defaults to ``default``.
+
+For more complex usage patterns, the ``start`` command can be used which will
+launch an emulator instance in the background. Then, the user can debug the
+image with the ``gdb`` command, connect to a channel (e.g. serial port) with the
+``term`` command, reset the emulator with the ``reset`` command, inspect or
+change emulator properties with the ``prop-ls``, ``prop-get``, ``prop-set`` and
+finally stop the emulator instance with ``stop``.
+
+
+Python APIs
+===========
+The pw_emu module offers Python APIs to launch, control and interact with an
+emulator instance.
+
+The following is an example of using these APIs which implements a simplified
+version of the ``run`` pw_emu CLI command:
+
+.. code-block:: python
+
+   # start an emulator instance and load the image to execute
+   # pause the emulator after loading the image
+   emu = Emulator(args.wdir)
+   emu.start(args.target, args.file, pause=True)
+
+   # determine the channel type and create a pyserial compatible URL
+   chan_type = emu.get_channel_type(args.chan)
+   if chan_type == 'tcp':
+       host, port = emu.get_channel_addr(chan)
+       url = f'socket://{host}:{port}'
+   elif chan_type == 'pty':
+       url = emu.get_channel_path(chan)
+   else:
+       raise Error(f'unknown channel type `{chan_type}`')
+
+   # open the serial port and create a miniterm instance
+   serial = serial_for_url(url)
+   serial.timeout = 1
+   miniterm = Miniterm(serial)
+   miniterm.raw = True
+   miniterm.set_tx_encoding('UTF-8')
+   miniterm.set_rx_encoding('UTF-8')
+
+   # now that we are connected to the channel we can unpause
+   # this approach prevents any data loss
+   emu.cont()
+
+   miniterm.start()
+   try:
+       miniterm.join(True)
+   except KeyboardInterrupt:
+       pass
+   miniterm.stop()
+   miniterm.join()
+   miniterm.close()
+
+For convenience, a ``TemporaryEmulator`` class is also provided.
+
+It manages emulator instances that run in temporary working directories. The
+emulator instance is stopped and the working directory is cleared when the with
+block completes.
+
+It also supports interoperability with the ``pw emu`` CLI, i.e. starting the
+emulator with the CLI and controlling / interacting with it from the API.
+
+Usage example:
+
+.. code-block:: python
+
+   # programmatically start and load an executable then access it
+   with TemporaryEmulator() as emu:
+       emu.start(target, file)
+       with emu.get_channel_stream(chan) as stream:
+           ...
+
+
+   # or start it from the command line then access it programmatically
+   with TemporaryEmulator() as emu:
+       build.bazel(
+           ctx,
+           "run",
+           exec_path,
+           "--run_under=pw emu start <target> --file "
+       )
+
+       with emu.get_channel_stream(chan) as stream:
+           ...
+
+
+Intended API shape
+------------------
+This is not an API reference, just an example of the probable shape of the final
+API.
+
+:python:`class Emulator` is used to launch, control and interact with an
+emulator instance:
+
+.. code-block:: python
+
+   def start(
+       self,
+       target: str,
+       file: Optional[os.PathLike] = None,
+       pause: bool = False,
+       debug: bool = False,
+       foreground: bool = False,
+       args: Optional[str] = None,
+   ) -> None:
+
+|nbsp|
+   Start the emulator for the given target.
+
+   If file is set, the emulator will load the file before starting.
+
+   If pause is True the emulator is paused until the debugger is connected.
+
+   If debug is True the emulator is run in foreground with debug output
+   enabled. This is useful for seeing errors, traces, etc.
+
+   If foreground is True the emulator is run in foreground otherwise it is
+   started in daemon mode. This is useful when there is another process
+   controlling the emulator's life cycle (e.g. cuttlefish)
+
+   args are passed directly to the emulator
+
+:python:`def running(self) -> bool:`
+   Check if the main emulator process is already running.
+
+:python:`def stop(self) -> None`
+   Stop the emulator
+
+:python:`def get_gdb_remote(self) -> str:`
+   Return a string that can be passed to the target remote gdb command.
+
+:python:`def get_gdb(self) -> Optional[str]:`
+   Returns the gdb command for current target.
+
+.. code-block:: python
+
+   def run_gdb_cmds(
+       commands : List[str],
+       executable: Optional[Path] = None,
+       pause: bool = False
+   ) -> subprocess.CompletedProcess:
+
+|nbsp|
+   Connect to the target and run the given commands silently
+   in batch mode.
+
+   The executable is optional but it may be required by some gdb
+   commands.
+
+   If pause is set do not continue execution after running the
+   given commands.
+
+:python:`def reset() -> None`
+   Performs a software reset
+
+:python:`def list_properties(self, path: str) -> List[Dict]`
+   Returns the property list for an emulator object.
+
+   The object is identified by a full path. The path is target specific and
+   the format of the path is emulator specific.
+
+   QEMU path example: /machine/unattached/device[10]
+
+   renode path example: sysbus.uart
+
+:python:`def set_property(path: str, prop: str, value: str) -> None`
+   Sets the value of an emulator's object property.
+
+:python:`def get_property(self, path: str, prop: str) -> str`
+   Returns the value of an emulator's object property.
+
+:python:`def get_channel_type(self, name: str) -> str`
+   Returns the channel type.
+
+   Currently ``pty`` or ``tcp`` are the only supported types.
+
+:python:`def get_channel_path(self, name: str) -> str:`
+   Returns the channel path. Raises InvalidChannelType if this is not a PTY
+   channel.
+
+:python:`def get_channel_addr(self, name: str) -> tuple:`
+   Returns a pair of (host, port) for the channel. Raises InvalidChannelType
+   if this is not a tcp channel.
+
+.. code-block:: python
+
+   def get_channel_stream(
+       name: str,
+       timeout: Optional[float] = None
+   ) -> io.RawIOBase:
+
+|nbsp|
+   Returns a file object for a given host exposed device.
+
+   If timeout is None then reads and writes are blocking. If timeout is zero the
+   stream is operating in non-blocking mode. Otherwise read and write will
+   timeout after the given value.
+
+:python:`def get_channels(self) -> List[str]:`
+   Returns the list of available channels.
+
+:python:`def cont(self) -> None:`
+   Resume the emulator's execution
+
+---------------------
+Problem investigation
+---------------------
+Pigweed is missing a tool for basic emulator control and, as shown in the
+motivation section, directly using emulators is difficult.
+
+While emulation is not a goal for every project, it is appealing for some due
+to the low cost and scalability. Offering a customizable emulator frontend in
+Pigweed will make this even better for downstream projects as the investment to
+get started with emulation will be lower - significantly lower for projects
+looking for basic usage.
+
+There are two main use-cases that this proposal is addressing:
+
+* Easier and robust interactive debugging and testing on emulators.
+
+* Basic APIs for controlling and accessing emulators to help with emulator
+  based testing (and trivial CI deployment - as long as the Pigweed bootstrap
+  process can run in CI).
+
+The proposal focuses on a fairly small set of commands and APIs in
+order to minimize complexity and gather feedback from users before adding more
+features.
+
+Since the state of emulated boards may differ between emulators, to enable
+users access to more emulated targets, the goal of the module is to support
+multiple emulators from the start.
+
+Two emulators were selected for the initial implementation: QEMU and
+renode. Both run on all Pigweed currently supported hosts (Linux, Mac, Windows)
+and there is good overlap in terms of APIs to configure, start, control and
+access host exposed channels, making these two a good starting point for the
+initial implementation. These emulators also have good support for embedded targets
+(with QEMU more focused on MMU class machines and renode fully focused on
+microcontrollers) and are widely used in this space for emulation purposes.
+
+
+Prior art
+=========
+While there are several emulator frontends available, their main focus is on
+graphical interfaces (`aqemu <https://sourceforge.net/projects/aqemu/>`_,
+`GNOME Boxes <https://wiki.gnome.org/Apps/Boxes>`_,
+`QtEmu <https://gitlab.com/qtemu/gui>`_,
+`qt-virt-manager <https://f1ash.github.io/qt-virt-manager/>`_,
+`virt-manager <https://virt-manager.org/>`_) and virtualization (
+`virsh <https://www.libvirt.org/>`_,
+`guestfish <https://libguestfs.org/>`_).
+`qemu-init <https://github.com/mm1ke/qemu-init>`_ is a qemu CLI frontend but since
+it is written in bash it does not work on Windows, nor is it easy to retrofit
+to add Python APIs for automation.
+
+.. inclusive-language: disable
+
+The QEMU project has a few `Python modules
+<https://github.com/qemu/qemu/tree/master/python/qemu>`_ that are used
+internally for testing and debugging QEMU. :python:`qemu.machine.QEMUMachine`
+implements a QEMU frontend that can start a QEMU process and can interact with
+it. However, it is clearly marked for internal use only, it is not published on
+pypi or with the QEMU binaries. It is also not as configurable for pw_emu's
+use-cases (e.g. does not support running the QEMU process in the background,
+does not support multiple serial ports, does not support configuring how to expose the
+serial port, etc.). The :python:`qemu.qmp` module is `published on pypi
+<https://pypi.org/project/qemu.qmp/>`_ and can be potentially used by `pw_emu`
+to interact with the emulator over the QMP channel.
+
+.. inclusive-language: enable
+
+---------------
+Detailed design
+---------------
+The implementation supports QEMU and renode as emulators and works on
+Linux, Mac and Windows.
+
+Multiple instances are supported in order to enable developers who work on
+multiple downstream Pigweed projects to work unhindered and also to run
+multiple test instances in parallel on the same machine.
+
+Each instance is identified by a system absolute path that is also used to store
+state about the running instance such as pid files for running processes,
+current emulator and target, etc. This directory also contains information about
+how to access the emulator channels (e.g. socket ports, PTY paths).
+
+.. mermaid::
+
+   graph TD;
+       TemporaryEmulator & pw_emu_cli[pw emu cli] <--> Emulator
+       Emulator <--> Launcher & Connector
+       Launcher  <--> Handles
+       Connector <--- Handles
+       Launcher <--> Config
+       Handles --Save--> WD --Load--> Handles
+       WD[Working Directory]
+
+The implementation uses the following classes:
+
+* :py:class:`pw_emu.Emulator`: the user visible APIs
+
+* :py:class:`pw_emu.core.Launcher`: an abstract class that starts an emulator
+  instance for a given configuration and target
+
+* :py:class:`pw_emu.core.Connector`: an abstract class that is the interface
+  between a running emulator and the user visible APIs
+
+* :py:class:`pw_emu.core.Handles`: class that stores specific information about
+  a running emulator instance such as ports to reach emulator channels; it is
+  populated by :py:class:`pw_emu.core.Launcher` and saved in the working
+  directory and used by :py:class:`pw_emu.core.Connector` to access the emulator
+  channels, process pids, etc.
+
+* :py:class:`pw_emu.core.Config`: loads the pw_emu configuration and provides
+  helper methods to get and validate configuration options
+
+
+Documentation update
+====================
+The following documentation should probably be updated to use ``pw emu`` instead
+of direct QEMU invocation: :ref:`module-pw_rust`,
+:ref:`target-lm3s6965evb-qemu`. The referenced QEMU targets are defined in
+fragment configuration files in the pw_emu module and included in the top level
+pigweed.json file.
+
+------------
+Alternatives
+------------
+UNIX sockets were investigated as an alternative to TCP for the host exposed
+channels. The main advantage of UNIX sockets over TCP is that they do not
+require dynamic port allocation, which simplifies emulator bootstrap (no need
+to query the emulator to determine which ports were allocated). Unfortunately,
+while Windows supports UNIX sockets since Win10, Python still does not support
+them on win32 platforms. renode also does not support UNIX sockets.
+
+--------------
+Open questions
+--------------
+
+Renode dynamic ports
+====================
+While renode allows passing 0 for ports to allocate a dynamic port, it does not
+have APIs to retrieve the allocated port. Until support for such a feature is
+added upstream, the following technique can be used to allocate a port
+dynamically:
+
+.. code-block:: python
+
+   sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+   sock.bind(('', 0))
+   _, port = sock.getsockname()
+   sock.close()
+
+There is a race condition that allows another program to fetch the same port,
+but it should work in most light use cases until the issue is properly resolved
+upstream.
+
+qemu_gcc target
+===============
+It should still be possible to call QEMU directly as described in
+:ref:`target-lm3s6965evb-qemu` however, once ``pw_emu`` is implemented it is
+probably better to define a lm3s6965evb emulation target and update the
+documentation to use ``pw emu`` instead of the direct QEMU invocation.
+
+
+.. |nbsp| unicode:: 0xA0
+   :trim:
diff --git a/seed/BUILD.gn b/seed/BUILD.gn
index 6a5158f..9853e34 100644
--- a/seed/BUILD.gn
+++ b/seed/BUILD.gn
@@ -23,6 +23,8 @@
     ":0002",
     ":0101",
     ":0102",
+    ":0107",
+    ":0108",
   ]
 }
 
@@ -42,3 +44,11 @@
 pw_doc_group("0102") {
   sources = [ "0102-module-docs.rst" ]
 }
+
+pw_doc_group("0107") {
+  sources = [ "0107-communications.rst" ]
+}
+
+pw_doc_group("0108") {
+  sources = [ "0108-pw_emu-emulators-frontend.rst" ]
+}
diff --git a/targets/android/target_docs.rst b/targets/android/target_docs.rst
index cb6be04..371a7c1 100644
--- a/targets/android/target_docs.rst
+++ b/targets/android/target_docs.rst
@@ -25,20 +25,20 @@
 To build for this Pigweed target, simply build the top-level "android" Ninja
 target. You can set Pigweed build options using ``gn args out`` or by running:
 
-.. code:: sh
+.. code-block:: sh
 
   gn gen out --args='
     pw_android_toolchain_NDK_PATH="/path/to/android/ndk"'
 
 On a Windows machine it's easier to run:
 
-.. code:: sh
+.. code-block:: sh
 
   gn args out
 
 That will open a text file where you can paste the args in:
 
-.. code:: text
+.. code-block:: text
 
   pw_android_toolchain_NDK_PATH = "/path/to/android/ndk"
 
@@ -46,7 +46,7 @@
 
 Then build with:
 
-.. code:: sh
+.. code-block:: sh
 
   ninja -C out android
 
@@ -55,12 +55,12 @@
 
 To build for a specific CPU target only, at the default optimization level:
 
-.. code:: sh
+.. code-block:: sh
 
   ninja -C out arm64_android
 
 Or to build for a specific CPU target and optimization level:
 
-.. code:: sh
+.. code-block:: sh
 
   ninja -C out arm64_android_size_optimized
diff --git a/targets/apollo4/BUILD.bazel b/targets/apollo4/BUILD.bazel
new file mode 100644
index 0000000..5f65670
--- /dev/null
+++ b/targets/apollo4/BUILD.bazel
@@ -0,0 +1,27 @@
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])
+
+# This is just a stub to silence warnings saying that files are
+# missing from the bazel build.
+filegroup(
+    name = "apollo4_files",
+    srcs = [
+        "boot.cc",
+        "vector_table.c",
+    ],
+)
diff --git a/targets/apollo4/BUILD.gn b/targets/apollo4/BUILD.gn
new file mode 100644
index 0000000..fada563
--- /dev/null
+++ b/targets/apollo4/BUILD.gn
@@ -0,0 +1,107 @@
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pigweed/third_party/ambiq/ambiq.gni")
+import("$dir_pw_build/error.gni")
+import("$dir_pw_build/linker_script.gni")
+import("$dir_pw_build/target_types.gni")
+import("$dir_pw_cpu_exception/backend.gni")
+import("$dir_pw_docgen/docs.gni")
+import("$dir_pw_malloc/backend.gni")
+import("$dir_pw_system/backend.gni")
+import("$dir_pw_toolchain/generate_toolchain.gni")
+import("target_toolchains.gni")
+
+generate_toolchains("target_toolchains") {
+  toolchains = pw_target_toolchain_apollo4_list
+}
+
+pw_build_assert("check_ambiq_product_defined") {
+  condition = pw_third_party_ambiq_PRODUCT == "apollo4p" ||
+              pw_third_party_ambiq_PRODUCT == "apollo4b"
+  message = "Build argument pw_third_party_ambiq_PRODUCT must be one of " +
+            "the following values: 'apollo4p' or 'apollo4b'."
+  visibility = [ ":*" ]
+}
+
+config("disable_warnings") {
+  cflags = [ "-Wno-undef" ]
+  visibility = [ ":*" ]
+}
+
+config("pw_malloc_active") {
+  if (pw_malloc_BACKEND != "") {
+    defines = [ "PW_MALLOC_ACTIVE=1" ]
+  } else {
+    defines = [ "PW_MALLOC_ACTIVE=0" ]
+  }
+}
+
+config("pw_system_active") {
+  if (pw_system_TARGET_HOOKS_BACKEND != "") {
+    ldflags = [
+      "-Wl,--defsym=SysTick_Handler=xPortSysTickHandler",
+      "-Wl,--defsym=PendSV_Handler=xPortPendSVHandler",
+      "-Wl,--defsym=SVC_Handler=vPortSVCHandler",
+    ]
+  }
+}
+
+config("pw_cpu_exception_active") {
+  if (pw_cpu_exception_ENTRY_BACKEND != "") {
+    ldflags = [
+      "-Wl,--defsym=HardFault_Handler=pw_cpu_exception_Entry",
+      "-Wl,--defsym=MemManage_Handler=pw_cpu_exception_Entry",
+      "-Wl,--defsym=BusFault_Handler=pw_cpu_exception_Entry",
+      "-Wl,--defsym=UsageFault_Handler=pw_cpu_exception_Entry",
+      "-Wl,--defsym=NMI_Handler=pw_cpu_exception_Entry",
+    ]
+  }
+}
+
+if (current_toolchain != default_toolchain) {
+  pw_source_set("boot") {
+    public_configs = [ ":pw_malloc_active" ]
+
+    all_dependent_configs = [
+      ":pw_system_active",
+      ":pw_cpu_exception_active",
+    ]
+
+    public_deps = [ "$dir_pw_third_party/ambiq:sdk" ]
+
+    deps = [
+      ":check_ambiq_product_defined",
+      "$dir_pw_boot",
+      "$dir_pw_boot_cortex_m",
+      "$dir_pw_preprocessor",
+      "$dir_pw_sys_io_ambiq_sdk",
+    ]
+
+    if (pw_malloc_BACKEND != "") {
+      deps += [ "$dir_pw_malloc" ]
+    }
+
+    sources = [
+      "boot.cc",
+      "vector_table.c",
+    ]
+  }
+}
+
+pw_doc_group("target_docs") {
+  sources = [ "target_docs.rst" ]
+}
diff --git a/targets/apollo4/OWNERS b/targets/apollo4/OWNERS
new file mode 100644
index 0000000..0eb639f
--- /dev/null
+++ b/targets/apollo4/OWNERS
@@ -0,0 +1 @@
+elizarovv@google.com
diff --git a/targets/apollo4/apollo4_executable.gni b/targets/apollo4/apollo4_executable.gni
new file mode 100644
index 0000000..49878f2
--- /dev/null
+++ b/targets/apollo4/apollo4_executable.gni
@@ -0,0 +1,33 @@
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+import("$dir_pw_malloc/backend.gni")
+
+# Executable wrapper that includes some baremetal startup code.
+template("apollo4_executable") {
+  target("executable", target_name) {
+    forward_variables_from(invoker, "*")
+    if (!defined(deps)) {
+      deps = []
+    }
+    deps += [ "$dir_pigweed/targets/apollo4:boot" ]
+    if (pw_malloc_BACKEND != "") {
+      if (!defined(configs)) {
+        configs = []
+      }
+      configs += [ "$dir_pw_malloc:pw_malloc_wrapper_config" ]
+    }
+  }
+}
diff --git a/targets/apollo4/boot.cc b/targets/apollo4/boot.cc
new file mode 100644
index 0000000..5072f69
--- /dev/null
+++ b/targets/apollo4/boot.cc
@@ -0,0 +1,37 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_boot_cortex_m/boot.h"
+
+#include "pw_malloc/malloc.h"
+#include "pw_sys_io_ambiq_sdk/init.h"
+
+PW_EXTERN_C void pw_boot_PreStaticMemoryInit() {}
+
+PW_EXTERN_C void pw_boot_PreStaticConstructorInit() {
+  if constexpr (PW_MALLOC_ACTIVE == 1) {
+    pw_MallocInit(&pw_boot_heap_low_addr, &pw_boot_heap_high_addr);
+  }
+
+  pw_sys_io_Init();
+}
+
+PW_EXTERN_C void pw_boot_PreMainInit() {}
+
+PW_EXTERN_C PW_NO_RETURN void pw_boot_PostMain() {
+  // In case main() returns, just sit here until the device is reset.
+  while (true) {
+  }
+  PW_UNREACHABLE;
+}
diff --git a/targets/apollo4/target_docs.rst b/targets/apollo4/target_docs.rst
new file mode 100644
index 0000000..7462be0
--- /dev/null
+++ b/targets/apollo4/target_docs.rst
@@ -0,0 +1,165 @@
+.. _target-apollo4:
+
+===========================
+Ambiq Apollo4
+===========================
+-----
+Setup
+-----
+To use this target, Pigweed must be set up to use the `AmbiqSuite SDK`_ HAL
+for the Apollo4 series which can be downloaded from the Ambiq website.
+
+.. _AmbiqSuite SDK: https://ambiq.com/apollo4-blue-plus
+
+Once the AmbiqSuite SDK package has been downloaded and extracted, the user
+needs to set ``dir_pw_third_party_ambiq_SDK`` build arg to the location of
+extracted directory:
+
+.. code-block:: sh
+
+  $ gn args out
+
+Then add the following lines to that text file:
+
+.. code-block::
+
+   # Path to the extracted AmbiqSuite SDK package.
+   dir_pw_third_party_ambiq_SDK = "/path/to/AmbiqSuite_R4.3.0"
+
+Usage
+=====
+The Apollo4 is configured to output logs and test results over the UART to an
+on-board J-Link Debug Probe (Virtual COM Port) at a baud rate of 115200.
+
+Once the AmbiqSuite SDK is configured, the unit tests for the Apollo4 board
+can be built with the following command:
+
+.. code-block:: sh
+
+  ninja -C out apollo4
+
+If using out as a build directory, tests will be located in out/apollo4/obj/[module name]/[test_name].elf.
+
+Flashing using SEGGER's J-Link
+==============================
+Flashing the Apollo4 board can be done using the J-Flash Lite GUI program or from
+a command line using ``JLinkExe`` program. The latter requires a script which
+describes the steps of programming. Here is an example bash script to flash
+an Apollo4 board using ``JLinkExe`` program:
+
+.. code-block:: sh
+
+  #!/bin/bash
+  function flash_jlink()
+  {
+     local TMP_FLASH_SCRIPT=/tmp/gdb-flash.txt
+
+     cat > $TMP_FLASH_SCRIPT <<- EOF
+        r
+        h
+        loadfile $1
+        r
+        q
+  EOF
+
+     JLinkExe -NoGui 1 -device AMAP42KK-KBR -if SWD -speed 4000 -autoconnect 1 -CommanderScript $TMP_FLASH_SCRIPT
+
+     rm "$TMP_FLASH_SCRIPT"
+  }
+
+  flash_jlink $@
+
+Then call this script:
+
+.. code-block:: sh
+
+  bash ./flash_amap4.sh ./out/apollo4_debug/obj/pw_log/test/basic_log_test.elf
+
+In this case the basic log test is debugged, but substitute your own ELF file.
+
+Debugging
+=========
+Debugging can be done using the on-board J-Link Debug Probe. First you need to
+start ``JLinkGDBServer`` and connect to the on-board J-Link Debug Probe.
+
+.. code-block:: sh
+
+  JLinkGDBServer -select USB      \
+            -device AMAP42KK-KBR  \
+            -endian little        \
+            -if SWD               \
+            -speed 4000           \
+            -noir -LocalhostOnly  \
+            -singlerun            \
+            -nogui                \
+            -excdbg               \
+            -rtos GDBServer/RTOSPlugin_FreeRTOS.dylib
+
+The ``-rtos`` option is for `Thread Aware Debugging`_.
+
+.. _Thread Aware Debugging: https://www.segger.com/products/debug-probes/j-link/tools/j-link-gdb-server/thread-aware-debugging/
+
+Then on the second terminal window use ``arm-none-eabi-gdb`` to load an executable
+into the target, debug, and run it.
+
+.. code-block:: sh
+
+  arm-none-eabi-gdb -q out/apollo4_debug/obj/pw_log/test/basic_log_test.elf
+
+This can be combined with a simple bash script. Here is an example of one:
+
+.. code-block:: sh
+
+  #!/bin/bash
+
+  function debug_jlink()
+  {
+     local TMP_GDB_SCRIPT=/tmp/gdb-debug.txt
+
+     # Create GDB script.
+
+     cat > $TMP_GDB_SCRIPT <<- EOF
+
+     # Backtrace all threads.
+
+     define btall
+       thread apply all backtrace
+     end
+
+     target remote localhost:2331
+     load
+     monitor reset
+     monitor halt
+     b pw_boot_Entry
+
+  EOF
+
+     # Start GDB server.
+
+     set -m
+     JLinkGDBServer -select USB       \
+                -device AMAP42KK-KBR  \
+                -endian little        \
+                -if SWD               \
+                -speed 4000           \
+                -noir -LocalhostOnly  \
+                -singlerun            \
+                -nogui                \
+                -excdbg               \
+                -rtos GDBServer/RTOSPlugin_FreeRTOS.dylib &
+     set +m
+
+     # Debug program.
+
+     arm-none-eabi-gdb -q $1 -x $TMP_GDB_SCRIPT
+
+     rm "$TMP_GDB_SCRIPT"
+  }
+
+  debug_jlink $@
+
+Then call this script:
+
+.. code-block:: sh
+
+  bash ./debug_amap4.sh ./out/apollo4_debug/obj/pw_log/test/basic_log_test.elf
diff --git a/targets/apollo4/target_toolchains.gni b/targets/apollo4/target_toolchains.gni
new file mode 100644
index 0000000..e614f71
--- /dev/null
+++ b/targets/apollo4/target_toolchains.gni
@@ -0,0 +1,149 @@
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_log/backend.gni")
+import("$dir_pw_toolchain/arm_gcc/toolchains.gni")
+
+_target_config = {
+  pw_third_party_ambiq_PRODUCT = "apollo4p"
+
+  # Use the logging main.
+  pw_unit_test_MAIN = "$dir_pw_unit_test:logging_main"
+
+  # Use ARM Cycle Counts
+  pw_perf_test_TIMER_INTERFACE_BACKEND = "$dir_pw_perf_test:arm_cortex_timer"
+
+  # Configuration options for Pigweed executable targets.
+  pw_build_EXECUTABLE_TARGET_TYPE = "apollo4_executable"
+
+  pw_build_EXECUTABLE_TARGET_TYPE_FILE =
+      get_path_info("apollo4_executable.gni", "abspath")
+
+  # Path to the bloaty config file for the output binaries.
+  pw_bloat_BLOATY_CONFIG = "$dir_pw_boot_cortex_m/bloaty_config.bloaty"
+
+  # Configure backend for assert facade.
+  pw_assert_BACKEND = dir_pw_assert_basic
+
+  pw_boot_BACKEND = "$dir_pw_boot_cortex_m"
+  pw_cpu_exception_ENTRY_BACKEND =
+      "$dir_pw_cpu_exception_cortex_m:cpu_exception"
+  pw_cpu_exception_HANDLER_BACKEND = "$dir_pw_cpu_exception:basic_handler"
+  pw_cpu_exception_SUPPORT_BACKEND = "$dir_pw_cpu_exception_cortex_m:support"
+  pw_sync_INTERRUPT_SPIN_LOCK_BACKEND =
+      "$dir_pw_sync_baremetal:interrupt_spin_lock"
+
+  # Configure backends for pw_sync's facades.
+  pw_sync_MUTEX_BACKEND = "$dir_pw_sync_baremetal:mutex"
+
+  # Configure backend for logging facade.
+  pw_log_BACKEND = "$dir_pw_log_basic"
+
+  # Configure backend for pw_sys_io facade.
+  pw_sys_io_BACKEND = "$dir_pw_sys_io_ambiq_sdk"
+
+  # Configure backend for pw_rpc_system_server.
+  pw_rpc_system_server_BACKEND = "$dir_pw_hdlc:hdlc_sys_io_system_server"
+  pw_rpc_CONFIG = "$dir_pw_rpc:disable_global_mutex"
+
+  pw_malloc_BACKEND = "$dir_pw_malloc_freelist"
+
+  pw_boot_cortex_m_LINK_CONFIG_DEFINES = [
+    "PW_BOOT_VECTOR_TABLE_BEGIN=0x00018000",
+    "PW_BOOT_VECTOR_TABLE_SIZE=512",
+
+    "PW_BOOT_FLASH_BEGIN=0x00018200",
+    "PW_BOOT_FLASH_SIZE=1951K",
+
+    "PW_BOOT_HEAP_SIZE=100K",
+    "PW_BOOT_MIN_STACK_SIZE=1K",
+
+    "PW_BOOT_RAM_BEGIN=0x10000000",
+    "PW_BOOT_RAM_SIZE=1408K",
+  ]
+
+  pw_build_LINK_DEPS = []  # Explicit list overwrite required by GN
+  pw_build_LINK_DEPS = [
+    "$dir_pw_assert:impl",
+    "$dir_pw_log:impl",
+    "$dir_pw_cpu_exception:entry_impl",
+    "$dir_pw_toolchain/arm_gcc:arm_none_eabi_gcc_support",
+  ]
+
+  current_cpu = "arm"
+  current_os = ""
+}
+
+_toolchain_properties = {
+  final_binary_extension = ".elf"
+}
+
+_target_default_configs = [
+  "$dir_pw_build:extra_strict_warnings",
+  "$dir_pw_toolchain/arm_gcc:enable_float_printf",
+]
+
+pw_target_toolchain_apollo4 = {
+  _excluded_members = [
+    "defaults",
+    "name",
+  ]
+
+  debug = {
+    name = "apollo4_debug"
+    _toolchain_base = pw_toolchain_arm_gcc.cortex_m4f_debug
+    forward_variables_from(_toolchain_base, "*", _excluded_members)
+    forward_variables_from(_toolchain_properties, "*")
+    defaults = {
+      forward_variables_from(_toolchain_base.defaults, "*")
+      forward_variables_from(_target_config, "*")
+      default_configs += _target_default_configs
+    }
+  }
+
+  speed_optimized = {
+    name = "apollo4_speed_optimized"
+    _toolchain_base = pw_toolchain_arm_gcc.cortex_m4f_speed_optimized
+    forward_variables_from(_toolchain_base, "*", _excluded_members)
+    forward_variables_from(_toolchain_properties, "*")
+    defaults = {
+      forward_variables_from(_toolchain_base.defaults, "*")
+      forward_variables_from(_target_config, "*")
+      default_configs += _target_default_configs
+    }
+  }
+
+  size_optimized = {
+    name = "apollo4_size_optimized"
+    _toolchain_base = pw_toolchain_arm_gcc.cortex_m4f_size_optimized
+    forward_variables_from(_toolchain_base, "*", _excluded_members)
+    forward_variables_from(_toolchain_properties, "*")
+    defaults = {
+      forward_variables_from(_toolchain_base.defaults, "*")
+      forward_variables_from(_target_config, "*")
+      default_configs += _target_default_configs
+    }
+  }
+}
+
+# This list just contains the members of the above scope for convenience to make
+# it trivial to generate all the toolchains in this file via a
+# `generate_toolchains` target.
+pw_target_toolchain_apollo4_list = [
+  pw_target_toolchain_apollo4.debug,
+  pw_target_toolchain_apollo4.speed_optimized,
+  pw_target_toolchain_apollo4.size_optimized,
+]
diff --git a/targets/apollo4/vector_table.c b/targets/apollo4/vector_table.c
new file mode 100644
index 0000000..a05e153
--- /dev/null
+++ b/targets/apollo4/vector_table.c
@@ -0,0 +1,222 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include <stdbool.h>
+
+#include "pw_boot/boot.h"
+#include "pw_boot_cortex_m/boot.h"
+#include "pw_preprocessor/compiler.h"
+
+// Default handler to insert into the ARMv7-M vector table (below).
+// This function exists for convenience. If a device isn't doing what you
+// expect, it might have hit a fault and ended up here.
+static void DefaultFaultHandler(void) {
+  while (true) {
+    // Wait for debugger to attach.
+  }
+}
+
+// This typedef is for convenience when building the vector table. With the
+// exception of SP_main (0th entry in the vector table), all the entries of the
+// vector table are function pointers.
+typedef void (*InterruptHandler)(void);
+
+void SVC_Handler(void) PW_ALIAS(DefaultFaultHandler);
+void PendSV_Handler(void) PW_ALIAS(DefaultFaultHandler);
+void SysTick_Handler(void) PW_ALIAS(DefaultFaultHandler);
+
+void MemManage_Handler(void) PW_ALIAS(DefaultFaultHandler);
+void BusFault_Handler(void) PW_ALIAS(DefaultFaultHandler);
+void UsageFault_Handler(void) PW_ALIAS(DefaultFaultHandler);
+void DebugMon_Handler(void) PW_ALIAS(DefaultFaultHandler);
+
+void am_brownout_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_watchdog_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_rtc_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_vcomp_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_ioslave_ios_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_ioslave_acc_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_iomaster0_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_iomaster1_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_iomaster2_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_iomaster3_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_iomaster4_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_iomaster5_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_iomaster6_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_iomaster7_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_ctimer_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_uart_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_uart1_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_uart2_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_uart3_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_adc_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_mspi0_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_mspi1_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_mspi2_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_clkgen_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_cryptosec_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_sdio_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_usb_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_gpu_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_disp_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_dsi_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_stimer_cmpr0_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_stimer_cmpr1_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_stimer_cmpr2_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_stimer_cmpr3_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_stimer_cmpr4_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_stimer_cmpr5_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_stimer_cmpr6_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_stimer_cmpr7_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_stimerof_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_audadc0_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_dspi2s0_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_dspi2s1_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_dspi2s2_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_dspi2s3_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_pdm0_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_pdm1_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_pdm2_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_pdm3_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_gpio0_001f_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_gpio0_203f_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_gpio0_405f_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_gpio0_607f_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_gpio1_001f_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_gpio1_203f_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_gpio1_405f_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_gpio1_607f_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_timer00_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_timer01_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_timer02_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_timer03_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_timer04_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_timer05_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_timer06_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_timer07_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_timer08_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_timer09_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_timer10_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_timer11_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_timer12_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_timer13_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_timer14_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_timer15_isr(void) PW_ALIAS(DefaultFaultHandler);
+void am_cachecpu_isr(void) PW_ALIAS(DefaultFaultHandler);
+
+PW_KEEP_IN_SECTION(".vector_table")
+const InterruptHandler vector_table[] = {
+    // Cortex-M CPU specific interrupt handlers.
+    (InterruptHandler)(&pw_boot_stack_high_addr),
+    pw_boot_Entry,        // The reset handler
+    DefaultFaultHandler,  // The NMI handler
+    DefaultFaultHandler,  // The hard fault handler
+    DefaultFaultHandler,  // The MemManage_Handler
+    DefaultFaultHandler,  // The BusFault_Handler
+    DefaultFaultHandler,  // The UsageFault_Handler
+    0,                    // Reserved
+    0,                    // Reserved
+    0,                    // Reserved
+    0,                    // Reserved
+    SVC_Handler,          // SVCall handler
+    DefaultFaultHandler,  // Debug monitor handler
+    0,                    // Reserved
+    PendSV_Handler,       // The PendSV handler
+    SysTick_Handler,      // The SysTick handler
+    // Vendor specific peripheral interrupt handlers.
+    am_brownout_isr,      //  0: Brownout (rstgen)
+    am_watchdog_isr,      //  1: Watchdog (WDT)
+    am_rtc_isr,           //  2: RTC
+    am_vcomp_isr,         //  3: Voltage Comparator
+    am_ioslave_ios_isr,   //  4: I/O Responder general
+    am_ioslave_acc_isr,   //  5: I/O Responder access
+    am_iomaster0_isr,     //  6: I/O Controller 0
+    am_iomaster1_isr,     //  7: I/O Controller 1
+    am_iomaster2_isr,     //  8: I/O Controller 2
+    am_iomaster3_isr,     //  9: I/O Controller 3
+    am_iomaster4_isr,     // 10: I/O Controller 4
+    am_iomaster5_isr,     // 11: I/O Controller 5
+    am_iomaster6_isr,     // 12: I/O Controller 6 (I3C/I2C/SPI)
+    am_iomaster7_isr,     // 13: I/O Controller 7 (I3C/I2C/SPI)
+    am_ctimer_isr,        // 14: OR of all timerX interrupts
+    am_uart_isr,          // 15: UART0
+    am_uart1_isr,         // 16: UART1
+    am_uart2_isr,         // 17: UART2
+    am_uart3_isr,         // 18: UART3
+    am_adc_isr,           // 19: ADC
+    am_mspi0_isr,         // 20: MSPI0
+    am_mspi1_isr,         // 21: MSPI1
+    am_mspi2_isr,         // 22: MSPI2
+    am_clkgen_isr,        // 23: ClkGen
+    am_cryptosec_isr,     // 24: Crypto Secure
+    DefaultFaultHandler,  // 25: Reserved
+    am_sdio_isr,          // 26: SDIO
+    am_usb_isr,           // 27: USB
+    am_gpu_isr,           // 28: GPU
+    am_disp_isr,          // 29: DISP
+    am_dsi_isr,           // 30: DSI
+    DefaultFaultHandler,  // 31: Reserved
+    am_stimer_cmpr0_isr,  // 32: System Timer Compare0
+    am_stimer_cmpr1_isr,  // 33: System Timer Compare1
+    am_stimer_cmpr2_isr,  // 34: System Timer Compare2
+    am_stimer_cmpr3_isr,  // 35: System Timer Compare3
+    am_stimer_cmpr4_isr,  // 36: System Timer Compare4
+    am_stimer_cmpr5_isr,  // 37: System Timer Compare5
+    am_stimer_cmpr6_isr,  // 38: System Timer Compare6
+    am_stimer_cmpr7_isr,  // 39: System Timer Compare7
+    am_stimerof_isr,      // 40: System Timer Cap Overflow
+    DefaultFaultHandler,  // 41: Reserved
+    am_audadc0_isr,       // 42: Audio ADC
+    DefaultFaultHandler,  // 43: Reserved
+    am_dspi2s0_isr,       // 44: I2S0
+    am_dspi2s1_isr,       // 45: I2S1
+    am_dspi2s2_isr,       // 46: I2S2
+    am_dspi2s3_isr,       // 47: I2S3
+    am_pdm0_isr,          // 48: PDM0
+    am_pdm1_isr,          // 49: PDM1
+    am_pdm2_isr,          // 50: PDM2
+    am_pdm3_isr,          // 51: PDM3
+    DefaultFaultHandler,  // 52: Reserved
+    DefaultFaultHandler,  // 53: Reserved
+    DefaultFaultHandler,  // 54: Reserved
+    DefaultFaultHandler,  // 55: Reserved
+    am_gpio0_001f_isr,    // 56: GPIO N0 pins  0-31
+    am_gpio0_203f_isr,    // 57: GPIO N0 pins 32-63
+    am_gpio0_405f_isr,    // 58: GPIO N0 pins 64-95
+    am_gpio0_607f_isr,    // 59: GPIO N0 pins 96-104, virtual 105-127
+    am_gpio1_001f_isr,    // 60: GPIO N1 pins  0-31
+    am_gpio1_203f_isr,    // 61: GPIO N1 pins 32-63
+    am_gpio1_405f_isr,    // 62: GPIO N1 pins 64-95
+    am_gpio1_607f_isr,    // 63: GPIO N1 pins 96-104, virtual 105-127
+    DefaultFaultHandler,  // 64: Reserved
+    DefaultFaultHandler,  // 65: Reserved
+    DefaultFaultHandler,  // 66: Reserved
+    am_timer00_isr,       // 67: timer0
+    am_timer01_isr,       // 68: timer1
+    am_timer02_isr,       // 69: timer2
+    am_timer03_isr,       // 70: timer3
+    am_timer04_isr,       // 71: timer4
+    am_timer05_isr,       // 72: timer5
+    am_timer06_isr,       // 73: timer6
+    am_timer07_isr,       // 74: timer7
+    am_timer08_isr,       // 75: timer8
+    am_timer09_isr,       // 76: timer9
+    am_timer10_isr,       // 77: timer10
+    am_timer11_isr,       // 78: timer11
+    am_timer12_isr,       // 79: timer12
+    am_timer13_isr,       // 80: timer13
+    am_timer14_isr,       // 81: timer14
+    am_timer15_isr,       // 82: timer15
+    am_cachecpu_isr       // 83: CPU cache
+};
diff --git a/targets/apollo4_pw_system/BUILD.bazel b/targets/apollo4_pw_system/BUILD.bazel
new file mode 100644
index 0000000..d83f06e
--- /dev/null
+++ b/targets/apollo4_pw_system/BUILD.bazel
@@ -0,0 +1,27 @@
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])
+
+# This is just a stub to silence warnings saying that files are
+# missing from the bazel build.
+filegroup(
+    name = "apollo4_pw_system_files",
+    srcs = [
+        "config/FreeRTOSConfig.h",
+        "main.cc",
+    ],
+)
diff --git a/targets/apollo4_pw_system/BUILD.gn b/targets/apollo4_pw_system/BUILD.gn
new file mode 100644
index 0000000..d37d20a
--- /dev/null
+++ b/targets/apollo4_pw_system/BUILD.gn
@@ -0,0 +1,87 @@
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+import("$dir_pw_docgen/docs.gni")
+import("$dir_pw_system/system_target.gni")
+
+config("config_includes") {
+  include_dirs = [ "config" ]
+}
+
+if (current_toolchain != default_toolchain) {
+  pw_source_set("main") {
+    deps = [
+      "$dir_pigweed/targets/apollo4:boot",
+      "$dir_pw_system",
+      "$dir_pw_third_party/freertos",
+    ]
+
+    sources = [ "main.cc" ]
+  }
+
+  pw_source_set("apollo4_freertos_config") {
+    public_configs = [ ":config_includes" ]
+    public_deps = [ "$dir_pw_third_party/freertos:config_assert" ]
+    public = [ "config/FreeRTOSConfig.h" ]
+  }
+}
+
+pw_system_target("apollo4_pw_system") {
+  cpu = PW_SYSTEM_CPU.CORTEX_M4F
+  scheduler = PW_SYSTEM_SCHEDULER.FREERTOS
+
+  build_args = {
+    pw_third_party_ambiq_PRODUCT = "apollo4p"
+    pw_log_BACKEND = dir_pw_log_tokenized
+
+    pw_third_party_freertos_CONFIG =
+        "$dir_pigweed/targets/apollo4_pw_system:apollo4_freertos_config"
+    pw_third_party_freertos_PORT = "$dir_pw_third_party/freertos:arm_cm4f"
+    pw_sys_io_BACKEND = "$dir_pw_sys_io_ambiq_sdk"
+    pw_log_BACKEND = "$dir_pw_log_basic"
+    pw_cpu_exception_ENTRY_BACKEND =
+        "$dir_pw_cpu_exception_cortex_m:cpu_exception"
+    pw_cpu_exception_HANDLER_BACKEND = "$dir_pw_cpu_exception:basic_handler"
+    pw_cpu_exception_SUPPORT_BACKEND = "$dir_pw_cpu_exception_cortex_m:support"
+
+    pw_boot_cortex_m_LINK_CONFIG_DEFINES = [
+      "PW_BOOT_VECTOR_TABLE_BEGIN=0x00018000",
+      "PW_BOOT_VECTOR_TABLE_SIZE=512",
+
+      "PW_BOOT_FLASH_BEGIN=0x00018200",
+      "PW_BOOT_FLASH_SIZE=1951K",
+
+      "PW_BOOT_HEAP_SIZE=100K",
+      "PW_BOOT_MIN_STACK_SIZE=1K",
+
+      "PW_BOOT_RAM_BEGIN=0x10000000",
+      "PW_BOOT_RAM_SIZE=1408K",
+    ]
+
+    pw_build_LINK_DEPS += [
+      "$dir_pigweed/targets/apollo4_pw_system:main",
+      "$dir_pw_assert:impl",
+      "$dir_pw_log:impl",
+      "$dir_pw_cpu_exception:entry_impl",
+      "$dir_pw_toolchain/arm_gcc:arm_none_eabi_gcc_support",
+    ]
+  }
+}
+
+pw_doc_group("target_docs") {
+  sources = [ "target_docs.rst" ]
+}
diff --git a/targets/apollo4_pw_system/OWNERS b/targets/apollo4_pw_system/OWNERS
new file mode 100644
index 0000000..0eb639f
--- /dev/null
+++ b/targets/apollo4_pw_system/OWNERS
@@ -0,0 +1 @@
+elizarovv@google.com
diff --git a/targets/apollo4_pw_system/config/FreeRTOSConfig.h b/targets/apollo4_pw_system/config/FreeRTOSConfig.h
new file mode 100644
index 0000000..8ccd4c9
--- /dev/null
+++ b/targets/apollo4_pw_system/config/FreeRTOSConfig.h
@@ -0,0 +1,81 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <stdint.h>
+
+// Externally defined variables that must be forward-declared for FreeRTOS to
+// use them.
+extern uint32_t SystemCoreClock;
+
+// Disable formatting to make it easier to compare with other config files.
+// clang-format off
+
+#define configUSE_PREEMPTION                    1
+#define configUSE_PORT_OPTIMISED_TASK_SELECTION 1
+
+#define configSUPPORT_STATIC_ALLOCATION         1
+#define configSUPPORT_DYNAMIC_ALLOCATION        0
+
+#define configCPU_CLOCK_HZ                      SystemCoreClock
+#define configTICK_RATE_HZ                      1000
+#define configMAX_PRIORITIES                    16
+#define configMINIMAL_STACK_SIZE                128
+
+#define configMAX_TASK_NAME_LEN                 16
+#define configUSE_16_BIT_TICKS                  0
+
+#define configUSE_MUTEXES                       1
+#define configUSE_RECURSIVE_MUTEXES             0
+#define configUSE_COUNTING_SEMAPHORES           1
+#define configQUEUE_REGISTRY_SIZE               8
+#define configUSE_QUEUE_SETS                    0
+#define configUSE_NEWLIB_REENTRANT              0
+#define configENABLE_BACKWARD_COMPATIBILITY     0
+#define configRECORD_STACK_HIGH_ADDRESS         1
+
+#define configUSE_IDLE_HOOK                     0
+#define configUSE_TICK_HOOK                     0
+#define configCHECK_FOR_STACK_OVERFLOW          2
+#define configUSE_MALLOC_FAILED_HOOK            1
+
+#define configGENERATE_RUN_TIME_STATS           0
+#define configUSE_TRACE_FACILITY                0
+
+#define configUSE_TIMERS                        1
+#define configTIMER_TASK_PRIORITY               3
+#define configTIMER_QUEUE_LENGTH                10
+#define configTIMER_TASK_STACK_DEPTH            configMINIMAL_STACK_SIZE
+
+#define configKERNEL_INTERRUPT_PRIORITY             (0x7 << 5)
+#define configMAX_SYSCALL_INTERRUPT_PRIORITY        (0x4 << 5)
+#define NVIC_configKERNEL_INTERRUPT_PRIORITY        (0x7)
+#define NVIC_configMAX_SYSCALL_INTERRUPT_PRIORITY   (0x4)
+
+/* Optional functions - most linkers will remove unused functions anyway. */
+#define INCLUDE_vTaskPrioritySet                1
+#define INCLUDE_uxTaskPriorityGet               1
+#define INCLUDE_vTaskDelete                     1
+#define INCLUDE_vTaskSuspend                    1
+#define INCLUDE_xResumeFromISR                  0
+#define INCLUDE_vTaskDelayUntil                 0
+#define INCLUDE_vTaskDelay                      1
+#define INCLUDE_xTaskGetSchedulerState          1
+#define INCLUDE_xTaskGetCurrentTaskHandle       1
+#define INCLUDE_uxTaskGetStackHighWaterMark2    1
+#define INCLUDE_uxTaskGetStackHighWaterMark     1
+
+// Instead of defining configASSERT(), include a header that provides a
+// definition that redirects to pw_assert.
+#include "pw_third_party/freertos/config_assert.h"
diff --git a/targets/apollo4_pw_system/main.cc b/targets/apollo4_pw_system/main.cc
new file mode 100644
index 0000000..35d383f
--- /dev/null
+++ b/targets/apollo4_pw_system/main.cc
@@ -0,0 +1,74 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#define PW_LOG_MODULE_NAME "pw_system"
+
+#include "FreeRTOS.h"
+#include "pw_log/log.h"
+#include "pw_preprocessor/compiler.h"
+#include "pw_string/util.h"
+#include "pw_system/init.h"
+#include "task.h"
+
+// System core clock value definition, usually provided by the CMSIS package.
+uint32_t SystemCoreClock = 96'000'000ul;
+
+namespace {
+
+#if configCHECK_FOR_STACK_OVERFLOW != 0
+std::array<char, configMAX_TASK_NAME_LEN> temp_thread_name_buffer;
+#endif  // configCHECK_FOR_STACK_OVERFLOW
+
+#if configUSE_TIMERS == 1
+std::array<StackType_t, configTIMER_TASK_STACK_DEPTH> freertos_timer_stack;
+StaticTask_t freertos_timer_tcb;
+#endif  // configUSE_TIMERS == 1
+
+std::array<StackType_t, configMINIMAL_STACK_SIZE> freertos_idle_stack;
+StaticTask_t freertos_idle_tcb;
+}  // namespace
+
+#if configCHECK_FOR_STACK_OVERFLOW != 0
+PW_EXTERN_C void vApplicationStackOverflowHook(TaskHandle_t, char* pcTaskName) {
+  pw::string::Copy(pcTaskName, temp_thread_name_buffer);
+  PW_CRASH("Stack OVF for task %s", temp_thread_name_buffer.data());
+}
+#endif  // configCHECK_FOR_STACK_OVERFLOW
+
+#if configUSE_TIMERS == 1
+PW_EXTERN_C void vApplicationGetTimerTaskMemory(
+    StaticTask_t** ppxTimerTaskTCBBuffer,
+    StackType_t** ppxTimerTaskStackBuffer,
+    uint32_t* pulTimerTaskStackSize) {
+  *ppxTimerTaskTCBBuffer = &freertos_timer_tcb;
+  *ppxTimerTaskStackBuffer = freertos_timer_stack.data();
+  *pulTimerTaskStackSize = freertos_timer_stack.size();
+}
+#endif  // configUSE_TIMERS == 1
+
+PW_EXTERN_C void vApplicationGetIdleTaskMemory(
+    StaticTask_t** ppxIdleTaskTCBBuffer,
+    StackType_t** ppxIdleTaskStackBuffer,
+    uint32_t* pulIdleTaskStackSize) {
+  *ppxIdleTaskTCBBuffer = &freertos_idle_tcb;
+  *ppxIdleTaskStackBuffer = freertos_idle_stack.data();
+  *pulIdleTaskStackSize = freertos_idle_stack.size();
+}
+
+int main() {
+  pw::system::Init();
+  vTaskStartScheduler();
+
+  PW_UNREACHABLE;
+}
diff --git a/targets/apollo4_pw_system/target_docs.rst b/targets/apollo4_pw_system/target_docs.rst
new file mode 100644
index 0000000..6877360
--- /dev/null
+++ b/targets/apollo4_pw_system/target_docs.rst
@@ -0,0 +1,111 @@
+.. _target-apollo4-pw-system:
+
+============================
+Ambiq Apollo4 with pw_system
+============================
+
+.. warning::
+
+  This target is in a very preliminary state and is under active development.
+  This demo gives a preview of the direction we are heading with
+  :ref:`pw_system<module-pw_system>`, but it is not yet ready for production
+  use.
+
+This target configuration uses :ref:`pw_system<module-pw_system>` on top of
+FreeRTOS and the `AmbiqSuite SDK
+<https://ambiq.com/apollo4-blue-plus>`_ HAL.
+
+-----
+Setup
+-----
+To use this target, Pigweed must be set up to use FreeRTOS and the AmbiqSuite
+SDK HAL for the Apollo4 series. The FreeRTOS repository can be downloaded via
+``pw package``, and the `AmbiqSuite SDK`_ can be downloaded from the Ambiq
+website.
+
+.. _AmbiqSuite SDK: https://ambiq.com/apollo4-blue-plus
+
+Once the AmbiqSuite SDK package has been downloaded and extracted, the user
+needs to set the ``dir_pw_third_party_ambiq_SDK`` build arg to the location of
+the extracted directory:
+
+.. code-block:: sh
+
+   $ gn args out
+
+Then add the following lines to that text file:
+
+.. code-block::
+
+   # Path to the extracted AmbiqSuite SDK package.
+   dir_pw_third_party_ambiq_SDK = "/path/to/AmbiqSuite_R4.3.0"
+
+   # Path to the FreeRTOS source directory.
+   dir_pw_third_party_freertos = "/path/to/pigweed/third_party/freertos"
+
+-----------------------------
+Building and Running the Demo
+-----------------------------
+This target has an associated demo application that can be built and then
+flashed to a device with the following commands:
+
+.. code-block:: sh
+
+   ninja -C out pw_system_demo
+
+.. seealso::
+
+   See :ref:`target-apollo4` for more info on flashing the Apollo4 board.
+
+Once the board has been flashed, you can connect to it and send RPC commands
+via the Pigweed console:
+
+.. code-block:: sh
+
+   pw-system-console -d /dev/{ttyX} -b 115200 \
+     --proto-globs pw_rpc/echo.proto \
+     --token-databases \
+       out/apollo4_pw_system.size_optimized/obj/pw_system/bin/system_example.elf
+
+Replace ``{ttyX}`` with the appropriate device on your machine. On Linux this
+may look like ``ttyACM0``, and on a Mac it may look like ``cu.usbmodem***``.
+
+When the console opens, try sending an Echo RPC request. You should get back
+the same message you sent to the device.
+
+.. code-block:: pycon
+
+   >>> device.rpcs.pw.rpc.EchoService.Echo(msg="Hello, Pigweed!")
+   (Status.OK, pw.rpc.EchoMessage(msg='Hello, Pigweed!'))
+
+You can also try out our thread snapshot RPC service, which should return a
+stack usage overview of all running threads on the device in Host Logs.
+
+.. code-block:: pycon
+
+   >>> device.rpcs.pw.thread.proto.ThreadSnapshotService.GetPeakStackUsage()
+
+Example output:
+
+.. code-block::
+
+   20220826 09:47:22  INF  PendingRpc(channel=1, method=pw.thread.ThreadSnapshotService.GetPeakStackUsage) completed: Status.OK
+   20220826 09:47:22  INF  Thread State
+   20220826 09:47:22  INF    5 threads running.
+   20220826 09:47:22  INF
+   20220826 09:47:22  INF  Thread (UNKNOWN): IDLE
+   20220826 09:47:22  INF  Est CPU usage: unknown
+   20220826 09:47:22  INF  Stack info
+   20220826 09:47:22  INF    Current usage:   0x20002da0 - 0x???????? (size unknown)
+   20220826 09:47:22  INF    Est peak usage:  390 bytes, 76.77%
+   20220826 09:47:22  INF    Stack limits:    0x20002da0 - 0x20002ba4 (508 bytes)
+   20220826 09:47:22  INF
+   20220826 09:47:22  INF  ...
+
+You are now up and running!
+
+.. seealso::
+
+   The :ref:`module-pw_console`
+   :bdg-ref-primary-line:`module-pw_console-user_guide` for more info on using
+   the pw_console UI.
diff --git a/targets/arduino/target_docs.rst b/targets/arduino/target_docs.rst
index 5bb9d44..1735e67 100644
--- a/targets/arduino/target_docs.rst
+++ b/targets/arduino/target_docs.rst
@@ -51,7 +51,7 @@
 The ``arduino_builder`` utility can install Arduino cores automatically. It's
 recommended to install them to into ``third_party/arduino/cores/``.
 
-.. code:: sh
+.. code-block:: sh
 
    # Setup pigweed environment.
    . ./activate.sh
@@ -63,7 +63,7 @@
 To build for this Pigweed target, simply build the top-level "arduino" Ninja
 target. You can set Arduino build options using ``gn args out`` or by running:
 
-.. code:: sh
+.. code-block:: sh
 
    gn gen out --args='
      pw_arduino_build_CORE_PATH = "//environment/packages"
@@ -74,13 +74,13 @@
 
 On a Windows machine it's easier to run:
 
-.. code:: sh
+.. code-block:: sh
 
    gn args out
 
 That will open a text file where you can paste the args in:
 
-.. code:: text
+.. code-block:: text
 
    pw_arduino_build_CORE_PATH = "//environment/packages"
    pw_arduino_build_CORE_NAME = "teensy"
@@ -92,19 +92,19 @@
 
 Then build with:
 
-.. code:: sh
+.. code-block:: sh
 
   ninja -C out arduino
 
 To see supported boards and Arduino menu options for a given core:
 
-.. code:: sh
+.. code-block:: sh
 
    arduino_builder --arduino-package-path ./environment/packages/teensy \
                    --arduino-package-name avr/1.58.1 \
                    list-boards
 
-.. code:: text
+.. code-block:: text
 
    Board Name  Description
    teensy41    Teensy 4.1
@@ -116,7 +116,7 @@
 You may wish to set different arduino build options in
 ``pw_arduino_build_MENU_OPTIONS``. Run this to see what's available for your core:
 
-.. code:: sh
+.. code-block:: sh
 
    arduino_builder --arduino-package-path ./environment/packages/teensy \
                    --arduino-package-name avr/1.58.1 \
@@ -125,7 +125,7 @@
 
 That will show all menu options that can be added to ``gn args out``.
 
-.. code:: text
+.. code-block:: text
 
    All Options
    ----------------------------------------------------------------
@@ -160,7 +160,7 @@
 Tests can be flashed and run using the `arduino_unit_test_runner` tool. Here is
 a sample bash script to run all tests on a Linux machine.
 
-.. code:: sh
+.. code-block:: sh
 
    #!/bin/bash
    gn gen out --export-compile-commands \
@@ -185,7 +185,7 @@
 The server must be run with an `arduino_builder` config file so it can locate
 the correct Arduino core, compiler path, and Arduino board used.
 
-.. code:: sh
+.. code-block:: sh
 
   arduino_test_server --verbose \
       --config-file ./out/arduino_debug/gen/arduino_builder_config.json
@@ -210,7 +210,7 @@
 Here is an example `pw_executable` gn rule that includes some Teensyduino
 libraries.
 
-.. code:: text
+.. code-block:: text
 
   import("//build_overrides/pigweed.gni")
   import("$dir_pw_arduino_build/arduino.gni")
diff --git a/targets/default_config.BUILD b/targets/default_config.BUILD
index 1720121..7ccb796 100644
--- a/targets/default_config.BUILD
+++ b/targets/default_config.BUILD
@@ -39,6 +39,11 @@
 )
 
 label_flag(
+    name = "pw_assert_backend_impl",
+    build_setting_default = "@pigweed//pw_assert:backend_impl_multiplexer",
+)
+
+label_flag(
     name = "pw_async_task_backend",
     build_setting_default = "@pigweed//pw_async_basic:task",
 )
diff --git a/targets/docs/target_docs.rst b/targets/docs/target_docs.rst
index e130e8a..92ad55e 100644
--- a/targets/docs/target_docs.rst
+++ b/targets/docs/target_docs.rst
@@ -14,7 +14,7 @@
 To build for this target, invoke ninja with the top-level "docs" group as the
 target to build.
 
-.. code:: sh
+.. code-block:: sh
 
   $ gn gen out
   $ ninja -C out docs
diff --git a/targets/emcraft_sf2_som/target_docs.rst b/targets/emcraft_sf2_som/target_docs.rst
index 1d9a5db..272312e 100644
--- a/targets/emcraft_sf2_som/target_docs.rst
+++ b/targets/emcraft_sf2_som/target_docs.rst
@@ -15,7 +15,7 @@
 downloaded via ``pw package``, and then the build must be manually configured
 to point to the locations the repositories were downloaded to.
 
-.. code:: sh
+.. code-block:: sh
 
   pw package install freertos
   pw package install smartfusion_mss
@@ -25,11 +25,11 @@
 
 Then add the following lines to that text file:
 
-.. code::
+.. code-block::
 
-    dir_pw_third_party_freertos = getenv("PW_PACKAGE_ROOT") + "/freertos"
-    dir_pw_third_party_smartfusion_mss = getenv("PW_PACKAGE_ROOT") + "/smartfusion_mss"
-    dir_pw_third_party_nanopb = getenv("PW_PACKAGE_ROOT") + "/nanopb"
+   dir_pw_third_party_freertos = getenv("PW_PACKAGE_ROOT") + "/freertos"
+   dir_pw_third_party_smartfusion_mss = getenv("PW_PACKAGE_ROOT") + "/smartfusion_mss"
+   dir_pw_third_party_nanopb = getenv("PW_PACKAGE_ROOT") + "/nanopb"
 
 Building and running the demo
 =============================
diff --git a/targets/host_device_simulator/target_docs.rst b/targets/host_device_simulator/target_docs.rst
index 8da6f16..1693825 100644
--- a/targets/host_device_simulator/target_docs.rst
+++ b/targets/host_device_simulator/target_docs.rst
@@ -20,7 +20,7 @@
 :bdg-ref-primary-line:`target-stm32f429i-disc1-stm32cube` target at the same
 time.
 
-.. code:: sh
+.. code-block:: sh
 
    pw package install nanopb
    pw package install freertos
@@ -37,13 +37,13 @@
    Instead of the ``gn gen out`` with args set on the command line above you can
    run:
 
-   .. code:: sh
+   .. code-block:: sh
 
       gn args out
 
    Then add the following lines to that text file:
 
-   .. code::
+   .. code-block::
 
       dir_pw_third_party_nanopb = getenv("PW_PACKAGE_ROOT") + "/nanopb"
       dir_pw_third_party_freertos = getenv("PW_PACKAGE_ROOT") + "/freertos"
@@ -55,17 +55,17 @@
 This target has an associated demo application that can be built and then
 run with the following commands:
 
-.. code:: sh
+.. code-block:: sh
 
    ninja -C out pw_system_demo
 
-.. code:: sh
+.. code-block:: sh
 
    ./out/host_device_simulator.speed_optimized/obj/pw_system/bin/system_example
 
 To communicate with the launched process run this in a separate shell:
 
-.. code:: sh
+.. code-block:: sh
 
    pw-system-console -s default --proto-globs pw_rpc/echo.proto \
      --token-databases out/host_device_simulator.speed_optimized/obj/pw_system/bin/system_example
@@ -75,7 +75,7 @@
    Alternatively you can run the system_example app in the background, then
    launch the console on the same line with:
 
-   .. code:: sh
+   .. code-block:: sh
 
       ./out/host_device_simulator.speed_optimized/obj/pw_system/bin/system_example
         & \
@@ -86,7 +86,7 @@
    Exit the console via the menu or pressing :kbd:`Ctrl-d` twice. Then stop the
    system_example app with:
 
-   .. code:: sh
+   .. code-block:: sh
 
       killall system_example
 
@@ -94,14 +94,14 @@
 commands to the simulated device process. For example, you can send an RPC
 message that will be echoed back:
 
-.. code:: pycon
+.. code-block:: pycon
 
    >>> device.rpcs.pw.rpc.EchoService.Echo(msg='Hello, world!')
    (Status.OK, pw.rpc.EchoMessage(msg='Hello, world!'))
 
 Or run unit tests included on the simulated device:
 
-.. code:: pycon
+.. code-block:: pycon
 
    >>> device.run_tests()
    True
diff --git a/targets/lm3s6965evb_qemu/target_docs.rst b/targets/lm3s6965evb_qemu/target_docs.rst
index b5f5b84..123d2d4 100644
--- a/targets/lm3s6965evb_qemu/target_docs.rst
+++ b/targets/lm3s6965evb_qemu/target_docs.rst
@@ -15,7 +15,7 @@
 To build for this Pigweed target, simply build the top-level "qemu_gcc" Ninja
 target.
 
-.. code:: sh
+.. code-block:: sh
 
   $ ninja -C out qemu_gcc
 
@@ -34,7 +34,7 @@
 When running without GDB, the firmware will execute normally without requiring
 further interaction.
 
-.. code:: sh
+.. code-block:: sh
 
   $ qemu-system-arm -cpu cortex-m3 -machine lm3s6965evb \
     -nographic -no-reboot \
@@ -46,7 +46,7 @@
 begin running the code, you must connect using GDB, set any breakpoints you
 wish, and then continue execution.
 
-.. code:: sh
+.. code-block:: sh
 
   # Start the VM and GDB server.
   $ qemu-system-arm -cpu cortex-m3 -machine lm3s6965evb \
@@ -56,7 +56,7 @@
 
 In another window
 
-.. code:: sh
+.. code-block:: sh
 
   $ arm-none-eabi-gdb path/to/firmare.elf
   (gdb) target remote :3333
diff --git a/targets/mimxrt595_evk/target_docs.rst b/targets/mimxrt595_evk/target_docs.rst
index 7b6b3c8..47a9505 100644
--- a/targets/mimxrt595_evk/target_docs.rst
+++ b/targets/mimxrt595_evk/target_docs.rst
@@ -29,7 +29,7 @@
 ``pw_third_party_mcuxpresso_SDK`` to the ``sample_sdk`` source set within the
 Pigweed target directory.
 
-.. code:: sh
+.. code-block:: sh
 
   $ gn args out
   # Modify and save the args file to use the sample SDK.
@@ -41,7 +41,7 @@
 Once configured, to build for this Pigweed target, simply build the top-level
 "mimxrt595" Ninja target.
 
-.. code:: sh
+.. code-block:: sh
 
   $ ninja -C out mimxrt595
 
@@ -103,29 +103,29 @@
 it.
 
 .. code-block::
-  :emphasize-lines: 1,6,10,12,20
+   :emphasize-lines: 1,6,10,12,20
 
-  (gdb) target remote :2331
-  Remote debugging using :2331
-  warning: No executable has been specified and target does not support
-  determining executable automatically.  Try using the "file" command.
-  0x08000000 in ?? ()
-  (gdb) file out/mimxrt595_evk_debug/obj/pw_status/test/status_test.elf
-  A program is being debugged already.
-  Are you sure you want to change the file? (y or n) y
-  Reading symbols from out/mimxrt595_evk_debug/obj/pw_status/test/status_test.elf...
-  (gdb) monitor reset
-  Resetting target
-  (gdb) load
-  Loading section .flash_config, size 0x200 lma 0x8000400
-  Loading section .vector_table, size 0x168 lma 0x8001000
-  Loading section .code, size 0xb34c lma 0x8001180
-  Loading section .ARM, size 0x8 lma 0x800c4d0
-  Loading section .static_init_ram, size 0x3c8 lma 0x800c4d8
-  Start address 0x080048d0, load size 47748
-  Transfer rate: 15542 KB/sec, 6821 bytes/write.
-  (gdb) monitor reset
-  Resetting target
+   (gdb) target remote :2331
+   Remote debugging using :2331
+   warning: No executable has been specified and target does not support
+   determining executable automatically.  Try using the "file" command.
+   0x08000000 in ?? ()
+   (gdb) file out/mimxrt595_evk_debug/obj/pw_status/test/status_test.elf
+   A program is being debugged already.
+   Are you sure you want to change the file? (y or n) y
+   Reading symbols from out/mimxrt595_evk_debug/obj/pw_status/test/status_test.elf...
+   (gdb) monitor reset
+   Resetting target
+   (gdb) load
+   Loading section .flash_config, size 0x200 lma 0x8000400
+   Loading section .vector_table, size 0x168 lma 0x8001000
+   Loading section .code, size 0xb34c lma 0x8001180
+   Loading section .ARM, size 0x8 lma 0x800c4d0
+   Loading section .static_init_ram, size 0x3c8 lma 0x800c4d8
+   Start address 0x080048d0, load size 47748
+   Transfer rate: 15542 KB/sec, 6821 bytes/write.
+   (gdb) monitor reset
+   Resetting target
 
 You can now set any breakpoints you wish, and ``continue`` to run the
 executable.
diff --git a/targets/mimxrt595_evk_freertos/BUILD.bazel b/targets/mimxrt595_evk_freertos/BUILD.bazel
index cc52310..f4318bc 100644
--- a/targets/mimxrt595_evk_freertos/BUILD.bazel
+++ b/targets/mimxrt595_evk_freertos/BUILD.bazel
@@ -70,6 +70,7 @@
         "FreeRTOSConfig.h",
     ],
     includes = ["./"],
+    deps = ["//third_party/freertos:config_assert"],
 )
 
 pw_cc_library(
diff --git a/targets/mimxrt595_evk_freertos/BUILD.gn b/targets/mimxrt595_evk_freertos/BUILD.gn
index b253381..c692fad 100644
--- a/targets/mimxrt595_evk_freertos/BUILD.gn
+++ b/targets/mimxrt595_evk_freertos/BUILD.gn
@@ -182,6 +182,7 @@
       ":disable_warnings",
       ":freertos_config_public_includes",
     ]
+    public_deps = [ "$dir_pw_third_party/freertos:config_assert" ]
     public = [ "FreeRTOSConfig.h" ]
   }
 
diff --git a/targets/mimxrt595_evk_freertos/FreeRTOSConfig.h b/targets/mimxrt595_evk_freertos/FreeRTOSConfig.h
index c8b36ef..de343b2 100644
--- a/targets/mimxrt595_evk_freertos/FreeRTOSConfig.h
+++ b/targets/mimxrt595_evk_freertos/FreeRTOSConfig.h
@@ -101,8 +101,9 @@
 #define configTIMER_QUEUE_LENGTH                10
 #define configTIMER_TASK_STACK_DEPTH            (configMINIMAL_STACK_SIZE * 2)
 
-/* Define to trap errors during development. */
-#define configASSERT(x) if((x) == 0) {taskDISABLE_INTERRUPTS(); for (;;);}
+// Instead of defining configASSERT(), include a header that provides a
+// definition that redirects to pw_assert.
+#include "pw_third_party/freertos/config_assert.h"
 
 /* Optional functions - most linkers will remove unused functions anyway. */
 #define INCLUDE_vTaskPrioritySet                1
diff --git a/targets/mimxrt595_evk_freertos/target_docs.rst b/targets/mimxrt595_evk_freertos/target_docs.rst
index 3fd9039..e8a405e 100644
--- a/targets/mimxrt595_evk_freertos/target_docs.rst
+++ b/targets/mimxrt595_evk_freertos/target_docs.rst
@@ -32,7 +32,7 @@
 ``pw_third_party_mcuxpresso_SDK`` to the ``sdk`` source set within the
 Pigweed target directory.
 
-.. code:: sh
+.. code-block:: sh
 
   $ gn args out
   # Modify and save the args file to use the sample SDK.
@@ -42,7 +42,7 @@
 Step 3: Install Freertos Source and Configure Location
 ======================================================
 
-.. code:: sh
+.. code-block:: sh
 
   pw package install freertos
 
@@ -50,9 +50,9 @@
 
 Then add the following line to that text file:
 
-.. code::
+.. code-block::
 
-    dir_pw_third_party_freertos = getenv("PW_PACKAGE_ROOT") + "/freertos"
+   dir_pw_third_party_freertos = getenv("PW_PACKAGE_ROOT") + "/freertos"
 
 --------
 Building
@@ -60,7 +60,7 @@
 Once configured, to build for this Pigweed target, build the top-level
 "mimxrt595_freertos" Ninja target.
 
-.. code:: sh
+.. code-block:: sh
 
   $ ninja -C out mimxrt595_freertos
 
@@ -123,29 +123,29 @@
 it.
 
 .. code-block::
-  :emphasize-lines: 1,6,10,12,20
+   :emphasize-lines: 1,6,10,12,20
 
-  (gdb) target remote :2331
-  Remote debugging using :2331
-  warning: No executable has been specified and target does not support
-  determining executable automatically.  Try using the "file" command.
-  0x08000000 in ?? ()
-  (gdb) file out/mimxrt595_evk_freertos_debug/obj/pw_status/test/status_test.elf
-  A program is being debugged already.
-  Are you sure you want to change the file? (y or n) y
-  Reading symbols from out/mimxrt595_evk_freertos_debug/obj/pw_status/test/status_test.elf...
-  (gdb) monitor reset
-  Resetting target
-  (gdb) load
-  Loading section .flash_config, size 0x200 lma 0x8000400
-  Loading section .vector_table, size 0x168 lma 0x8001000
-  Loading section .code, size 0xb34c lma 0x8001180
-  Loading section .ARM, size 0x8 lma 0x800c4d0
-  Loading section .static_init_ram, size 0x3c8 lma 0x800c4d8
-  Start address 0x080048d0, load size 47748
-  Transfer rate: 15542 KB/sec, 6821 bytes/write.
-  (gdb) monitor reset
-  Resetting target
+   (gdb) target remote :2331
+   Remote debugging using :2331
+   warning: No executable has been specified and target does not support
+   determining executable automatically.  Try using the "file" command.
+   0x08000000 in ?? ()
+   (gdb) file out/mimxrt595_evk_freertos_debug/obj/pw_status/test/status_test.elf
+   A program is being debugged already.
+   Are you sure you want to change the file? (y or n) y
+   Reading symbols from out/mimxrt595_evk_freertos_debug/obj/pw_status/test/status_test.elf...
+   (gdb) monitor reset
+   Resetting target
+   (gdb) load
+   Loading section .flash_config, size 0x200 lma 0x8000400
+   Loading section .vector_table, size 0x168 lma 0x8001000
+   Loading section .code, size 0xb34c lma 0x8001180
+   Loading section .ARM, size 0x8 lma 0x800c4d0
+   Loading section .static_init_ram, size 0x3c8 lma 0x800c4d8
+   Start address 0x080048d0, load size 47748
+   Transfer rate: 15542 KB/sec, 6821 bytes/write.
+   (gdb) monitor reset
+   Resetting target
 
 You can now set any breakpoints you wish, and ``continue`` to run the
 executable.
diff --git a/targets/rp2040/target_docs.rst b/targets/rp2040/target_docs.rst
index 413e9e5..04c77c4 100644
--- a/targets/rp2040/target_docs.rst
+++ b/targets/rp2040/target_docs.rst
@@ -14,7 +14,7 @@
 Pico SDK. This can be downloaded via ``pw package``, and then the build must be
 manually configured to point to the location of the downloaded SDK.
 
-.. code:: sh
+.. code-block:: sh
 
    pw package install pico_sdk
 
@@ -27,7 +27,7 @@
 On linux, you may need to update your udev rules at
 ``/etc/udev/rules.d/49-pico.rules`` to include the following:
 
-.. code:: none
+.. code-block:: none
 
    SUBSYSTEMS=="usb", ATTRS{idVendor}=="2e8a", ATTRS{idProduct}=="0004", MODE:="0666"
    KERNEL=="ttyACM*", ATTRS{idVendor}=="2e8a", ATTRS{idProduct}=="0004", MODE:="0666"
@@ -44,7 +44,7 @@
 Once the pico SDK is configured, the Pi Pico will build as part of the default
 GN build:
 
-.. code:: sh
+.. code-block:: sh
 
    ninja -C out
 
@@ -66,7 +66,7 @@
 To run a test, flash it to the RP2040 and connect to the serial port and then
 press the spacebar to start the test:
 
-.. code:: none
+.. code-block:: none
 
    $ python -m serial.tools.miniterm --raw /dev/ttyACM0 115200
    --- Miniterm on /dev/cu.usbmodem142401  115200,8,N,1 ---
@@ -126,7 +126,7 @@
 it for testing. To override this behavior, provide a custom server configuration
 file with ``--server-config``.
 
-.. code:: sh
+.. code-block:: sh
 
    $ python -m rp2040_utils.unit_test_server
 
@@ -146,7 +146,7 @@
 ``pw_targets_ENABLE_RP2040_TEST_RUNNER`` build arg tells GN to send requests to
 a running ``rp2040_utils.unit_test_server``.
 
-.. code:: sh
+.. code-block:: sh
 
    $ gn args out
    # Modify and save the args file to use pw_target_runner.
diff --git a/targets/rp2040_pw_system/target_docs.rst b/targets/rp2040_pw_system/target_docs.rst
index d48ad7c..4201a70 100644
--- a/targets/rp2040_pw_system/target_docs.rst
+++ b/targets/rp2040_pw_system/target_docs.rst
@@ -23,7 +23,7 @@
 the build must be manually configured to point to the locations the repositories
 were downloaded to.
 
-.. code:: sh
+.. code-block:: sh
 
    pw package install nanopb
    pw package install freertos
@@ -40,13 +40,13 @@
    Instead of the ``gn gen out`` with args set on the command line above you can
    run:
 
-   .. code:: sh
+   .. code-block:: sh
 
       gn args out
 
    Then add the following lines to that text file:
 
-   .. code::
+   .. code-block::
 
       dir_pw_third_party_nanopb = getenv("PW_PACKAGE_ROOT") + "/nanopb"
       dir_pw_third_party_freertos = getenv("PW_PACKAGE_ROOT") + "/freertos"
@@ -60,7 +60,7 @@
 
 **Build**
 
-.. code:: sh
+.. code-block:: sh
 
    ninja -C out pw_system_demo
 
@@ -82,7 +82,7 @@
 
   **Install RaspberryPi's OpenOCD Fork:**
 
-  .. code:: sh
+  .. code-block:: sh
 
      git clone https://github.com/raspberrypi/openocd.git \
        --branch picoprobe \
@@ -99,7 +99,7 @@
 
   **Setup udev rules (Linux only):**
 
-  .. code:: sh
+  .. code-block:: sh
 
      cat <<EOF > 49-picoprobe.rules
      SUBSYSTEMS=="usb", ATTRS{idVendor}=="2e8a", ATTRS{idProduct}=="000[43a]", MODE:="0666"
@@ -110,7 +110,7 @@
 
   **Flash the Pico:**
 
-  .. code:: sh
+  .. code-block:: sh
 
      ~/apps/openocd/bin/openocd -f ~/apps/openocd/share/openocd/scripts/interface/picoprobe.cfg -f ~/apps/openocd/share/openocd/scripts/target/rp2040.cfg -c 'program out/rp2040_pw_system.size_optimized/obj/pw_system/bin/system_example.elf verify reset exit'
 
@@ -119,7 +119,7 @@
 Once the board has been flashed, you can connect to it and send RPC commands
 via the Pigweed console:
 
-.. code:: sh
+.. code-block:: sh
 
    pw-system-console -d /dev/{ttyX} -b 115200 \
      --proto-globs pw_rpc/echo.proto \
@@ -132,7 +132,7 @@
 When the console opens, try sending an Echo RPC request. You should get back
 the same message you sent to the device.
 
-.. code:: pycon
+.. code-block:: pycon
 
    >>> device.rpcs.pw.rpc.EchoService.Echo(msg="Hello, Pigweed!")
    (Status.OK, pw.rpc.EchoMessage(msg='Hello, Pigweed!'))
@@ -140,13 +140,13 @@
 You can also try out our thread snapshot RPC service, which should return a
 stack usage overview of all running threads on the device in Host Logs.
 
-.. code:: pycon
+.. code-block:: pycon
 
    >>> device.snapshot_peak_stack_usage()
 
 Example output:
 
-.. code::
+.. code-block::
 
    20220826 09:47:22  INF  PendingRpc(channel=1, method=pw.thread.ThreadSnapshotService.GetPeakStackUsage) completed: Status.OK
    20220826 09:47:22  INF  Thread State
diff --git a/targets/stm32f429i_disc1/target_docs.rst b/targets/stm32f429i_disc1/target_docs.rst
index fcf6d73..30d6ce5 100644
--- a/targets/stm32f429i_disc1/target_docs.rst
+++ b/targets/stm32f429i_disc1/target_docs.rst
@@ -11,7 +11,7 @@
 To build for this Pigweed target, simply build the top-level "stm32f429i" Ninja
 target.
 
-.. code:: sh
+.. code-block:: sh
 
   $ ninja -C out stm32f429i
 
@@ -27,7 +27,7 @@
 on device, the stm32f429i-disc1 target provides a helper script that flashes the
 test to a device and then runs it.
 
-.. code:: sh
+.. code-block:: sh
 
   # Setup pigweed environment.
   $ source activate.sh
@@ -43,7 +43,7 @@
 the ``--group`` argument. Alternatively, individual test binaries can be
 specified with the ``--test`` option.
 
-.. code:: sh
+.. code-block:: sh
 
   # Setup Pigweed environment.
   $ source activate.sh
@@ -76,7 +76,7 @@
   If you unplug or plug in any boards, you'll need to restart the test server
   for hardware changes to properly be detected.
 
-.. code:: sh
+.. code-block:: sh
 
   $ stm32f429i_disc1_test_server
 
@@ -86,7 +86,7 @@
 ``pw_target_runner`` disabled. Enabling the ``pw_use_test_server`` build arg
 tells GN to send requests to a running ``stm32f429i_disc1_test_server``.
 
-.. code:: sh
+.. code-block:: sh
 
   $ gn args out
   # Modify and save the args file to use pw_target_runner.
@@ -113,20 +113,20 @@
 
 #. Connect OpenOCD to the device in terminal A. Leave this running
 
-   .. code:: sh
+   .. code-block:: sh
 
      $ openocd -f targets/stm32f429i_disc1/py/stm32f429i_disc1_utils/openocd_stm32f4xx.cfg
 
 #. Connect GDB to the running OpenOCD instance in terminal B
 
-   .. code:: sh
+   .. code-block:: sh
 
      $ arm-none-eabi-gdb -ex "target remote :3333" \
        out/stm32f429i_disc1_debug/obj/pw_assert/test/assert_facade_test.elf
 
 #. Flash (``load``), run (``mon reset run; continue``), and debug
 
-   .. code:: none
+   .. code-block:: none
 
      (gdb) set print pretty on
      (gdb) load
@@ -141,13 +141,13 @@
 OpenOCD is a persistent server that you run and leave running to bridge between
 GDB and the device. To run it for the Discovery board:
 
-.. code:: sh
+.. code-block:: sh
 
   $ openocd -f targets/stm32f429i_disc1/py/stm32f429i_disc1_utils/openocd_stm32f4xx.cfg
 
 Typical output:
 
-.. code:: none
+.. code-block:: none
 
   Open On-Chip Debugger 0.10.0+dev-01243-ge41c0f49-dirty (2020-05-21-10:27)
   Licensed under GNU GPL v2
@@ -171,7 +171,7 @@
 Start GDB pointing to the correct .elf file, and tell it to connect to the
 OpenOCD server (running on port 333 by default).
 
-.. code:: sh
+.. code-block:: sh
 
   $ arm-none-eabi-gdb -ex "target remote :3333" \
     out/stm32f429i_disc1_debug/obj/pw_assert/test/assert_facade_test.elf
@@ -179,7 +179,7 @@
 In this case the assert facade test is debugged, but substitute your own ELF
 file. This should produce output similar to the following:
 
-.. code:: none
+.. code-block:: none
 
   GNU gdb (GNU Arm Embedded Toolchain 9-2020-q2-update) 8.3.1.20191211-git
   Copyright (C) 2019 Free Software Foundation, Inc.
@@ -207,13 +207,13 @@
 
 To flash
 
-.. code:: none
+.. code-block:: none
 
   (gdb) load
 
 This will produce output similar to:
 
-.. code:: none
+.. code-block:: none
 
   (gdb) load
   Loading section .vector_table, size 0x10 lma 0x8000000
@@ -225,14 +225,14 @@
 
 To reset the device and halt on the first instruction (before main):
 
-.. code:: none
+.. code-block:: none
 
   (gdb) mon reset run
 
 
 This will produce output similar to:
 
-.. code:: none
+.. code-block:: none
 
   (gdb) mon reset run
   Unable to match requested speed 2000 kHz, using 1800 kHz
diff --git a/targets/stm32f429i_disc1_stm32cube/BUILD.bazel b/targets/stm32f429i_disc1_stm32cube/BUILD.bazel
index 7922f29..e6bdfa4 100644
--- a/targets/stm32f429i_disc1_stm32cube/BUILD.bazel
+++ b/targets/stm32f429i_disc1_stm32cube/BUILD.bazel
@@ -49,9 +49,11 @@
 platform(
     name = "platform",
     constraint_values = [
-        ":freertos_config_cv",
+        "@pw_toolchain//constraints/arm_mcpu:cortex-m4",
         "//pw_build/constraints/rtos:freertos",
+        ":freertos_config_cv",
         "@freertos//:port_ARM_CM4F",
+        "@platforms//cpu:armv7e-m",
     ],
     parents = ["@bazel_embedded//platforms:cortex_m4_fpu"],
 )
diff --git a/targets/stm32f429i_disc1_stm32cube/target_docs.rst b/targets/stm32f429i_disc1_stm32cube/target_docs.rst
index 57d69a8..7ddc806 100644
--- a/targets/stm32f429i_disc1_stm32cube/target_docs.rst
+++ b/targets/stm32f429i_disc1_stm32cube/target_docs.rst
@@ -24,7 +24,7 @@
 ``pw package``, and then the build must be manually configured to point to the
 locations the repositories were downloaded to.
 
-.. code:: sh
+.. code-block:: sh
 
    pw package install nanopb
    pw package install freertos
@@ -41,13 +41,13 @@
    Instead of the ``gn gen out`` with args set on the command line above you can
    run:
 
-   .. code:: sh
+   .. code-block:: sh
 
       gn args out
 
    Then add the following lines to that text file:
 
-   .. code::
+   .. code-block::
 
       dir_pw_third_party_nanopb = getenv("PW_PACKAGE_ROOT") + "/nanopb"
       dir_pw_third_party_freertos = getenv("PW_PACKAGE_ROOT") + "/freertos"
@@ -59,11 +59,11 @@
 This target has an associated demo application that can be built and then
 flashed to a device with the following commands:
 
-.. code:: sh
+.. code-block:: sh
 
    ninja -C out pw_system_demo
 
-.. code:: sh
+.. code-block:: sh
 
    openocd -f targets/stm32f429i_disc1/py/stm32f429i_disc1_utils/openocd_stm32f4xx.cfg \
      -c "program out/stm32f429i_disc1_stm32cube.size_optimized/obj/pw_system/bin/system_example.elf reset exit"
@@ -71,7 +71,7 @@
 Once the board has been flashed, you can connect to it and send RPC commands
 via the Pigweed console:
 
-.. code:: sh
+.. code-block:: sh
 
    pw-system-console -d /dev/{ttyX} -b 115200 \
      --proto-globs pw_rpc/echo.proto \
@@ -84,7 +84,7 @@
 When the console opens, try sending an Echo RPC request. You should get back
 the same message you sent to the device.
 
-.. code:: pycon
+.. code-block:: pycon
 
    >>> device.rpcs.pw.rpc.EchoService.Echo(msg="Hello, Pigweed!")
    (Status.OK, pw.rpc.EchoMessage(msg='Hello, Pigweed!'))
@@ -92,13 +92,13 @@
 You can also try out our thread snapshot RPC service, which should return a
 stack usage overview of all running threads on the device in Host Logs.
 
-.. code:: pycon
+.. code-block:: pycon
 
    >>> device.snapshot_peak_stack_usage()
 
 Example output:
 
-.. code::
+.. code-block::
 
    20220826 09:47:22  INF  PendingRpc(channel=1, method=pw.thread.ThreadSnapshotService.GetPeakStackUsage) completed: Status.OK
    20220826 09:47:22  INF  Thread State
diff --git a/third_party/abseil-cpp/configs/BUILD.gn b/third_party/abseil-cpp/configs/BUILD.gn
index 55a14e2..1ac2454 100644
--- a/third_party/abseil-cpp/configs/BUILD.gn
+++ b/third_party/abseil-cpp/configs/BUILD.gn
@@ -12,6 +12,10 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_third_party/abseil-cpp/abseil-cpp.gni")
+
 # Targets that include Abseil C++ headers need to include this config.
 config("disabled_warnings") {
   cflags = [ "-Wno-gcc-compat" ]
@@ -19,6 +23,17 @@
 
 # This config should only be used to build the Abseil C++ library itself.
 config("internal_disabled_warnings") {
-  cflags = [ "-Wno-switch-enum" ]
-  configs = [ ":disabled_warnings" ]
+  cflags = [
+    "-Wno-gcc-compat",
+    "-Wno-switch-enum",
+  ]
+}
+
+# Include path for Abseil C++.
+#
+# This is needed as the library is used to build FuzzTest in a dedicated
+# toolchain, and `public_configs` do not propagate across toolchain boundaries
+# by default.
+config("public_include_path") {
+  include_dirs = [ "$dir_pw_third_party_abseil_cpp" ]
 }
diff --git a/third_party/ambiq/BUILD.gn b/third_party/ambiq/BUILD.gn
new file mode 100644
index 0000000..eaaf514
--- /dev/null
+++ b/third_party/ambiq/BUILD.gn
@@ -0,0 +1,216 @@
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+import("$dir_pw_docgen/docs.gni")
+import("ambiq.gni")
+
+pw_doc_group("docs") {
+  sources = [ "docs.rst" ]
+}
+
+# This file defines a GN source_set for an external installation of Ambiq SDK.
+# To use, checkout the Ambiq Apollo4 SDK source into a directory, then set
+# the build arg dir_pw_third_party_ambiq_SDK to point to that directory. The
+# Ambiq Apollo 4 SDK library will be available in GN
+# at "$dir_pw_third_party/apollo4".
+if (dir_pw_third_party_ambiq_SDK != "") {
+  config("apollo4p_sdk_defines") {
+    defines = [
+      "AM_PART_APOLLO4P=1",
+      "apollo4p_evb=1",
+      "gcc=1",
+      "AM_PACKAGE_BGA=1",
+    ]
+    visibility = [ ":*" ]
+  }
+
+  config("apollo4b_sdk_defines") {
+    defines = [
+      "AM_PART_APOLLO4B=1",
+      "apollo4b_evb=1",
+      "gcc=1",
+      "AM_PACKAGE_BGA=1",
+    ]
+    visibility = [ ":*" ]
+  }
+
+  config("disable_warnings") {
+    cflags = [
+      "-Wno-sign-compare",
+      "-Wno-unused-parameter",
+      "-Wno-cast-qual",
+      "-Wno-shadow",
+      "-Wno-implicit-fallthrough",
+      "-Wno-maybe-uninitialized",
+    ]
+    cflags_c = [ "-Wno-old-style-declaration" ]
+    visibility = [ ":*" ]
+  }
+
+  config("apollo4_include_dirs") {
+    include_dirs = [
+      "$dir_pw_third_party_ambiq_SDK/devices",
+      "$dir_pw_third_party_ambiq_SDK/utils",
+      "$dir_pw_third_party_ambiq_SDK/CMSIS/ARM/Include",
+      "$dir_pw_third_party_ambiq_SDK/CMSIS/AmbiqMicro/Include",
+    ]
+  }
+
+  config("apollo4p_include_dirs") {
+    include_dirs = [
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/mcu",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p",
+      "$dir_pw_third_party_ambiq_SDK/boards/apollo4p_evb/bsp",
+    ]
+  }
+
+  config("apollo4b_include_dirs") {
+    include_dirs = [
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/mcu",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b",
+      "$dir_pw_third_party_ambiq_SDK/boards/apollo4b_evb/bsp",
+    ]
+  }
+
+  pw_source_set("apollo4p") {
+    remove_configs = [ "$dir_pw_build:extra_strict_warnings" ]
+
+    public_configs = [
+      ":disable_warnings",
+      ":apollo4_include_dirs",
+      ":apollo4p_include_dirs",
+      ":apollo4p_sdk_defines",
+    ]
+
+    sources = [
+      "$dir_pw_third_party_ambiq_SDK/boards/apollo4p_evb/bsp/am_bsp.c",
+      "$dir_pw_third_party_ambiq_SDK/boards/apollo4p_evb/bsp/am_bsp_pins.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/am_hal_access.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/am_hal_adc.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/am_hal_audadc.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/am_hal_dcu.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/am_hal_global.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/am_hal_gpio.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/am_hal_i2s.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/am_hal_otp.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/am_hal_pdm.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/am_hal_pin.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/am_hal_pwrctrl.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/am_hal_queue.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/am_hal_security.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/am_hal_stimer.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/am_hal_timer.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/am_hal_usb.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/am_hal_wdt.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/mcu/am_hal_bootrom_helper.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/mcu/am_hal_cachectrl.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/mcu/am_hal_card.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/mcu/am_hal_card_host.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/mcu/am_hal_clkgen.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/mcu/am_hal_cmdq.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/mcu/am_hal_dsi.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/mcu/am_hal_fault.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/mcu/am_hal_interrupt.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/mcu/am_hal_iom.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/mcu/am_hal_ios.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/mcu/am_hal_itm.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/mcu/am_hal_itm.h",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/mcu/am_hal_mcuctrl.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/mcu/am_hal_mpu.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/mcu/am_hal_mram.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/mcu/am_hal_mspi.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/mcu/am_hal_reset.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/mcu/am_hal_rtc.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/mcu/am_hal_sdhc.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/mcu/am_hal_secure_ota.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/mcu/am_hal_sysctrl.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/mcu/am_hal_systick.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/mcu/am_hal_tpiu.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/mcu/am_hal_uart.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4p/hal/mcu/am_hal_utils.c",
+    ]
+  }
+
+  pw_source_set("apollo4b") {
+    remove_configs = [ "$dir_pw_build:extra_strict_warnings" ]
+
+    public_configs = [
+      ":disable_warnings",
+      ":apollo4_include_dirs",
+      ":apollo4b_include_dirs",
+      ":apollo4b_sdk_defines",
+    ]
+
+    sources = [
+      "$dir_pw_third_party_ambiq_SDK/boards/apollo4b_evb/bsp/am_bsp.c",
+      "$dir_pw_third_party_ambiq_SDK/boards/apollo4b_evb/bsp/am_bsp_pins.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/am_hal_access.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/am_hal_adc.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/am_hal_audadc.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/am_hal_dcu.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/am_hal_global.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/am_hal_gpio.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/am_hal_i2s.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/am_hal_otp.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/am_hal_pdm.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/am_hal_pin.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/am_hal_pwrctrl.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/am_hal_queue.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/am_hal_security.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/am_hal_stimer.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/am_hal_timer.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/am_hal_usb.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/am_hal_wdt.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/mcu/am_hal_bootrom_helper.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/mcu/am_hal_cachectrl.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/mcu/am_hal_card.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/mcu/am_hal_card_host.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/mcu/am_hal_clkgen.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/mcu/am_hal_cmdq.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/mcu/am_hal_dsi.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/mcu/am_hal_fault.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/mcu/am_hal_interrupt.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/mcu/am_hal_iom.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/mcu/am_hal_ios.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/mcu/am_hal_itm.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/mcu/am_hal_itm.h",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/mcu/am_hal_mcuctrl.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/mcu/am_hal_mpu.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/mcu/am_hal_mram.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/mcu/am_hal_mspi.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/mcu/am_hal_reset.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/mcu/am_hal_rtc.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/mcu/am_hal_sdhc.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/mcu/am_hal_secure_ota.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/mcu/am_hal_sysctrl.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/mcu/am_hal_systick.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/mcu/am_hal_tpiu.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/mcu/am_hal_uart.c",
+      "$dir_pw_third_party_ambiq_SDK/mcu/apollo4b/hal/mcu/am_hal_utils.c",
+    ]
+  }
+
+  pw_source_set("sdk") {
+    if (pw_third_party_ambiq_PRODUCT == "apollo4p") {
+      public_deps = [ ":apollo4p" ]
+    } else if (pw_third_party_ambiq_PRODUCT == "apollo4b") {
+      public_deps = [ ":apollo4b" ]
+    }
+  }
+}
diff --git a/third_party/ambiq/ambiq.gni b/third_party/ambiq/ambiq.gni
new file mode 100644
index 0000000..c0db57e
--- /dev/null
+++ b/third_party/ambiq/ambiq.gni
@@ -0,0 +1,24 @@
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+import("//build_overrides/pigweed.gni")
+
+declare_args() {
+  # If compiling a project against an Ambiq Apollo SDK, this variable is set to the path to the
+  # Ambiq Suite SDK directory.
+  dir_pw_third_party_ambiq_SDK = ""
+
+  # The Product specified in as much detail as possible.
+  # i.e. "apollo4p", "apollo4b", etc.
+  pw_third_party_ambiq_PRODUCT = ""
+}
diff --git a/third_party/boringssl/docs.rst b/third_party/boringssl/docs.rst
index f2442f5..7e16e2c 100644
--- a/third_party/boringssl/docs.rst
+++ b/third_party/boringssl/docs.rst
@@ -49,7 +49,7 @@
 
      .. code-block::
 
-       dir_pw_third_party_boringssl = "//third_party/boringssl/src"
+        dir_pw_third_party_boringssl = "//third_party/boringssl/src"
 
 #. Having a non-empty ``dir_pw_third_party_boringssl`` variable causes GN to
    attempt to include the ``BUILD.generated.gni`` file from the sources even
diff --git a/third_party/chre/BUILD.bazel b/third_party/chre/BUILD.bazel
new file mode 100644
index 0000000..26b708c
--- /dev/null
+++ b/third_party/chre/BUILD.bazel
@@ -0,0 +1,24 @@
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])
+
+filegroup(
+    name = "chre",
+    srcs = [
+        "integration_test.cc",
+    ],
+)
diff --git a/third_party/chre/BUILD.gn b/third_party/chre/BUILD.gn
new file mode 100644
index 0000000..efb2889
--- /dev/null
+++ b/third_party/chre/BUILD.gn
@@ -0,0 +1,188 @@
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+import("$dir_pw_build/error.gni")
+import("$dir_pw_build/target_types.gni")
+import("$dir_pw_chrono/backend.gni")
+import("$dir_pw_docgen/docs.gni")
+import("$dir_pw_unit_test/test.gni")
+import("chre.gni")
+
+# This file defines a GN source_set for an external installation of chre.
+# To use, checkout the chre source into a directory, then set the build arg
+# dir_pw_third_party_chre to point to that directory. The chre library
+# will be available in GN at "$dir_pw_third_party/chre".
+if (dir_pw_third_party_chre == "") {
+} else {
+  config("disable_warnings") {
+    cflags = [
+      "-Wno-cast-qual",
+      "-Wno-int-in-bool-context",
+      "-Wno-thread-safety-analysis",
+    ]
+
+    # TODO(b/294106526): Fill out the rest of the implementation and remove this.
+    cflags += [ "-Wno-unused-parameter" ]
+    visibility = [ ":*" ]
+  }
+
+  config("default_chre_config_defines") {
+    cflags = [
+      "-DCHRE_MESSAGE_TO_HOST_MAX_SIZE=2048",
+      "-DCHRE_MINIMUM_LOG_LEVEL=CHRE_LOG_LEVEL_DEBUG",
+      "-DCHRE_ASSERTIONS_DISABLED",
+      "-DCHRE_FILENAME=__FILE__",
+      "-DCHRE_PATCH_VERSION=1",
+      "-DCHRE_PLATFORM_ID=1",
+      "-DCHRE_FIRST_SUPPORTED_API_VERSION=CHRE_API_VERSION_1_1",
+      "-DCHRE_VARIANT_SUPPLIES_STATIC_NANOAPP_LIST",
+      "-DCHRE_NANOAPP_INTERNAL",
+    ]
+  }
+
+  pw_source_set("default_chre_config") {
+    public_configs = [ ":default_chre_config_defines" ]
+  }
+
+  pw_source_set("config") {
+    public_deps = [ pw_chre_CONFIG ]
+  }
+
+  config("public_includes") {
+    include_dirs = [
+      "$dir_pw_third_party_chre/core/include",
+      "$dir_pw_third_party_chre/chre_api/include",
+      "$dir_pw_third_party_chre/chre_api/include/chre_api",
+      "$dir_pw_third_party_chre/pal/include",
+      "$dir_pw_third_party_chre/platform/include",
+      "$dir_pw_third_party_chre/platform/shared/include",
+      "$dir_pw_third_party_chre/util/include",
+      "$dir_pw_third_party_chre/apps/include",
+    ]
+    visibility = [ ":*" ]
+  }
+
+  pw_source_set("chre_headers") {
+    public_configs = [ ":public_includes" ]
+    public_deps = [ ":config" ]
+  }
+
+  pw_source_set("chre") {
+    public_configs = [ ":disable_warnings" ]
+    sources = [
+      "$dir_pw_third_party_chre/core/debug_dump_manager.cc",
+      "$dir_pw_third_party_chre/core/event.cc",
+      "$dir_pw_third_party_chre/core/event_loop.cc",
+      "$dir_pw_third_party_chre/core/event_loop_manager.cc",
+      "$dir_pw_third_party_chre/core/event_ref_queue.cc",
+      "$dir_pw_third_party_chre/core/host_comms_manager.cc",
+      "$dir_pw_third_party_chre/core/host_notifications.cc",
+      "$dir_pw_third_party_chre/core/init.cc",
+      "$dir_pw_third_party_chre/core/log.cc",
+      "$dir_pw_third_party_chre/core/nanoapp.cc",
+      "$dir_pw_third_party_chre/core/settings.cc",
+      "$dir_pw_third_party_chre/core/static_nanoapps.cc",
+      "$dir_pw_third_party_chre/core/timer_pool.cc",
+      "$dir_pw_third_party_chre/platform/shared/assert.cc",
+      "$dir_pw_third_party_chre/platform/shared/chre_api_core.cc",
+      "$dir_pw_third_party_chre/platform/shared/chre_api_re.cc",
+      "$dir_pw_third_party_chre/platform/shared/chre_api_user_settings.cc",
+      "$dir_pw_third_party_chre/platform/shared/chre_api_version.cc",
+      "$dir_pw_third_party_chre/platform/shared/memory_manager.cc",
+      "$dir_pw_third_party_chre/platform/shared/nanoapp/nanoapp_dso_util.cc",
+      "$dir_pw_third_party_chre/platform/shared/pal_system_api.cc",
+      "$dir_pw_third_party_chre/platform/shared/system_time.cc",
+      "$dir_pw_third_party_chre/platform/shared/version.cc",
+      "$dir_pw_third_party_chre/util/buffer_base.cc",
+      "$dir_pw_third_party_chre/util/dynamic_vector_base.cc",
+      "$dir_pw_third_party_chre/util/nanoapp/audio.cc",
+      "$dir_pw_third_party_chre/util/nanoapp/callbacks.cc",
+      "$dir_pw_third_party_chre/util/nanoapp/debug.cc",
+      "$dir_pw_third_party_chre/util/nanoapp/wifi.cc",
+      "$dir_pw_third_party_chre/util/system/debug_dump.cc",
+    ]
+
+    public_deps = [
+      ":chre_headers",
+      "$pw_chre_PLATFORM_BACKEND",
+    ]
+    remove_configs = [ "$dir_pw_build:internal_strict_warnings" ]
+  }
+
+  pw_source_set("example_apps") {
+    sources = [
+      "$dir_pw_third_party_chre/apps/debug_dump_world/debug_dump_world.cc",
+      "$dir_pw_third_party_chre/apps/hello_world/hello_world.cc",
+      "$dir_pw_third_party_chre/apps/message_world/message_world.cc",
+      "$dir_pw_third_party_chre/apps/spammer/spammer.cc",
+      "$dir_pw_third_party_chre/apps/timer_world/timer_world.cc",
+      "$dir_pw_third_party_chre/apps/unload_tester/unload_tester.cc",
+    ]
+    public_deps = [ ":chre" ]
+  }
+
+  config("test_includes") {
+    include_dirs = [
+      "$dir_pw_third_party_chre/platform/shared",
+      "$dir_pw_third_party_chre/test/simulation/inc",
+    ]
+    visibility = [ ":*" ]
+  }
+
+  config("tests_disable_warnings") {
+    cflags = [ "-Wno-sign-compare" ]
+    visibility = [ ":*" ]
+  }
+
+  pw_test("unit_tests") {
+    enable_if =
+        dir_pw_third_party_chre != "" && pw_chrono_SYSTEM_CLOCK_BACKEND != ""
+    sources = [
+      "$dir_pw_third_party_chre/util/tests/blocking_queue_test.cc",
+      "$dir_pw_third_party_chre/util/tests/buffer_test.cc",
+      "$dir_pw_third_party_chre/util/tests/conditional_lock_guard_test.cc",
+      "$dir_pw_third_party_chre/util/tests/debug_dump_test.cc",
+      "$dir_pw_third_party_chre/util/tests/lock_guard_test.cc",
+      "$dir_pw_third_party_chre/util/tests/memory_pool_test.cc",
+      "$dir_pw_third_party_chre/util/tests/optional_test.cc",
+      "$dir_pw_third_party_chre/util/tests/ref_base_test.cc",
+      "$dir_pw_third_party_chre/util/tests/shared_ptr_test.cc",
+      "$dir_pw_third_party_chre/util/tests/singleton_test.cc",
+      "$dir_pw_third_party_chre/util/tests/time_test.cc",
+      "$dir_pw_third_party_chre/util/tests/unique_ptr_test.cc",
+    ]
+    public_deps = [ ":chre" ]
+    public_configs = [ ":tests_disable_warnings" ]
+    remove_configs = [ "$dir_pw_build:internal_strict_warnings" ]
+  }
+
+  pw_test("integration_tests") {
+    enable_if =
+        dir_pw_third_party_chre != "" && pw_chrono_SYSTEM_CLOCK_BACKEND != ""
+    sources = [
+      "$dir_pw_third_party_chre/test/simulation/memory_test.cc",
+      "$dir_pw_third_party_chre/test/simulation/test_util.cc",
+      "$dir_pw_third_party_chre/test/simulation/timer_test.cc",
+      "integration_test.cc",
+    ]
+
+    public_deps = [ ":chre" ]
+    public_configs = [
+      ":test_includes",
+      ":tests_disable_warnings",
+    ]
+    remove_configs = [ "$dir_pw_build:internal_strict_warnings" ]
+  }
+}
diff --git a/third_party/chre/chre.gni b/third_party/chre/chre.gni
new file mode 100644
index 0000000..d4695d0
--- /dev/null
+++ b/third_party/chre/chre.gni
@@ -0,0 +1,26 @@
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+declare_args() {
+  # If compiling backends with chre, this variable is set to the path to the
+  # chre installation. When set, a pw_source_set for the chre library is
+  # created at "$dir_pw_third_party/chre".
+  dir_pw_third_party_chre = ""
+
+  # The configuration for building CHRE.
+  pw_chre_CONFIG = "//third_party/chre:default_chre_config"
+
+  # CHRE's platform backend. The default is the pigweed backend.
+  pw_chre_PLATFORM_BACKEND = "//pw_chre:chre_backend"
+}
diff --git a/third_party/chre/integration_test.cc b/third_party/chre/integration_test.cc
new file mode 100644
index 0000000..acbaaec
--- /dev/null
+++ b/third_party/chre/integration_test.cc
@@ -0,0 +1,118 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include <chrono>
+#include <functional>
+#include <thread>
+
+#include "chre/core/event_loop_manager.h"
+#include "chre/core/init.h"
+#include "chre/platform/memory.h"
+#include "chre/platform/system_timer.h"
+#include "chre/util/fixed_size_blocking_queue.h"
+#include "chre/util/memory.h"
+#include "chre/util/non_copyable.h"
+#include "chre/util/singleton.h"
+#include "chre/util/time.h"
+#include "chre_api/chre/version.h"
+#include "gtest/gtest.h"
+#include "test_base.h"
+#include "test_util.h"
+
+namespace chre {
+
+void TestBase::SetUp() {
+  TestEventQueueSingleton::init();
+  chre::init();
+  EventLoopManagerSingleton::get()->lateInit();
+
+  mChreThread = std::thread(
+      []() { EventLoopManagerSingleton::get()->getEventLoop().run(); });
+
+  auto callback = [](void*) {
+    LOGE("Test timed out ...");
+    TestEventQueueSingleton::get()->pushEvent(
+        CHRE_EVENT_SIMULATION_TEST_TIMEOUT);
+  };
+
+  ASSERT_TRUE(mSystemTimer.init());
+  ASSERT_TRUE(mSystemTimer.set(
+      callback, nullptr /*data*/, Nanoseconds(getTimeoutNs())));
+}
+
+void TestBase::TearDown() {
+  mSystemTimer.cancel();
+  TestEventQueueSingleton::get()->flush();
+  EventLoopManagerSingleton::get()->getEventLoop().stop();
+  mChreThread.join();
+  chre::deinit();
+  TestEventQueueSingleton::deinit();
+  deleteNanoappInfos();
+}
+
+template class Singleton<TestEventQueue>;
+
+TEST_F(TestBase, CanLoadAndStartSingleNanoapp) {
+  constexpr uint64_t kAppId = 0x0123456789abcdef;
+  constexpr uint32_t kAppVersion = 0;
+  constexpr uint32_t kAppPerms = 0;
+
+  UniquePtr<Nanoapp> nanoapp = createStaticNanoapp("Test nanoapp",
+                                                   kAppId,
+                                                   kAppVersion,
+                                                   kAppPerms,
+                                                   defaultNanoappStart,
+                                                   defaultNanoappHandleEvent,
+                                                   defaultNanoappEnd);
+
+  EventLoopManagerSingleton::get()->deferCallback(
+      SystemCallbackType::FinishLoadingNanoapp,
+      std::move(nanoapp),
+      testFinishLoadingNanoappCallback);
+  waitForEvent(CHRE_EVENT_SIMULATION_TEST_NANOAPP_LOADED);
+}
+
+TEST_F(TestBase, CanLoadAndStartMultipleNanoapps) {
+  constexpr uint64_t kAppId1 = 0x123;
+  constexpr uint64_t kAppId2 = 0x456;
+  constexpr uint32_t kAppVersion = 0;
+  constexpr uint32_t kAppPerms = 0;
+  loadNanoapp("Test nanoapp",
+              kAppId1,
+              kAppVersion,
+              kAppPerms,
+              defaultNanoappStart,
+              defaultNanoappHandleEvent,
+              defaultNanoappEnd);
+
+  loadNanoapp("Test nanoapp",
+              kAppId2,
+              kAppVersion,
+              kAppPerms,
+              defaultNanoappStart,
+              defaultNanoappHandleEvent,
+              defaultNanoappEnd);
+
+  uint16_t id1;
+  EXPECT_TRUE(EventLoopManagerSingleton::get()
+                  ->getEventLoop()
+                  .findNanoappInstanceIdByAppId(kAppId1, &id1));
+  uint16_t id2;
+  EXPECT_TRUE(EventLoopManagerSingleton::get()
+                  ->getEventLoop()
+                  .findNanoappInstanceIdByAppId(kAppId2, &id2));
+
+  EXPECT_NE(id1, id2);
+}
+}  // namespace chre
diff --git a/third_party/emboss/BUILD.gn b/third_party/emboss/BUILD.gn
index 8a4d4f7..21b8ba5 100644
--- a/third_party/emboss/BUILD.gn
+++ b/third_party/emboss/BUILD.gn
@@ -14,6 +14,7 @@
 
 import("//build_overrides/pigweed.gni")
 import("$dir_pw_build/python.gni")
+import("$dir_pw_build/target_types.gni")
 
 import("$dir_pw_docgen/docs.gni")
 import("$dir_pw_third_party/emboss/emboss.gni")
@@ -23,19 +24,35 @@
   # EMBOSS_CHECK is used to check preconditions on application logic (e.g.
   # Write() checks the [requires: ...] attribute).
   defines = [
-    "EMBOSS_CHECK=PW_DCHECK",
+    "EMBOSS_CHECK=PW_DASSERT",
     "EMBOSS_CHECK_ABORTS",
-    "EMBOSS_DCHECK=PW_DCHECK",
+    "EMBOSS_DCHECK=PW_DASSERT",
     "EMBOSS_DCHECK_ABORTS",
   ]
+  cflags_cc = [
+    "-include",
+    rebase_path("$dir_pw_assert/public/pw_assert/assert.h", root_build_dir),
+  ]
 }
 
-source_set("cpp_utils") {
+pw_source_set("default_overrides") {
+  public_configs = [ ":default_config" ]
+  public_deps = [ "$dir_pw_assert" ]
+}
+
+config("disable_warnings") {
+  cflags_cc = [
+    "-Wno-unused-parameter",
+    "-Wno-deprecated-copy",
+    "-Wno-format-invalid-specifier",
+  ]
+}
+
+pw_source_set("cpp_utils") {
   # emboss depends on a separate checkout not included in pigweed, so
   # ignore gn check for this module.
   check_includes = false
-  public_configs = [ pw_third_party_emboss_CONFIG ]
-  sources = [
+  public = [
     "$dir_pw_third_party_emboss/runtime/cpp/emboss_arithmetic.h",
     "$dir_pw_third_party_emboss/runtime/cpp/emboss_arithmetic_all_known_generated.h",
     "$dir_pw_third_party_emboss/runtime/cpp/emboss_arithmetic_maximum_operation_generated.h",
@@ -52,6 +69,9 @@
     "$dir_pw_third_party_emboss/runtime/cpp/emboss_text_util.h",
     "$dir_pw_third_party_emboss/runtime/cpp/emboss_view_parameters.h",
   ]
+  public_configs = [ ":disable_warnings" ]
+  public_deps = [ pw_third_party_emboss_CONFIG ]
+  visibility = [ "*" ]
 }
 
 # Exists solely to satisfy presubmit. embossc_runner.py is used for real in
@@ -65,10 +85,3 @@
 pw_doc_group("docs") {
   sources = [ "docs.rst" ]
 }
-
-# Flags that are needed to compile targets that depend on Emboss.
-# TODO(benlawson): Fix Emboss upstream so this can be removed
-# (https://github.com/google/emboss/issues/69)
-config("flags") {
-  cflags = [ "-Wno-unused-parameter" ]
-}
diff --git a/third_party/emboss/build_defs.gni b/third_party/emboss/build_defs.gni
index 3e02b2a..e5ca204 100644
--- a/third_party/emboss/build_defs.gni
+++ b/third_party/emboss/build_defs.gni
@@ -72,7 +72,7 @@
   default_import_dir = get_path_info(default_import_dir, "dir")
   default_import_dir = rebase_path(default_import_dir, root_build_dir)
 
-  action(target_name + "_header") {
+  action("${target_name}_header") {
     script = "$dir_pw_third_party/emboss/embossc_runner.py"
 
     args = [
@@ -141,27 +141,28 @@
     outputs = [ compiled_header_path ]
   }
 
-  config(target_name + "_emboss_config") {
+  config("${target_name}_emboss_config") {
     include_dirs = [
       "$dir_pw_third_party_emboss",
       root_gen_dir,
     ]
   }
 
+  # Since the emboss_cc_library template is used in non-Pigweed environments, this target is not pw_source_set,
+  # which restricts visibility by default.
   source_set(target_name) {
     forward_variables_from(invoker, "*")
 
     sources = [ compiled_header_path ]
 
-    if (!defined(invoker.deps)) {
-      deps = []
-    }
-    deps += [ "$dir_pw_third_party/emboss:cpp_utils" ]
-    public_deps = [ ":" + target_name + "_header" ]
+    public_deps = [
+      ":${target_name}_header",
+      "$dir_pw_third_party/emboss:cpp_utils",
+    ]
 
     if (!defined(invoker.public_configs)) {
       public_configs = []
     }
-    public_configs += [ ":" + target_name + "_emboss_config" ]
+    public_configs += [ ":${target_name}_emboss_config" ]
   }
 }
diff --git a/third_party/emboss/docs.rst b/third_party/emboss/docs.rst
index 1cabc8a..478a79a 100644
--- a/third_party/emboss/docs.rst
+++ b/third_party/emboss/docs.rst
@@ -34,8 +34,9 @@
 Optionally, configure the Emboss defines documented at
 `dir_pw_third_party_emboss/runtime/cpp/emboss_defines.h
 <https://github.com/google/emboss/blob/master/runtime/cpp/emboss_defines.h>`_
-by setting the ``pw_third_party_emboss_CONFIG`` variable to a config that
-overrides the defines. By default, checks will use PW_DCHECK.
+by setting the ``pw_third_party_emboss_CONFIG`` variable to a source set that
+includes a public config overriding the defines. By default, checks will
+use PW_DASSERT.
 
 ..
    inclusive-language: enable
@@ -48,11 +49,11 @@
 
 .. code-block::
 
-  import("$dir_pw_third_party/emboss/build_defs.gni")
+   import("$dir_pw_third_party/emboss/build_defs.gni")
 
-  emboss_cc_library("protocol") {
-    source = "my-protocol.emb"
-  }
+   emboss_cc_library("protocol") {
+     source = "my-protocol.emb"
+   }
 
 This generates a source set of the same name as the target, in this case "protocol".
 To use the bindings, list this target as a dependency in GN and include the generated
@@ -60,4 +61,5 @@
 
 .. code-block::
 
-  #include <some/path/to/protocol.emb.h>
+   #include <some/path/to/protocol.emb.h>
+
diff --git a/third_party/emboss/emboss.gni b/third_party/emboss/emboss.gni
index 93d8396..a9c52ec 100644
--- a/third_party/emboss/emboss.gni
+++ b/third_party/emboss/emboss.gni
@@ -19,7 +19,7 @@
   # source code.
   dir_pw_third_party_emboss = ""
 
-  # config target for overriding Emboss defines (e.g. EMBOSS_CHECK).
+  # target for overriding Emboss defines (e.g. EMBOSS_CHECK).
   pw_third_party_emboss_CONFIG =
-      "$dir_pigweed/third_party/emboss:default_config"
+      "$dir_pigweed/third_party/emboss:default_overrides"
 }
diff --git a/third_party/freertos/docs.rst b/third_party/freertos/docs.rst
index 0079f24..d923686 100644
--- a/third_party/freertos/docs.rst
+++ b/third_party/freertos/docs.rst
@@ -41,8 +41,13 @@
 
 Bazel
 =====
+.. There's a bug in the Bazel docs site which is causing the link to the evergreen
+.. section on constraint settings to 404. So for now, we'll just link to the
+.. v5.4.0 doc on constraint settings. When the Bazel bug is fixed we can return the
+.. URL to https://bazel.build/reference/be/platform#constraint_setting
+
 In Bazel, the FreeRTOS build is configured through `constraint_settings
-<https://bazel.build/reference/be/platform#constraint_setting>`_. The `platform
+<https://docs.bazel.build/versions/5.4.0/be/platform.html#constraint_setting>`_. The `platform
 <https://bazel.build/extending/platforms>`_ you are building for must specify
 values for the following settings:
 
diff --git a/third_party/freertos/public/pw_third_party/freertos/config_assert.h b/third_party/freertos/public/pw_third_party/freertos/config_assert.h
index 11d8048..f7123df 100644
--- a/third_party/freertos/public/pw_third_party/freertos/config_assert.h
+++ b/third_party/freertos/public/pw_third_party/freertos/config_assert.h
@@ -29,5 +29,5 @@
 #endif  // PW_THIRD_PARTY_FREERTOS_NO_STATICS == 1
 
 #ifdef __cplusplus
-}  // extern "C++"
+}       // extern "C++"
 #endif  // __cplusplus
diff --git a/third_party/fuchsia/BUILD.gn b/third_party/fuchsia/BUILD.gn
index 2773a80..48ac33b 100644
--- a/third_party/fuchsia/BUILD.gn
+++ b/third_party/fuchsia/BUILD.gn
@@ -18,39 +18,14 @@
 import("$dir_pw_build/target_types.gni")
 import("$dir_pw_docgen/docs.gni")
 import("$dir_pw_unit_test/test.gni")
+import("fuchsia.gni")
 
 pw_doc_group("docs") {
   sources = [ "docs.rst" ]
 }
 
-config("fit_public_include_path") {
-  include_dirs = [ "repo/sdk/lib/fit/include" ]
-  visibility = [ ":*" ]
-}
-
-config("stdcompat_public_include_path") {
-  include_dirs = [ "repo/sdk/lib/stdcompat/include" ]
-  visibility = [ ":*" ]
-}
-
-pw_source_set("fit") {
-  public_configs = [ ":fit_public_include_path" ]
-  public_deps = [
-    ":stdcompat",
-    dir_pw_assert,
-  ]
-  public = [
-    "repo/sdk/lib/fit/include/lib/fit/function.h",
-    "repo/sdk/lib/fit/include/lib/fit/nullable.h",
-    "repo/sdk/lib/fit/include/lib/fit/result.h",
-    "repo/sdk/lib/fit/include/lib/fit/traits.h",
-  ]
-  sources = [
-    "repo/sdk/lib/fit/include/lib/fit/internal/compiler.h",
-    "repo/sdk/lib/fit/include/lib/fit/internal/function.h",
-    "repo/sdk/lib/fit/include/lib/fit/internal/result.h",
-    "repo/sdk/lib/fit/include/lib/fit/internal/utility.h",
-  ]
+group("fit") {
+  public_deps = [ "$dir_pw_third_party_fuchsia/sdk/lib/fit" ]
 }
 
 pw_test("function_tests") {
@@ -64,25 +39,8 @@
   remove_configs = [ "$dir_pw_build:strict_warnings" ]
 }
 
-pw_source_set("stdcompat") {
-  public_configs = [ ":stdcompat_public_include_path" ]
-  public = [
-    "repo/sdk/lib/stdcompat/include/lib/stdcompat/bit.h",
-    "repo/sdk/lib/stdcompat/include/lib/stdcompat/functional.h",
-    "repo/sdk/lib/stdcompat/include/lib/stdcompat/memory.h",
-    "repo/sdk/lib/stdcompat/include/lib/stdcompat/optional.h",
-    "repo/sdk/lib/stdcompat/include/lib/stdcompat/type_traits.h",
-    "repo/sdk/lib/stdcompat/include/lib/stdcompat/utility.h",
-    "repo/sdk/lib/stdcompat/include/lib/stdcompat/version.h",
-  ]
-  sources = [
-    "repo/sdk/lib/stdcompat/include/lib/stdcompat/internal/bit.h",
-    "repo/sdk/lib/stdcompat/include/lib/stdcompat/internal/constructors.h",
-    "repo/sdk/lib/stdcompat/include/lib/stdcompat/internal/exception.h",
-    "repo/sdk/lib/stdcompat/include/lib/stdcompat/internal/storage.h",
-    "repo/sdk/lib/stdcompat/include/lib/stdcompat/internal/type_traits.h",
-    "repo/sdk/lib/stdcompat/include/lib/stdcompat/internal/utility.h",
-  ]
+group("stdcompat") {
+  public_deps = [ "$dir_pw_third_party_fuchsia/sdk/lib/stdcompat" ]
 }
 
 pw_python_script("generate_fuchsia_patch") {
diff --git a/third_party/fuchsia/copy.bara.sky b/third_party/fuchsia/copy.bara.sky
index 234d844..be7a2a7 100644
--- a/third_party/fuchsia/copy.bara.sky
+++ b/third_party/fuchsia/copy.bara.sky
@@ -61,7 +61,8 @@
         checker = leakr.disable_check("Syncing between OSS projects"),
     ),
     origin_files = glob(fuchsia_repo_files),
-    destination_files = glob(["third_party/fuchsia/repo/**"]),
+    # Exclude BUILD.gn files to keep Pigweed's versions.
+    destination_files = glob(["third_party/fuchsia/repo/**"], exclude = ["**/BUILD.gn"]),
     authoring = authoring.pass_thru("Fuchsia Authors <noreply@google.com>"),
     transformations = [
         core.move("", "third_party/fuchsia/repo"),
diff --git a/third_party/fuchsia/fuchsia.gni b/third_party/fuchsia/fuchsia.gni
new file mode 100644
index 0000000..3f47a22
--- /dev/null
+++ b/third_party/fuchsia/fuchsia.gni
@@ -0,0 +1,21 @@
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+declare_args() {
+  # Path to the Fuchsia sources to use in Pigweed's build. Defaults to Pigweed's
+  # mirror of the few Fuchsia source files it uses.
+  dir_pw_third_party_fuchsia = "$dir_pw_third_party/fuchsia/repo"
+}
diff --git a/third_party/fuchsia/repo/sdk/lib/fit/BUILD.gn b/third_party/fuchsia/repo/sdk/lib/fit/BUILD.gn
new file mode 100644
index 0000000..e789cda
--- /dev/null
+++ b/third_party/fuchsia/repo/sdk/lib/fit/BUILD.gn
@@ -0,0 +1,45 @@
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+# This build file is defined by Pigweed, not Fuchsia. It only refers to sources
+# imported into the Pigweed repo.
+
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+config("public_include_path") {
+  include_dirs = [ "include" ]
+  visibility = [ ":*" ]
+}
+
+pw_source_set("fit") {
+  public_configs = [ ":public_include_path" ]
+  public_deps = [
+    "$dir_pw_third_party/fuchsia:stdcompat",
+    dir_pw_assert,
+  ]
+  public = [
+    "include/lib/fit/function.h",
+    "include/lib/fit/nullable.h",
+    "include/lib/fit/result.h",
+    "include/lib/fit/traits.h",
+  ]
+  sources = [
+    "include/lib/fit/internal/compiler.h",
+    "include/lib/fit/internal/function.h",
+    "include/lib/fit/internal/result.h",
+    "include/lib/fit/internal/utility.h",
+  ]
+}
diff --git a/third_party/fuchsia/repo/sdk/lib/fit/include/lib/fit/function.h b/third_party/fuchsia/repo/sdk/lib/fit/include/lib/fit/function.h
index 0c91f54..2403dee 100644
--- a/third_party/fuchsia/repo/sdk/lib/fit/include/lib/fit/function.h
+++ b/third_party/fuchsia/repo/sdk/lib/fit/include/lib/fit/function.h
@@ -5,6 +5,8 @@
 #ifndef LIB_FIT_FUNCTION_H_
 #define LIB_FIT_FUNCTION_H_
 
+#include <cstddef>
+#include <memory>
 #include <type_traits>
 
 #include "internal/function.h"
@@ -13,14 +15,14 @@
 
 namespace fit {
 
-template <size_t inline_target_size, bool require_inline, typename FunctionType>
+template <size_t inline_target_size, bool require_inline, typename FunctionType, typename Allocator>
 class function_impl {
   static_assert(std::is_function<FunctionType>::value,
                 "fit::function must be instantiated with a function type, such as void() or "
                 "int(char*, bool)");
 };
 
-template <size_t inline_target_size, bool require_inline, typename FunctionType>
+template <size_t inline_target_size, bool require_inline, typename FunctionType, typename Allocator>
 class callback_impl {
   static_assert(std::is_function<FunctionType>::value,
                 "fit::callback must be instantiated with a function type, such as void() or "
@@ -33,6 +35,10 @@
 // function.
 constexpr size_t default_inline_target_size = sizeof(void*) * 2;
 
+// The default allocator used for allocating callables on the heap. Its `value_type` is irrelevant,
+// since it must support rebinding.
+using default_callable_allocator = std::allocator<std::byte>;
+
 // A |fit::function| is a move-only polymorphic function wrapper.
 //
 // If you need a class with similar characteristics that also ensures
@@ -61,6 +67,9 @@
 // fit within a function without requiring heap allocation.
 // Defaults to |default_inline_target_size|.
 //
+// |Allocator| is the Allocator used for heap allocation, if required. Its `value_type` is
+// irrelevant, since it must support rebinding.
+//
 // Class members are documented in |fit::function_impl|, below.
 //
 // EXAMPLES
@@ -70,9 +79,10 @@
 // -
 // https://fuchsia.googlesource.com/fuchsia/+/HEAD/sdk/lib/fit/test/examples/function_example2.cc
 //
-template <typename T, size_t inline_target_size = default_inline_target_size>
+template <typename T, size_t inline_target_size = default_inline_target_size,
+          typename Allocator = default_callable_allocator>
 using function = function_impl<internal::RoundUpToWord(inline_target_size),
-                               /*require_inline=*/false, T>;
+                               /*require_inline=*/false, T, Allocator>;
 
 // A move-only callable object wrapper that forces callables to be stored inline
 // and never performs heap allocation.
@@ -82,7 +92,7 @@
 // compile.
 template <typename T, size_t inline_target_size = default_inline_target_size>
 using inline_function = function_impl<internal::RoundUpToWord(inline_target_size),
-                                      /*require_inline=*/true, T>;
+                                      /*require_inline=*/true, T, default_callable_allocator>;
 
 // Synonym for a function which takes no arguments and produces no result.
 using closure = function<void()>;
@@ -153,11 +163,15 @@
 // fit within a callback without requiring heap allocation.
 // Defaults to |default_inline_target_size|.
 //
+// |Allocator| is the Allocator used for heap allocation, if required. Its `value_type` is
+// irrelevant, since it must support rebinding.
+//
 // Class members are documented in |fit::callback_impl|, below.
 //
-template <typename T, size_t inline_target_size = default_inline_target_size>
-using callback =
-    callback_impl<internal::RoundUpToWord(inline_target_size), /*require_inline=*/false, T>;
+template <typename T, size_t inline_target_size = default_inline_target_size,
+          typename Allocator = default_callable_allocator>
+using callback = callback_impl<internal::RoundUpToWord(inline_target_size),
+                               /*require_inline=*/false, T, Allocator>;
 
 // A move-only, run-once, callable object wrapper that forces callables to be
 // stored inline and never performs heap allocation.
@@ -167,19 +181,22 @@
 // compile.
 template <typename T, size_t inline_target_size = default_inline_target_size>
 using inline_callback = callback_impl<internal::RoundUpToWord(inline_target_size),
-                                      /*require_inline=*/true, T>;
+                                      /*require_inline=*/true, T, default_callable_allocator>;
 
-template <size_t inline_target_size, bool require_inline, typename Result, typename... Args>
-class function_impl<inline_target_size, require_inline, Result(Args...)> final
-    : private ::fit::internal::function_base<inline_target_size, require_inline, Result(Args...)> {
-  using base = ::fit::internal::function_base<inline_target_size, require_inline, Result(Args...)>;
+template <size_t inline_target_size, bool require_inline, typename Allocator, typename Result,
+          typename... Args>
+class function_impl<inline_target_size, require_inline, Result(Args...), Allocator> final
+    : private ::fit::internal::function_base<inline_target_size, require_inline, Result(Args...),
+                                             Allocator> {
+  using base = ::fit::internal::function_base<inline_target_size, require_inline, Result(Args...),
+                                              Allocator>;
 
   // function_base requires private access during share()
-  friend class ::fit::internal::function_base<inline_target_size, require_inline, Result(Args...)>;
+  friend base;
 
   // supports target() for shared functions
   friend const void* ::fit::internal::get_target_type_id<>(
-      const function_impl<inline_target_size, require_inline, Result(Args...)>&);
+      const function_impl<inline_target_size, require_inline, Result(Args...), Allocator>&);
 
   template <typename U>
   using not_self_type = ::fit::internal::not_same_type<function_impl, U>;
@@ -228,10 +245,9 @@
   // unexpected behavior of a |fit::function| that would otherwise fail after
   // one call. To explicitly allow this, simply wrap the |fit::callback| in a
   // pass-through lambda before passing it to the |fit::function|.
-  template <size_t other_inline_target_size, bool other_require_inline>
-  function_impl(
-      ::fit::callback_impl<other_inline_target_size, other_require_inline, Result(Args...)>) =
-      delete;
+  template <size_t other_inline_target_size, bool other_require_inline, typename OtherAllocator>
+  function_impl(::fit::callback_impl<other_inline_target_size, other_require_inline,
+                                     Result(Args...), OtherAllocator>) = delete;
 
   // Creates a function with a target moved from another function,
   // leaving the other function with an empty target.
@@ -273,10 +289,9 @@
   // fail after one call. To explicitly allow this, simply wrap the
   // |fit::callback| in a pass-through lambda before assigning it to the
   // |fit::function|.
-  template <size_t other_inline_target_size, bool other_require_inline>
-  function_impl& operator=(
-      ::fit::callback_impl<other_inline_target_size, other_require_inline, Result(Args...)>) =
-      delete;
+  template <size_t other_inline_target_size, bool other_require_inline, typename OtherAllocator>
+  function_impl& operator=(::fit::callback_impl<other_inline_target_size, other_require_inline,
+                                                Result(Args...), OtherAllocator>) = delete;
 
   // Move assignment
   function_impl& operator=(function_impl&& other) noexcept {
@@ -313,44 +328,49 @@
   }
 };
 
-template <size_t inline_target_size, bool require_inline, typename FunctionType>
-void swap(function_impl<inline_target_size, require_inline, FunctionType>& a,
-          function_impl<inline_target_size, require_inline, FunctionType>& b) {
+template <size_t inline_target_size, bool require_inline, typename FunctionType, typename Allocator>
+void swap(function_impl<inline_target_size, require_inline, FunctionType, Allocator>& a,
+          function_impl<inline_target_size, require_inline, FunctionType, Allocator>& b) {
   a.swap(b);
 }
 
-template <size_t inline_target_size, bool require_inline, typename FunctionType>
-bool operator==(const function_impl<inline_target_size, require_inline, FunctionType>& f,
+template <size_t inline_target_size, bool require_inline, typename FunctionType, typename Allocator>
+bool operator==(const function_impl<inline_target_size, require_inline, FunctionType, Allocator>& f,
                 decltype(nullptr)) {
   return !f;
 }
-template <size_t inline_target_size, bool require_inline, typename FunctionType>
-bool operator==(decltype(nullptr),
-                const function_impl<inline_target_size, require_inline, FunctionType>& f) {
+template <size_t inline_target_size, bool require_inline, typename FunctionType, typename Allocator>
+bool operator==(
+    decltype(nullptr),
+    const function_impl<inline_target_size, require_inline, FunctionType, Allocator>& f) {
   return !f;
 }
-template <size_t inline_target_size, bool require_inline, typename FunctionType>
-bool operator!=(const function_impl<inline_target_size, require_inline, FunctionType>& f,
+template <size_t inline_target_size, bool require_inline, typename FunctionType, typename Allocator>
+bool operator!=(const function_impl<inline_target_size, require_inline, FunctionType, Allocator>& f,
                 decltype(nullptr)) {
   return !!f;
 }
-template <size_t inline_target_size, bool require_inline, typename FunctionType>
-bool operator!=(decltype(nullptr),
-                const function_impl<inline_target_size, require_inline, FunctionType>& f) {
+template <size_t inline_target_size, bool require_inline, typename FunctionType, typename Allocator>
+bool operator!=(
+    decltype(nullptr),
+    const function_impl<inline_target_size, require_inline, FunctionType, Allocator>& f) {
   return !!f;
 }
 
-template <size_t inline_target_size, bool require_inline, typename Result, typename... Args>
-class callback_impl<inline_target_size, require_inline, Result(Args...)> final
-    : private ::fit::internal::function_base<inline_target_size, require_inline, Result(Args...)> {
-  using base = ::fit::internal::function_base<inline_target_size, require_inline, Result(Args...)>;
+template <size_t inline_target_size, bool require_inline, typename Allocator, typename Result,
+          typename... Args>
+class callback_impl<inline_target_size, require_inline, Result(Args...), Allocator> final
+    : private ::fit::internal::function_base<inline_target_size, require_inline, Result(Args...),
+                                             Allocator> {
+  using base = ::fit::internal::function_base<inline_target_size, require_inline, Result(Args...),
+                                              Allocator>;
 
   // function_base requires private access during share()
-  friend class ::fit::internal::function_base<inline_target_size, require_inline, Result(Args...)>;
+  friend base;
 
   // supports target() for shared functions
   friend const void* ::fit::internal::get_target_type_id<>(
-      const callback_impl<inline_target_size, require_inline, Result(Args...)>&);
+      const callback_impl<inline_target_size, require_inline, Result(Args...), Allocator>&);
 
   template <typename U>
   using not_self_type = ::fit::internal::not_same_type<callback_impl, U>;
@@ -469,30 +489,32 @@
   }
 };
 
-template <size_t inline_target_size, bool require_inline, typename FunctionType>
-void swap(callback_impl<inline_target_size, require_inline, FunctionType>& a,
-          callback_impl<inline_target_size, require_inline, FunctionType>& b) {
+template <size_t inline_target_size, bool require_inline, typename FunctionType, typename Allocator>
+void swap(callback_impl<inline_target_size, require_inline, FunctionType, Allocator>& a,
+          callback_impl<inline_target_size, require_inline, FunctionType, Allocator>& b) {
   a.swap(b);
 }
 
-template <size_t inline_target_size, bool require_inline, typename FunctionType>
-bool operator==(const callback_impl<inline_target_size, require_inline, FunctionType>& f,
+template <size_t inline_target_size, bool require_inline, typename FunctionType, typename Allocator>
+bool operator==(const callback_impl<inline_target_size, require_inline, FunctionType, Allocator>& f,
                 decltype(nullptr)) {
   return !f;
 }
-template <size_t inline_target_size, bool require_inline, typename FunctionType>
-bool operator==(decltype(nullptr),
-                const callback_impl<inline_target_size, require_inline, FunctionType>& f) {
+template <size_t inline_target_size, bool require_inline, typename FunctionType, typename Allocator>
+bool operator==(
+    decltype(nullptr),
+    const callback_impl<inline_target_size, require_inline, FunctionType, Allocator>& f) {
   return !f;
 }
-template <size_t inline_target_size, bool require_inline, typename FunctionType>
-bool operator!=(const callback_impl<inline_target_size, require_inline, FunctionType>& f,
+template <size_t inline_target_size, bool require_inline, typename FunctionType, typename Allocator>
+bool operator!=(const callback_impl<inline_target_size, require_inline, FunctionType, Allocator>& f,
                 decltype(nullptr)) {
   return !!f;
 }
-template <size_t inline_target_size, bool require_inline, typename FunctionType>
-bool operator!=(decltype(nullptr),
-                const callback_impl<inline_target_size, require_inline, FunctionType>& f) {
+template <size_t inline_target_size, bool require_inline, typename FunctionType, typename Allocator>
+bool operator!=(
+    decltype(nullptr),
+    const callback_impl<inline_target_size, require_inline, FunctionType, Allocator>& f) {
   return !!f;
 }
 
diff --git a/third_party/fuchsia/repo/sdk/lib/fit/include/lib/fit/internal/function.h b/third_party/fuchsia/repo/sdk/lib/fit/include/lib/fit/internal/function.h
index bf70931..4cf078d 100644
--- a/third_party/fuchsia/repo/sdk/lib/fit/include/lib/fit/internal/function.h
+++ b/third_party/fuchsia/repo/sdk/lib/fit/include/lib/fit/internal/function.h
@@ -68,7 +68,8 @@
 
 static_assert(sizeof(target_ops<void>) == sizeof(void (*)()) * 5, "Unexpected target_ops padding");
 
-template <typename Callable, bool is_inline, bool is_shared, typename Result, typename... Args>
+template <typename Callable, bool is_inline, bool is_shared, typename Allocator, typename Result,
+          typename... Args>
 struct target;
 
 inline void trivial_target_destroy(void* /*bits*/) {}
@@ -96,9 +97,10 @@
   static_assert(std::is_same<Unused, void>::value, "Only instantiate null_target with void");
 };
 
-template <typename Result, typename... Args>
-struct target<decltype(nullptr), /*is_inline=*/true, /*is_shared=*/false, Result, Args...> final
-    : public null_target<> {};
+template <typename Allocator, typename Result, typename... Args>
+struct target<decltype(nullptr), /*is_inline=*/true, /*is_shared=*/false, Allocator, Result,
+              Args...>
+    final : public null_target<> {};
 
 inline void* null_target_get(void* /*bits*/) { return nullptr; }
 inline void null_target_move(void* /*from_bits*/, void* /*to_bits*/) {}
@@ -117,10 +119,8 @@
   std::memcpy(to_bits, from_bits, size_bytes);
 }
 
-template <typename Callable, typename Result, typename... Args>
-struct target<Callable,
-              /*is_inline=*/true, /*is_shared=*/false, Result, Args...>
-    final {
+template <typename Callable, typename Allocator, typename Result, typename... Args>
+struct target<Callable, /*is_inline=*/true, /*is_shared=*/false, Allocator, Result, Args...> final {
   template <typename Callable_>
   static void initialize(void* bits, Callable_&& target) {
     new (bits) Callable(std::forward<Callable_>(target));
@@ -160,23 +160,25 @@
 
 inline void* inline_target_get(void* bits) { return bits; }
 
-template <typename Callable, typename Result, typename... Args>
-constexpr target_ops<Result, Args...> target<Callable,
-                                             /*is_inline=*/true,
-                                             /*is_shared=*/false, Result, Args...>::ops = {
-    &unshared_target_type_id, &inline_target_get, target::get_move_function(),
-    target::get_destroy_function(), &target::invoke};
+template <typename Callable, typename Allocator, typename Result, typename... Args>
+constexpr target_ops<Result, Args...>
+    target<Callable, /*is_inline=*/true, /*is_shared=*/false, Allocator, Result, Args...>::ops = {
+        &unshared_target_type_id, &inline_target_get, target::get_move_function(),
+        target::get_destroy_function(), &target::invoke};
 
 // vtable for pointer to target function
 
-template <typename Callable, typename Result, typename... Args>
-struct target<Callable,
-              /*is_inline=*/false, /*is_shared=*/false, Result, Args...>
+template <typename Callable, typename Allocator, typename Result, typename... Args>
+struct target<Callable, /*is_inline=*/false, /*is_shared=*/false, Allocator, Result, Args...>
     final {
   template <typename Callable_>
   static void initialize(void* bits, Callable_&& target) {
     auto ptr = static_cast<Callable**>(bits);
-    *ptr = new Callable(std::forward<Callable_>(target));
+    CallableAllocator allocator;
+    *ptr = CallableAllocatorTraits::allocate(allocator, 1u);
+    if (*ptr) {
+      CallableAllocatorTraits::construct(allocator, *ptr, std::forward<Callable_>(target));
+    }
   }
   static Result invoke(void* bits, Args... args) {
     auto& target = **static_cast<Callable**>(bits);
@@ -189,19 +191,33 @@
   }
   static void destroy(void* bits) {
     auto ptr = static_cast<Callable**>(bits);
-    delete *ptr;
+    if (*ptr) {
+      CallableAllocator allocator;
+      CallableAllocatorTraits::destroy(allocator, *ptr);
+      CallableAllocatorTraits::deallocate(allocator, *ptr, 1u);
+      *ptr = nullptr;
+    }
   }
 
   static const target_ops<Result, Args...> ops;
+
+ private:
+  using AllocatorTraits = std::allocator_traits<Allocator>;
+  using CallableAllocator = typename AllocatorTraits::template rebind_alloc<Callable>;
+  using CallableAllocatorTraits = std::allocator_traits<CallableAllocator>;
+
+  static_assert(CallableAllocatorTraits::is_always_equal::value,
+                "Objects of type Allocator must always be equal to each other: an Allocator object "
+                "must be able to deallocate the memory allocated by a different Allocator object.");
 };
 
 inline void* heap_target_get(void* bits) { return *static_cast<void**>(bits); }
 
-template <typename Callable, typename Result, typename... Args>
-constexpr target_ops<Result, Args...> target<Callable,
-                                             /*is_inline=*/false,
-                                             /*is_shared=*/false, Result, Args...>::ops = {
-    &unshared_target_type_id, &heap_target_get, &target::move, &target::destroy, &target::invoke};
+template <typename Callable, typename Allocator, typename Result, typename... Args>
+constexpr target_ops<Result, Args...>
+    target<Callable, /*is_inline=*/false, /*is_shared=*/false, Allocator, Result, Args...>::ops = {
+        &unshared_target_type_id, &heap_target_get, &target::move, &target::destroy,
+        &target::invoke};
 
 // vtable for fit::function std::shared_ptr to target function
 
@@ -212,13 +228,12 @@
 
 // For this vtable,
 // Callable by definition will be either a fit::function or fit::callback
-template <typename SharedFunction, typename Result, typename... Args>
-struct target<SharedFunction,
-              /*is_inline=*/false, /*is_shared=*/true, Result, Args...>
+template <typename SharedFunction, typename Allocator, typename Result, typename... Args>
+struct target<SharedFunction, /*is_inline=*/false, /*is_shared=*/true, Allocator, Result, Args...>
     final {
   static void initialize(void* bits, SharedFunction target) {
     new (bits) std::shared_ptr<SharedFunction>(
-        std::move(std::make_shared<SharedFunction>(std::move(target))));
+        std::move(std::allocate_shared<SharedFunction, Allocator>(Allocator(), std::move(target))));
   }
   static void copy_shared_ptr(void* from_bits, void* to_bits) {
     auto& from_shared_ptr = *static_cast<std::shared_ptr<SharedFunction>*>(from_bits);
@@ -246,10 +261,9 @@
   static const target_ops<Result, Args...> ops;
 };
 
-template <typename SharedFunction, typename Result, typename... Args>
-constexpr target_ops<Result, Args...> target<SharedFunction,
-                                             /*is_inline=*/false,
-                                             /*is_shared=*/true, Result, Args...>::ops = {
+template <typename SharedFunction, typename Allocator, typename Result, typename... Args>
+constexpr target_ops<Result, Args...> target<
+    SharedFunction, /*is_inline=*/false, /*is_shared=*/true, Allocator, Result, Args...>::ops = {
     &target::target_type_id, &target::get, &target::move, &target::destroy, &target::invoke};
 
 // Calculates the alignment to use for a function of the provided
@@ -388,13 +402,14 @@
   const base_target_ops* ops_;
 };
 
-template <size_t inline_target_size, bool require_inline, typename FunctionType>
+template <size_t inline_target_size, bool require_inline, typename FunctionType, typename Allocator>
 class function_base;
 
 // Function implementation details that require the function signature.
 // See |fit::function| and |fit::callback| documentation for more information.
-template <size_t inline_target_size, bool require_inline, typename Result, typename... Args>
-class function_base<inline_target_size, require_inline, Result(Args...)>
+template <size_t inline_target_size, bool require_inline, typename Allocator, typename Result,
+          typename... Args>
+class function_base<inline_target_size, require_inline, Result(Args...), Allocator>
     : public generic_function_base<inline_target_size> {
   using base = generic_function_base<inline_target_size>;
 
@@ -407,11 +422,10 @@
 
   template <typename Callable>
   using target_type = target<Callable, (sizeof(Callable) <= inline_target_size),
-                             /*is_shared=*/false, Result, Args...>;
+                             /*is_shared=*/false, Allocator, Result, Args...>;
   template <typename SharedFunction>
-  using shared_target_type = target<SharedFunction,
-                                    /*is_inline=*/false,
-                                    /*is_shared=*/true, Result, Args...>;
+  using shared_target_type =
+      target<SharedFunction, /*is_inline=*/false, /*is_shared=*/true, Allocator, Result, Args...>;
 
   using ops_type = const target_ops<Result, Args...>*;
 
diff --git a/third_party/fuchsia/repo/sdk/lib/stdcompat/BUILD.gn b/third_party/fuchsia/repo/sdk/lib/stdcompat/BUILD.gn
new file mode 100644
index 0000000..a17135a
--- /dev/null
+++ b/third_party/fuchsia/repo/sdk/lib/stdcompat/BUILD.gn
@@ -0,0 +1,46 @@
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+# This build file is defined by Pigweed, not Fuchsia. It only refers to sources
+# imported into the Pigweed repo.
+
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+config("public_include_path") {
+  include_dirs = [ "include" ]
+  visibility = [ ":*" ]
+}
+
+pw_source_set("stdcompat") {
+  public_configs = [ ":public_include_path" ]
+  public = [
+    "include/lib/stdcompat/bit.h",
+    "include/lib/stdcompat/functional.h",
+    "include/lib/stdcompat/memory.h",
+    "include/lib/stdcompat/optional.h",
+    "include/lib/stdcompat/type_traits.h",
+    "include/lib/stdcompat/utility.h",
+    "include/lib/stdcompat/version.h",
+  ]
+  sources = [
+    "include/lib/stdcompat/internal/bit.h",
+    "include/lib/stdcompat/internal/constructors.h",
+    "include/lib/stdcompat/internal/exception.h",
+    "include/lib/stdcompat/internal/storage.h",
+    "include/lib/stdcompat/internal/type_traits.h",
+    "include/lib/stdcompat/internal/utility.h",
+  ]
+}
diff --git a/third_party/fuzztest/configs/BUILD.gn b/third_party/fuzztest/configs/BUILD.gn
index da09a52..cea99b1 100644
--- a/third_party/fuzztest/configs/BUILD.gn
+++ b/third_party/fuzztest/configs/BUILD.gn
@@ -12,6 +12,10 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_third_party/fuzztest/fuzztest.gni")
+
 # Targets that include FuzzTest headers need to include this config.
 config("disabled_warnings") {
   cflags = [
@@ -21,3 +25,11 @@
   ]
   cflags_cc = [ "-Wno-extra-semi" ]
 }
+
+# Include path for FuzzTest.
+#
+# This is needed as FuzzTest is built in a dedicated toolchain, and
+# `public_configs` do not propagate across toolchain boundaries by default.
+config("public_include_path") {
+  include_dirs = [ "$dir_pw_third_party_fuzztest" ]
+}
diff --git a/third_party/fuzztest/docs.rst b/third_party/fuzztest/docs.rst
index 854175e..4518421 100644
--- a/third_party/fuzztest/docs.rst
+++ b/third_party/fuzztest/docs.rst
@@ -93,8 +93,8 @@
 
 .. code-block:: sh
 
-  python pw_build/py/pw_build/generate_3p_gn.py \
-    -w third_party/fuzztest/src
+   python pw_build/py/pw_build/generate_3p_gn.py \
+     -w third_party/fuzztest/src
 
 .. DO NOT EDIT BELOW THIS LINE. Generated section.
 
diff --git a/third_party/googletest/docs.rst b/third_party/googletest/docs.rst
index 2570b99..d5732ce 100644
--- a/third_party/googletest/docs.rst
+++ b/third_party/googletest/docs.rst
@@ -51,7 +51,10 @@
         ``pw_third_party.googletest``.
 
    .. tab-item:: Bazel
-      Set the following `label flags`_, either in your `target config`_ or on
+
+      Set the following :ref:`label flags <docs-build_system-bazel_flags>`,
+      either in your
+      :ref:`target config <docs-build_system-bazel_configuration>` or on
       the command line:
 
       * ``pw_unit_test_googletest_backend`` to
@@ -70,6 +73,3 @@
 
   Not all unit tests build properly with upstream GoogleTest yet. This is a
   work in progress.
-
-.. _target config: :ref:`_docs-build_system-bazel_configuration`
-.. _label flags: :ref:`_docs-build_system-bazel_flags`
diff --git a/third_party/nanopb/docs.rst b/third_party/nanopb/docs.rst
index 1af7211..360d50f 100644
--- a/third_party/nanopb/docs.rst
+++ b/third_party/nanopb/docs.rst
@@ -22,7 +22,7 @@
 
 .. code-block::
 
-  pw_third_party_nanopb_CONFIG = "$dir_pw_third_party/nanopb:disable_error_messages"
+   pw_third_party_nanopb_CONFIG = "$dir_pw_third_party/nanopb:disable_error_messages"
 
 
 This will add ``-DPB_NO_ERRMSG=1`` to the build, which disables error messages
diff --git a/third_party/pico_sdk/src/rp2_common/pico_multicore/BUILD.gn b/third_party/pico_sdk/src/rp2_common/pico_multicore/BUILD.gn
index 1e7833b..e1d4c4d 100644
--- a/third_party/pico_sdk/src/rp2_common/pico_multicore/BUILD.gn
+++ b/third_party/pico_sdk/src/rp2_common/pico_multicore/BUILD.gn
@@ -43,5 +43,5 @@
     "${PICO_ROOT}/src/rp2_common/pico_runtime",
   ]
   public = [ "${_CWD}/include/pico/multicore.h" ]
-  sources = [ "${_CWD}/pico_multicore.c" ]
+  sources = [ "${_CWD}/multicore.c" ]
 }
diff --git a/third_party/re2/configs/BUILD.gn b/third_party/re2/configs/BUILD.gn
index d4e4203..8874e7f 100644
--- a/third_party/re2/configs/BUILD.gn
+++ b/third_party/re2/configs/BUILD.gn
@@ -12,6 +12,10 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_third_party/re2/re2.gni")
+
 # Targets that include RE2 headers need to include this config.
 config("disabled_warnings") {
   cflags = [
@@ -26,9 +30,20 @@
   cflags = [
     "-Wno-c99-extensions",
     "-Wno-cast-qual",
+    "-Wno-dtor-name",
+    "-Wno-gnu-anonymous-struct",
+    "-Wno-nested-anon-types",
     "-Wno-shadow",
     "-Wno-switch-enum",
     "-Wno-unused-parameter",
   ]
-  configs = [ ":disabled_warnings" ]
+}
+
+# Include path for RE2.
+#
+# This is needed as the library is used to build FuzzTest in a dedicated
+# toolchain, and `public_configs` do not propagate across toolchain boundaries
+# by default.
+config("public_include_path") {
+  include_dirs = [ "$dir_pw_third_party_re2" ]
 }
diff --git a/ts/device/index.ts b/ts/device/index.ts
index 1b55751..825ffe7 100644
--- a/ts/device/index.ts
+++ b/ts/device/index.ts
@@ -25,11 +25,11 @@
 import { WebSerialTransport } from '../transport/web_serial_transport';
 import { ProtoCollection } from 'pigweedjs/pw_protobuf_compiler';
 
-function protoFieldToMethodName(string) {
-  return string.split('_').map(titleCase).join('');
+function protoFieldToMethodName(fieldName: string) {
+  return fieldName.split('_').map(titleCase).join('');
 }
-function titleCase(string) {
-  return string.charAt(0).toUpperCase() + string.slice(1);
+function titleCase(title: string) {
+  return title.charAt(0).toUpperCase() + title.slice(1);
 }
 
 export class Device {
@@ -76,26 +76,26 @@
     });
   }
 
-  getMethodArguments(fullPath) {
+  getMethodArguments(fullPath: string) {
     return this.nameToMethodArgumentsMap[fullPath];
   }
 
   private setupRpcs() {
     const rpcMap = {};
-    const channel = this.client.channel();
+    const channel = this.client.channel()!;
     const servicesKeys = Array.from(channel.services.keys());
     servicesKeys.forEach((serviceKey) => {
       setPathOnObject(
         rpcMap,
         serviceKey,
-        this.mapServiceMethods(channel.services.get(serviceKey)),
+        this.mapServiceMethods(channel.services.get(serviceKey)!),
       );
     });
     this.rpcs = rpcMap;
   }
 
   private mapServiceMethods(service: ServiceClient) {
-    const methodMap = {};
+    const methodMap: { [index: string]: any } = {};
     const methodKeys = Array.from(service.methodsByName.keys());
     methodKeys
       .filter(
@@ -106,7 +106,7 @@
       )
       .forEach((key) => {
         const fn = this.createMethodWrapper(
-          service.methodsByName.get(key),
+          service.methodsByName.get(key)!,
           key,
           `${service.name}.${key}`,
         );
@@ -133,6 +133,7 @@
         fullMethodPath,
       );
     }
+    throw new Error(`Unknown method: ${realMethod}`);
   }
 
   private createUnaryMethodWrapper(
@@ -144,8 +145,8 @@
       .getInputType()
       .replace(/^\./, '');
     const requestProtoDescriptor =
-      this.protoCollection.getDescriptorProto(requestType);
-    const requestFields = requestProtoDescriptor.getFieldList();
+      this.protoCollection.getDescriptorProto(requestType)!;
+    const requestFields = requestProtoDescriptor.getFieldList()!;
     const functionArguments = requestFields
       .map((field) => field.getName())
       .concat('return this(arguments);');
@@ -157,7 +158,7 @@
 
     // We create a new JS function dynamically here that takes
     // proto message fields as arguments and calls the actual RPC method.
-    const fn = new Function(...functionArguments).bind((args) => {
+    const fn = new Function(...functionArguments).bind((args: any[]) => {
       const request = new realMethod.method.requestType();
       requestFields.forEach((field, index) => {
         request[`set${titleCase(field.getName())}`](args[index]);
@@ -176,7 +177,7 @@
       .getInputType()
       .replace(/^\./, '');
     const requestProtoDescriptor =
-      this.protoCollection.getDescriptorProto(requestType);
+      this.protoCollection.getDescriptorProto(requestType)!;
     const requestFields = requestProtoDescriptor.getFieldList();
     const functionArguments = requestFields
       .map((field) => field.getName())
@@ -189,17 +190,19 @@
 
     // We create a new JS function dynamically here that takes
     // proto message fields as arguments and calls the actual RPC method.
-    const fn = new Function(...functionArguments).bind((args) => {
+    const fn = new Function(...functionArguments).bind((args: any[]) => {
       const request = new realMethod.method.requestType();
       requestFields.forEach((field, index) => {
         request[`set${protoFieldToMethodName(field.getName())}`](args[index]);
       });
       const callbacks = Array.from(args).slice(requestFields.length);
-      // @ts-ignore
       return realMethod.invoke(
         request,
+        // @ts-ignore
         callbacks[0],
+        // @ts-ignore
         callbacks[1],
+        // @ts-ignore
         callbacks[2],
       );
     });
diff --git a/ts/device/object_set.ts b/ts/device/object_set.ts
index f06bd73..7bc5f1d 100644
--- a/ts/device/object_set.ts
+++ b/ts/device/object_set.ts
@@ -12,26 +12,30 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
-function hasOwnProperty(obj: object, prop: number | string) {
+type ObjectType = {
+  [index: number | string]: any;
+};
+
+function hasOwnProperty(obj: ObjectType, prop: number | string) {
   if (obj == null) {
     return false;
   }
   //to handle objects with null prototypes (too edge case?)
   return Object.prototype.hasOwnProperty.call(obj, prop);
 }
-function hasShallowProperty(obj: object, prop: number | string) {
+function hasShallowProperty(obj: ObjectType, prop: number | string) {
   return (
     (typeof prop === 'number' && Array.isArray(obj)) ||
     hasOwnProperty(obj, prop)
   );
 }
 
-function getShallowProperty(obj: object, prop: number | string) {
+function getShallowProperty(obj: ObjectType, prop: number | string) {
   if (hasShallowProperty(obj, prop)) {
     return obj[prop];
   }
 }
-function getKey(key) {
+function getKey(key: string) {
   const intKey = parseInt(key);
   if (intKey.toString() === key) {
     return intKey;
@@ -40,7 +44,7 @@
 }
 
 export function setPathOnObject(
-  obj: object,
+  obj: ObjectType,
   path: number | string | Array<number | string>,
   value: any,
   doNotReplace: boolean = false,
diff --git a/ts/index_test.ts b/ts/index_test.ts
index 2299e69..bca65db 100644
--- a/ts/index_test.ts
+++ b/ts/index_test.ts
@@ -26,6 +26,11 @@
 } from '../dist/index.umd';
 
 import { ProtoCollection } from '../dist/protos/collection.umd';
+import {
+  createLogViewer,
+  MockLogSource,
+  PigweedRPCLogSource,
+} from '../dist/logging.umd';
 import * as fs from 'fs';
 
 describe('Pigweed Bundle', () => {
@@ -66,6 +71,19 @@
     expect(WebSerial.WebSerialTransport).toBeDefined();
   });
 
+  it('has log viewer exports defined', () => {
+    expect(createLogViewer).toBeDefined();
+    expect(typeof createLogViewer).toBe('function');
+
+    expect(MockLogSource).toBeDefined();
+    expect(typeof MockLogSource).toBe('function');
+    expect(MockLogSource.name).toBe('MockLogSource');
+
+    expect(PigweedRPCLogSource).toBeDefined();
+    expect(typeof PigweedRPCLogSource).toBe('function');
+    expect(PigweedRPCLogSource.name).toBe('PigweedRPCLogSource');
+  });
+
   it('is not referring to any outside Pigweed modules', () => {
     const requireString = "require('pigweedjs";
     const file = fs.readFileSync(require.resolve('../dist/index.umd'));