Merge remote-tracking branch 'goog/upstream-master' into repo

Test: N/A - merging from upstream-master branch

Change-Id: I0910d4d996f5da4e847b0c1c43ce7ab716ad9dd7
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..9197fde
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,3 @@
+* text=auto
+# Force batch scripts to use CRLF.
+*.bat text eol=crlf
diff --git a/.gitignore b/.gitignore
index e66aa11..33e6059 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,6 +12,8 @@
 .clangd/
 *.swp
 *.swo
+*flycheck_*
+*_flymake.*
 
 # Python
 *.pyc
@@ -21,6 +23,8 @@
 .cache/
 .mypy_cache/
 __pycache__/
+build/
+dist/
 
 # PyOxidizer
 pw_env_setup/py/oxidizer/build
diff --git a/.pylintrc b/.pylintrc
index 2feb460..09055b8 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -3,13 +3,14 @@
 # A comma-separated list of package or module names from where C extensions may
 # be loaded. Extensions are loading into the active Python interpreter and may
 # run arbitrary code.
+# TODO(pwbug/280) Change "whitelist" to "allowlist". (Blocked on pylint.)
 extension-pkg-whitelist=mypy
 
-# Add files or directories to the blacklist. They should be base names, not
+# Add files or directories to the blocklist. They should be base names, not
 # paths.
 ignore=CVS
 
-# Add files or directories matching the regex patterns to the blacklist. The
+# Add files or directories matching the regex patterns to the blocklist. The
 # regex matches against base names, not paths.
 ignore-patterns=
 
@@ -60,14 +61,16 @@
 # --enable=similarities". If you want to run only the classes checker, but have
 # no Warning level messages displayed, use "--disable=all --enable=classes
 # --disable=W".
-disable=fixme,
-        subprocess-run-check
+disable=bad-continuation,  # Rely on yapf for formatting
+        fixme,
+        subprocess-run-check,
+        raise-missing-from,
 
 # Enable the message, report, category or checker with the given id(s). You can
 # either give multiple identifier separated by comma (,) or put this option
 # multiple time (only on the command line, not in the configuration file where
 # it should appear only once). See also the "--disable" option for examples.
-enable=c-extension-no-member
+enable=c-extension-no-member,
 
 
 [REPORTS]
@@ -86,7 +89,7 @@
 # Set the output format. Available formats are text, parseable, colorized, json
 # and msvs (visual studio). You can also give a reporter class, e.g.
 # mypackage.mymodule.MyReporterClass.
-output-format=text
+output-format=colorized
 
 # Tells whether to display a full report or only the messages.
 reports=no
@@ -384,7 +387,8 @@
 ignored-classes=optparse.Values,
                 thread._local,
                 _thread._local,
-                pw_cli.envparse.EnvNamespace
+                pw_cli.envparse.EnvNamespace,
+                pw_rpc.packet_pb2.RpcPacket
 
 # List of module names for which member attributes should not be checked
 # (useful for modules/projects where namespaces are manipulated during runtime
diff --git a/BUILD.gn b/BUILD.gn
index 73a08f1..400aac6 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -12,10 +12,11 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
+import("$dir_pw_arduino_build/arduino.gni")
 import("$dir_pw_build/host_tool.gni")
+import("$dir_pw_build/python.gni")
 import("$dir_pw_docgen/docs.gni")
 import("$dir_pw_toolchain/generate_toolchain.gni")
 import("$dir_pw_unit_test/test.gni")
@@ -23,13 +24,13 @@
 # Main build file for upstream Pigweed.
 
 declare_args() {
-  # The optimization level to use when building upstream Pigweed targets.
+  # The default optimization level for building upstream Pigweed targets.
   #
   # Choices:
   #   debug
   #   size_optimized
   #   speed_optimized
-  pw_optimization_level = "debug"
+  pw_default_optimization_level = "debug"
 
   # List of application image GN targets specific to the Pigweed target.
   pw_TARGET_APPLICATIONS = []
@@ -42,46 +43,118 @@
   deps = [
     ":docs",
     ":host",
+    ":python.lint",
+    ":python.tests",
+    ":stm32f429i",
+    ":target_support_packages.lint",
+    ":target_support_packages.tests",
   ]
 }
 
+# This template generates a group that builds pigweed_default with a particular
+# toolchain.
+template("_build_pigweed_default_at_all_optimization_levels") {
+  _toolchain_prefix = invoker.toolchain_prefix
+
+  group(target_name) {
+    deps = [
+      ":pigweed_default(${_toolchain_prefix}$pw_default_optimization_level)",
+    ]
+  }
+
+  foreach(optimization,
+          [
+            "debug",
+            "size_optimized",
+            "speed_optimized",
+          ]) {
+    group(target_name + "_$optimization") {
+      deps = [ ":pigweed_default($_toolchain_prefix$optimization)" ]
+    }
+  }
+}
+
+# Select a default toolchain based on host OS.
+if (host_os == "linux") {
+  _default_toolchain_prefix = "$dir_pigweed/targets/host:host_clang_"
+} else if (host_os == "mac") {
+  _default_toolchain_prefix = "$dir_pigweed/targets/host:host_clang_"
+} else if (host_os == "win") {
+  _default_toolchain_prefix = "$dir_pigweed/targets/host:host_gcc_"
+} else {
+  assert(false, "Please define a host config for your system: $host_os")
+}
+
 # Below are a list of GN targets you can build to force Pigweed to build for a
 # specific Pigweed target.
-group("host") {
-  # Auto select a toolchain based on host OS.
-  if (host_os == "linux") {
-    _default_host_toolchain = ":host_clang"
-  } else if (host_os == "mac") {
-    _default_host_toolchain = ":host_clang"
-  } else if (host_os == "win") {
-    _default_host_toolchain = ":host_gcc"
-  } else {
-    assert(false, "Please define a host config for your system: $host_os")
+_build_pigweed_default_at_all_optimization_levels("host") {
+  toolchain_prefix = _default_toolchain_prefix
+}
+
+_build_pigweed_default_at_all_optimization_levels("host_clang") {
+  toolchain_prefix = "$dir_pigweed/targets/host:host_clang_"
+}
+
+_build_pigweed_default_at_all_optimization_levels("host_gcc") {
+  toolchain_prefix = "$dir_pigweed/targets/host:host_gcc_"
+}
+
+_build_pigweed_default_at_all_optimization_levels("stm32f429i") {
+  toolchain_prefix = "$dir_pigweed/targets/stm32f429i-disc1:stm32f429i_disc1_"
+}
+
+if (dir_pw_third_party_arduino != "") {
+  _build_pigweed_default_at_all_optimization_levels("arduino") {
+    toolchain_prefix = "$dir_pigweed/targets/arduino:arduino_"
   }
-
-  deps = [ _default_host_toolchain ]
 }
 
-group("host_clang") {
-  deps = [ ":pigweed_default($dir_pigweed/targets/host:host_clang_$pw_optimization_level)" ]
-}
-
-group("host_gcc") {
-  deps = [ ":pigweed_default($dir_pigweed/targets/host:host_gcc_$pw_optimization_level)" ]
-}
-
-group("stm32f429i") {
-  deps = [ ":pigweed_default($dir_pigweed/targets/stm32f429i-disc1:stm32f429i_disc1_$pw_optimization_level)" ]
-}
-
-group("qemu") {
-  deps = [ ":pigweed_default($dir_pigweed/targets/lm3s6965evb-qemu:lm3s6965evb_qemu_$pw_optimization_level)" ]
+_build_pigweed_default_at_all_optimization_levels("qemu") {
+  toolchain_prefix = "$dir_pigweed/targets/lm3s6965evb-qemu:lm3s6965evb_qemu_"
 }
 
 group("docs") {
   deps = [ ":pigweed_default($dir_pigweed/targets/docs)" ]
 }
 
+pw_python_group("python") {
+  python_deps = [
+    # Python packages
+    "$dir_pw_allocator/py",
+    "$dir_pw_arduino_build/py",
+    "$dir_pw_bloat/py",
+    "$dir_pw_build/py",
+    "$dir_pw_cli/py",
+    "$dir_pw_docgen/py",
+    "$dir_pw_doctor/py",
+    "$dir_pw_env_setup/py",
+    "$dir_pw_hdlc_lite/py",
+    "$dir_pw_module/py",
+    "$dir_pw_package/py",
+    "$dir_pw_presubmit/py",
+    "$dir_pw_protobuf/py",
+    "$dir_pw_protobuf_compiler/py",
+    "$dir_pw_rpc/py",
+    "$dir_pw_status/py",
+    "$dir_pw_tokenizer/py",
+    "$dir_pw_trace/py",
+    "$dir_pw_trace_tokenized/py",
+    "$dir_pw_unit_test/py",
+    "$dir_pw_watch/py",
+
+    # Standalone scripts
+    "$dir_pw_hdlc_lite/rpc_example:example_script",
+  ]
+}
+
+# Python packages for supporting specific targets.
+pw_python_group("target_support_packages") {
+  python_deps = [
+    "$dir_pigweed/targets/lm3s6965evb-qemu/py",
+    "$dir_pigweed/targets/stm32f429i-disc1/py",
+  ]
+}
+
 # By default, Pigweed will build this target when invoking ninja.
 group("pigweed_default") {
   deps = []
@@ -123,7 +196,7 @@
 if (current_toolchain != default_toolchain) {
   group("apps") {
     # Application images built for all targets.
-    deps = []
+    deps = [ "$dir_pw_hdlc_lite/rpc_example" ]
 
     # Add target-specific images.
     deps += pw_TARGET_APPLICATIONS
@@ -147,6 +220,7 @@
       "$dir_pw_checksum",
       "$dir_pw_cpu_exception",
       "$dir_pw_hdlc_lite",
+      "$dir_pw_metric",
       "$dir_pw_polyfill",
       "$dir_pw_preprocessor",
       "$dir_pw_protobuf",
@@ -183,12 +257,16 @@
       "$dir_pw_bytes:tests",
       "$dir_pw_checksum:tests",
       "$dir_pw_containers:tests",
+      "$dir_pw_cpu_exception_armv7m:tests",
       "$dir_pw_fuzzer:tests",
       "$dir_pw_hdlc_lite:tests",
       "$dir_pw_hex_dump:tests",
       "$dir_pw_log:tests",
+      "$dir_pw_log_null:tests",
+      "$dir_pw_log_rpc:tests",
       "$dir_pw_log_tokenized:tests",
       "$dir_pw_malloc_freelist:tests",
+      "$dir_pw_metric:tests",
       "$dir_pw_polyfill:tests",
       "$dir_pw_preprocessor:tests",
       "$dir_pw_protobuf:tests",
@@ -208,13 +286,6 @@
       "$dir_pw_varint:tests",
     ]
 
-    import("$dir_pw_cpu_exception/backend.gni")
-
-    # TODO(pwbug/17): Re-think when Pigweed config system is added.
-    if (pw_cpu_exception_ENTRY_BACKEND == dir_pw_cpu_exception_armv7m) {
-      group_deps += [ "$dir_pw_cpu_exception_armv7m:tests" ]
-    }
-
     if (defined(pw_toolchain_SCOPE.is_host_toolchain) &&
         pw_toolchain_SCOPE.is_host_toolchain) {
       # TODO(pwbug/196): KVS tests are not compatible with device builds as they
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 18e45c4..1199180 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -14,28 +14,34 @@
 
 project(Pigweed)
 
-cmake_minimum_required(VERSION 3.14)
-
-include(pw_build/pigweed.cmake)
+cmake_minimum_required(VERSION 3.16)
 
 add_subdirectory(pw_assert)
 add_subdirectory(pw_assert_basic)
+add_subdirectory(pw_assert_log)
 add_subdirectory(pw_base64)
 add_subdirectory(pw_blob_store)
+add_subdirectory(pw_build)
 add_subdirectory(pw_bytes)
 add_subdirectory(pw_checksum)
 add_subdirectory(pw_containers)
 add_subdirectory(pw_cpu_exception)
 add_subdirectory(pw_cpu_exception_armv7m)
+add_subdirectory(pw_hdlc_lite)
 add_subdirectory(pw_kvs)
 add_subdirectory(pw_log)
 add_subdirectory(pw_log_basic)
 add_subdirectory(pw_log_tokenized)
 add_subdirectory(pw_minimal_cpp_stdlib)
 add_subdirectory(pw_polyfill)
+add_subdirectory(pw_protobuf)
 add_subdirectory(pw_preprocessor)
+add_subdirectory(pw_random)
+add_subdirectory(pw_result)
+add_subdirectory(pw_rpc)
 add_subdirectory(pw_span)
 add_subdirectory(pw_status)
+add_subdirectory(pw_stream)
 add_subdirectory(pw_string)
 add_subdirectory(pw_sys_io)
 add_subdirectory(pw_sys_io_stdio)
@@ -43,3 +49,5 @@
 add_subdirectory(pw_trace)
 add_subdirectory(pw_unit_test)
 add_subdirectory(pw_varint)
+
+add_subdirectory(third_party/nanopb)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index f27450e..e24d963 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -136,7 +136,7 @@
 $ pw presubmit --program quick
 ```
 
-![pigweed presubmit demonstration](docs/images/pw_presubmit_demo.gif)
+![pigweed presubmit demonstration](pw_presubmit/docs/pw_presubmit_demo.gif)
 
 Running `pw presubmit` manually will default to running the `full` presubmit
 program.
diff --git a/OWNERS b/OWNERS
index 1502d13..f584bdc 100644
--- a/OWNERS
+++ b/OWNERS
@@ -1,5 +1,6 @@
 amontanez@google.com
 davidrogers@google.com
+ewout@google.com
 frolv@google.com
 hepler@google.com
 jethier@google.com
diff --git a/PW_PLUGINS b/PW_PLUGINS
index 629c63f..f588ded 100644
--- a/PW_PLUGINS
+++ b/PW_PLUGINS
@@ -12,3 +12,6 @@
 
 # Pigweed's presubmit check script
 presubmit pw_presubmit.pigweed_presubmit main
+heap-viewer pw_allocator.heap_viewer main
+rpc pw_hdlc_lite.rpc_console main
+package pw_package.pigweed_packages main
diff --git a/README.md b/README.md
index ddda493..0938a5b 100644
--- a/README.md
+++ b/README.md
@@ -70,7 +70,7 @@
 configurations defined by your project. `pw format` leverages existing tools
 like `clang-format`, and it’s simple to add support for new languages.
 
-![pw presubmit demo](docs/images/pw_presubmit_demo.gif)
+![pw presubmit demo](pw_presubmit/docs/pw_presubmit_demo.gif)
 
 ## `pw_env_setup` - Cross platform embedded compiler setup
 
diff --git a/WORKSPACE b/WORKSPACE
index 68c001c..9f53c87 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -21,10 +21,9 @@
 load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
 http_archive(
     name = "build_bazel_rules_nodejs",
-    sha256 = "f9e7b9f42ae202cc2d2ce6d698ccb49a9f7f7ea572a78fd451696d03ef2ee116",
-    urls = ["https://github.com/bazelbuild/rules_nodejs/releases/download/1.6.0/rules_nodejs-1.6.0.tar.gz"],
+    sha256 = "4952ef879704ab4ad6729a29007e7094aef213ea79e9f2e94cbe1c9a753e63ef",
+    urls = ["https://github.com/bazelbuild/rules_nodejs/releases/download/2.2.0/rules_nodejs-2.2.0.tar.gz"],
 )
-
 # Get the latest LTS version of Node
 load("@build_bazel_rules_nodejs//:index.bzl", "node_repositories")
 node_repositories(package_json = ["//:package.json"])
@@ -37,17 +36,8 @@
     yarn_lock = "//:yarn.lock",
 )
 
-# Install Bazel rules from npm packages
-load("@npm//:install_bazel_dependencies.bzl", "install_bazel_dependencies")
-install_bazel_dependencies()
-
-
-# Set up TypeScript
-load("@npm_bazel_typescript//:index.bzl", "ts_setup_workspace")
-ts_setup_workspace()
-
 # Set up Karma
-load("@npm_bazel_karma//:package.bzl", "npm_bazel_karma_dependencies")
+load("@npm//@bazel/karma:package.bzl", "npm_bazel_karma_dependencies")
 npm_bazel_karma_dependencies()
 
 load("@io_bazel_rules_webtesting//web:repositories.bzl", "web_test_repositories")
diff --git a/bootstrap.bat b/bootstrap.bat
index 9532775..29146b6 100644
--- a/bootstrap.bat
+++ b/bootstrap.bat
@@ -23,87 +23,108 @@
 
 :: Pigweed Windows environment setup.
 
+:: WARNING: Multi-line "if" statements can be dangerous!
+::
+:: Example:
+::  call do_foo
+::  if [expression] (
+::    call cmd_a
+::    set my_var = %ERRORLEVEL%
+::    call final_script --flag %my_var%
+::  )
+:: Batch evaluates these expressions in a way that will produce unexpected
+:: behavior. It appears that when each line is executed, it does not affect
+:: local context until the entire expression is complete. In this example,
+:: ERRORLEVEL does not reflect `call cmd_a`, but whatever residual state was
+:: present from `do_foo`. Similarly, in the call to `final_script`, `my_var`
+:: will NOT be valid as the variable `set` doesn't apply until the entire `if`
+:: expression completes.
+:: This script only uses multi-line if statements to `goto` after an operation.
+
 :: If PW_CHECKOUT_ROOT is set, use it. Users should not set this variable.
 :: It's used because when one batch script invokes another the Powershell magic
 :: below doesn't work. To reinforce that users should not be using
 :: PW_CHECKOUT_ROOT, it is cleared here after it is used, and other pw tools
 :: will complain if they see that variable set.
 :: TODO(mohrr) find out a way to do this without PW_CHECKOUT_ROOT.
-if "%PW_CHECKOUT_ROOT%"=="" (
-  :: ~dp0 is the batchism for the directory in which a .bat file resides.
-  set "PW_ROOT=%~dp0"
-) else (
-  set "PW_ROOT=%PW_CHECKOUT_ROOT%"
-  set PW_CHECKOUT_ROOT=
-)
 
+:: ~dp0 is the batchism for the directory in which a .bat file resides.
+if "%PW_CHECKOUT_ROOT%"=="" ^
+set "PW_ROOT=%~dp0." &^
+goto select_python
+
+:: Since PW_CHECKOUT_ROOT is set, use it.
+set "PW_ROOT=%PW_CHECKOUT_ROOT%"
+set "PW_CHECKOUT_ROOT="
+
+:select_python
 :: Allow forcing a specific Python version through the environment variable
 :: PW_BOOTSTRAP_PYTHON. Otherwise, use the system Python if one exists.
 if not "%PW_BOOTSTRAP_PYTHON%" == "" (
   set "python=%PW_BOOTSTRAP_PYTHON%"
-) else (
-  where python >NUL 2>&1
-  if %ERRORLEVEL% EQU 0 (
-    set python=python
-  ) else (
-    echo.
-    echo Error: no system Python present
-    echo.
-    echo   Pigweed's bootstrap process requires a local system Python.
-    echo   Please install Python on your system, add it to your PATH
-    echo   and re-try running bootstrap.
-    goto finish
-  )
+  goto find_environment_root
 )
 
+:: Detect python installation.
+where python >NUL 2>&1
+if %ERRORLEVEL% EQU 0 (
+  set "python=python"
+  goto find_environment_root
+)
+
+echo.
+echo Error: no system Python present
+echo.
+echo   Pigweed's bootstrap process requires a local system Python.
+echo   Please install Python on your system, add it to your PATH
+echo   and re-try running bootstrap.
+goto finish
+
+
+:find_environment_root
 :: PW_ENVIRONMENT_ROOT allows developers to specify where the environment should
-:: be installed. _PW_ACTUAL_ENVIRONMENT_ROOT is where Pigweed keeps that 
-:: information. This separation allows Pigweed to assume PW_ENVIRONMENT_ROOT 
-:: came from the developer and not from a previous bootstrap possibly from 
+:: be installed. _PW_ACTUAL_ENVIRONMENT_ROOT is where Pigweed keeps that
+:: information. This separation allows Pigweed to assume PW_ENVIRONMENT_ROOT
+:: came from the developer and not from a previous bootstrap possibly from
 :: another workspace.
+
+:: Not prefixing environment with "." since that doesn't hide it anyway.
 if "%PW_ENVIRONMENT_ROOT%"=="" (
-   :: Not prefixing environment with "." since that doesn't hide it anyway.
-   set "_PW_ACTUAL_ENVIRONMENT_ROOT=%PW_ROOT%\environment"
+  set "_PW_ACTUAL_ENVIRONMENT_ROOT=%PW_ROOT%\environment"
 ) else (
-   set "_PW_ACTUAL_ENVIRONMENT_ROOT=%PW_ENVIRONMENT_ROOT%"
+  set "_PW_ACTUAL_ENVIRONMENT_ROOT=%PW_ENVIRONMENT_ROOT%"
 )
+
 set "shell_file=%_PW_ACTUAL_ENVIRONMENT_ROOT%\activate.bat"
 
-set _PW_OLD_CIPD_PACKAGE_FILES=%PW_CIPD_PACKAGE_FILES%
-set _PW_OLD_VIRTUALENV_REQUIREMENTS=%PW_VIRTUALENV_REQUIREMENTS%
-set _PW_OLD_VIRTUALENV_SETUP_PY_ROOTS=%PW_VIRTUALENV_SETUP_PY_ROOTS%
-set _PW_OLD_CARGO_PACKAGE_FILES=%PW_CARGO_PACKAGE_FILES%
-
-set PW_CIPD_PACKAGE_FILES=%PW_ROOT%\pw_env_setup\py\pw_env_setup\cipd_setup\pigweed.json;%PW_ROOT%\pw_env_setup\py\pw_env_setup\cipd_setup\luci.json;%PW_CIPD_PACKAGE_FILES%
-set PW_VIRTUALENV_REQUIREMENTS=%PW_ROOT%\pw_env_setup\py\pw_env_setup\virtualenv_setup\requirements.txt;%PW_VIRTUALENV_REQUIREMENTS%
-set PW_VIRTUALENV_SETUP_PY_ROOTS=%PW_ROOT%;%PW_VIRTUALENV_SETUP_PY_ROOTS%
-set PW_CARGO_PACKAGE_FILES=%PW_ROOT%\pw_env_setup\py\pw_env_setup\cargo_setup\packages.txt;%PW_CARGO_PACKAGE_FILES%
-
 set "_pw_start_script=%PW_ROOT%\pw_env_setup\py\pw_env_setup\windows_env_start.py"
 
+if "%PW_PROJECT_ROOT%"=="" set "PW_PROJECT_ROOT=%PW_ROOT%"
+
 :: If PW_SKIP_BOOTSTRAP is set, only run the activation stage instead of the
 :: complete env_setup.
-if "%PW_SKIP_BOOTSTRAP%" == "" (
-  :: Without the trailing slash in %PW_ROOT%/, batch combines that token with
-  :: the --shell-file argument.
-  call "%python%" "%PW_ROOT%\pw_env_setup\py\pw_env_setup\env_setup.py" ^
-      --pw-root "%PW_ROOT%/" ^
-      --shell-file "%shell_file%" ^
-      --install-dir "%_PW_ACTUAL_ENVIRONMENT_ROOT%"
+if not "%PW_SKIP_BOOTSTRAP%" == "" goto skip_bootstrap
+
+:: Without the trailing slash in %PW_ROOT%/, batch combines that token with
+:: the --shell-file argument.
+call "%python%" "%PW_ROOT%\pw_env_setup\py\pw_env_setup\env_setup.py" ^
+    --pw-root "%PW_ROOT%" ^
+    --shell-file "%shell_file%" ^
+    --install-dir "%_PW_ACTUAL_ENVIRONMENT_ROOT%" ^
+    --use-pigweed-defaults ^
+    --virtualenv-gn-target "%PW_ROOT%#:target_support_packages.install" ^
+    --project-root "%PW_PROJECT_ROOT%"
+goto activate_shell
+
+:skip_bootstrap
+if exist "%shell_file%" (
+  call "%python%" "%_pw_start_script%"
 ) else (
-  if exist "%shell_file%" (
-    call "%python%" "%_pw_start_script%"
-  ) else (
-    call "%python%" "%_pw_start_script%" --no-shell-file
-    goto finish
-  )
+  call "%python%" "%_pw_start_script%" --no-shell-file
+  goto finish
 )
 
-set PW_CIPD_PACKAGE_FILES=%_PW_OLD_CIPD_PACKAGE_FILES%
-set PW_VIRTUALENV_REQUIREMENTS=%_PW_OLD_VIRTUALENV_REQUIREMENTS%
-set PW_VIRTUALENV_SETUP_PY_ROOTS=%_PW_OLD_VIRTUALENV_SETUP_PY_ROOTS%
-set PW_CARGO_PACKAGE_FILES=%_PW_OLD_CARGO_PACKAGE_FILES%
-
+:activate_shell
 call "%shell_file%"
 
 :finish
diff --git a/bootstrap.sh b/bootstrap.sh
index 11e44e1..6f0c63b 100644
--- a/bootstrap.sh
+++ b/bootstrap.sh
@@ -14,88 +14,10 @@
 
 # This script must be tested on bash, zsh, and dash.
 
-_pw_abspath () {
+_bootstrap_abspath () {
   python -c "import os.path; print(os.path.abspath('$@'))"
 }
 
-
-# Note: Colors are unfortunately duplicated in several places; and removing the
-# duplication is not easy. Their locations are:
-#
-#   - bootstrap.sh
-#   - pw_cli/color.py
-#   - pw_env_setup/py/pw_env_setup/colors.py
-#
-# So please keep them matching then modifying them.
-_pw_none() {
-  echo -e "$*"
-}
-
-_pw_red() {
-  echo -e "\033[0;31m$*\033[0m"
-}
-
-_pw_bold_red() {
-  echo -e "\033[1;31m$*\033[0m"
-}
-
-_pw_yellow() {
-  echo -e "\033[0;33m$*\033[0m"
-}
-
-_pw_bold_yellow() {
-  echo -e "\033[1;33m$*\033[0m"
-}
-
-_pw_green() {
-  echo -e "\033[0;32m$*\033[0m"
-}
-
-_pw_bold_green() {
-  echo -e "\033[1;32m$*\033[0m"
-}
-
-_pw_blue() {
-  echo -e "\033[1;34m$*\033[0m"
-}
-
-_pw_cyan() {
-  echo -e "\033[1;36m$*\033[0m"
-}
-
-_pw_magenta() {
-  echo -e "\033[0;35m$*\033[0m"
-}
-
-_pw_bold_white() {
-  echo -e "\033[1;37m$*\033[0m"
-}
-
-# Note: This banner is duplicated in three places; which is a lesser evil than
-# the contortions that would be needed to share this snippet acros shell,
-# batch, and Python. Locations:
-#
-#   - bootstrap.sh
-#   - pw_cli/branding.py
-#   - pw_env_setup/py/pw_env_setup/windows_env_start.py
-#
-_PW_BANNER=$(cat <<EOF
- ▒█████▄   █▓  ▄███▒  ▒█    ▒█ ░▓████▒ ░▓████▒ ▒▓████▄
-  ▒█░  █░ ░█▒ ██▒ ▀█▒ ▒█░ █ ▒█  ▒█   ▀  ▒█   ▀  ▒█  ▀█▌
-  ▒█▄▄▄█░ ░█▒ █▓░ ▄▄░ ▒█░ █ ▒█  ▒███    ▒███    ░█   █▌
-  ▒█▀     ░█░ ▓█   █▓ ░█░ █ ▒█  ▒█   ▄  ▒█   ▄  ░█  ▄█▌
-  ▒█      ░█░ ░▓███▀   ▒█▓▀▓█░ ░▓████▒ ░▓████▒ ▒▓████▀
-EOF
-)
-
-# Support customizing the branding with a different banner and color.
-if test -f "$PW_BRANDING_BANNER"; then
-  _PW_BANNER=$(cat $PW_BRANDING_BANNER)
-fi
-if test -z "$PW_BRANDING_BANNER_COLOR"; then
-  PW_BRANDING_BANNER_COLOR=magenta
-fi
-
 # Users are not expected to set PW_CHECKOUT_ROOT, it's only used because it
 # seems to be impossible to reliably determine the path to a sourced file in
 # dash when sourced from a dash script instead of a dash interactive prompt.
@@ -104,21 +26,23 @@
 # variable set.
 # TODO(mohrr) find out a way to do this without PW_CHECKOUT_ROOT.
 if test -n "$PW_CHECKOUT_ROOT"; then
-  PW_SETUP_SCRIPT_PATH="$(_pw_abspath "$PW_CHECKOUT_ROOT/bootstrap.sh")"
+  _BOOTSTRAP_PATH="$(_bootstrap_abspath "$PW_CHECKOUT_ROOT/bootstrap.sh")"
+  # Downstream projects need to set PW_CHECKOUT_ROOT to point to Pigweed if
+  # they're using Pigweed's CI/CQ system.
   unset PW_CHECKOUT_ROOT
 # Shell: bash.
 elif test -n "$BASH"; then
-  PW_SETUP_SCRIPT_PATH="$(_pw_abspath "$BASH_SOURCE")"
+  _BOOTSTRAP_PATH="$(_bootstrap_abspath "$BASH_SOURCE")"
 # Shell: zsh.
 elif test -n "$ZSH_NAME"; then
-  PW_SETUP_SCRIPT_PATH="$(_pw_abspath "${(%):-%N}")"
+  _BOOTSTRAP_PATH="$(_bootstrap_abspath "${(%):-%N}")"
 # Shell: dash.
 elif test ${0##*/} = dash; then
-  PW_SETUP_SCRIPT_PATH="$(_pw_abspath \
+  _BOOTSTRAP_PATH="$(_bootstrap_abspath \
     "$(lsof -p $$ -Fn0 | tail -1 | sed 's#^[^/]*##;')")"
 # If everything else fails, try $0. It could work.
 else
-  PW_SETUP_SCRIPT_PATH="$(_pw_abspath "$0")"
+  _BOOTSTRAP_PATH="$(_bootstrap_abspath "$0")"
 fi
 
 # Check if this file is being executed or sourced.
@@ -139,143 +63,42 @@
   case ${0##*/} in sh|dash) _pw_sourced=1;; esac
 fi
 
-if [ "$_pw_sourced" -eq 0 ]; then
-  _PW_NAME=$(basename "$PW_SETUP_SCRIPT_PATH" .sh)
-  _pw_bold_red "Error: Attempting to $_PW_NAME in a subshell"
-  _pw_red "  Since $_PW_NAME.sh modifies your shell's environment variables, it"
-  _pw_red "  must be sourced rather than executed. In particular, "
-  _pw_red "  'bash $_PW_NAME.sh' will not work since the modified environment "
-  _pw_red "  will get destroyed at the end of the script. Instead, source the "
-  _pw_red "  script's contents in your shell:"
-  _pw_red ""
-  _pw_red "    \$ source $_PW_NAME.sh"
-  exit 1
-fi
-
-PW_ROOT="$(dirname "$PW_SETUP_SCRIPT_PATH")"
-
-if [[ "$PW_ROOT" = *" "* ]]; then
-  _pw_bold_red "Error: The Pigweed path contains spaces\n"
-  _pw_red "  The path '$PW_ROOT' contains spaces. "
-  _pw_red "  Pigweed's Python environment currently requires Pigweed to be "
-  _pw_red "  at a path without spaces. Please checkout Pigweed in a directory "
-  _pw_red "  without spaces and retry running bootstrap."
-  return
-fi
-
+# Downstream projects need to set something other than PW_ROOT here, like
+# YOUR_PROJECT_ROOT. Please also set PW_ROOT before invoking pw_bootstrap or
+# pw_activate.
+PW_ROOT="$(dirname "$_BOOTSTRAP_PATH")"
 export PW_ROOT
 
-# PW_ENVIRONMENT_ROOT allows developers to specify where the environment should
-# be installed. _PW_ACTUAL_ENVIRONMENT_ROOT is where Pigweed keeps that 
-# information. This separation allows Pigweed to assume PW_ENVIRONMENT_ROOT 
-# came from the developer and not from a previous bootstrap possibly from 
-# another workspace.
-if [ -z "$PW_ENVIRONMENT_ROOT" ]; then
-  _PW_ACTUAL_ENVIRONMENT_ROOT="$PW_ROOT/.environment"
-  export _PW_ACTUAL_ENVIRONMENT_ROOT
-else
-  _PW_ACTUAL_ENVIRONMENT_ROOT="$PW_ENVIRONMENT_ROOT"
-  export _PW_ACTUAL_ENVIRONMENT_ROOT
-fi
+# Please also set PW_PROJECT_ROOT to YOUR_PROJECT_ROOT.
+PW_PROJECT_ROOT="$PW_ROOT"
+export PW_PROJECT_ROOT
+
+. "$PW_ROOT/pw_env_setup/util.sh"
+
+pw_deactivate
+pw_eval_sourced "$_pw_sourced"
+pw_check_root "$PW_ROOT"
+_PW_ACTUAL_ENVIRONMENT_ROOT="$(pw_get_env_root)"
+export _PW_ACTUAL_ENVIRONMENT_ROOT
 SETUP_SH="$_PW_ACTUAL_ENVIRONMENT_ROOT/activate.sh"
 
-if [ -z "$PW_ENVSETUP_QUIET" ] && [ -z "$PW_ENVSETUP_NO_BANNER" ]; then
-  _pw_green "\n  WELCOME TO...\n"
-  "_pw_$PW_BRANDING_BANNER_COLOR" "$_PW_BANNER\n"
-fi
+# Downstream projects may wish to set PW_BANNER_FUNC to a function that prints
+# an ASCII art banner here.
 
 # Run full bootstrap when invoked as bootstrap, or env file is missing/empty.
-[ "$(basename "$PW_SETUP_SCRIPT_PATH")" = "bootstrap.sh" ] || \
+if [ "$(basename "$_BOOTSTRAP_PATH")" = "bootstrap.sh" ] || \
   [ ! -f "$SETUP_SH" ] || \
-  [ ! -s "$SETUP_SH" ]
-_PW_IS_BOOTSTRAP="$?"
-
-if [ "$_PW_IS_BOOTSTRAP" -eq 0 ]; then
-  _PW_NAME="bootstrap"
-
-  if [ -z "$PW_ENVSETUP_QUIET" ]; then
-    _pw_green "  BOOTSTRAP! Bootstrap may take a few minutes; please be patient.\n"
-  fi
-
-  # Allow forcing a specific version of Python for testing pursposes.
-  if [ -n "$PW_BOOTSTRAP_PYTHON" ]; then
-    PYTHON="$PW_BOOTSTRAP_PYTHON"
-  elif which python &> /dev/null; then
-    PYTHON=python
-  else
-    _pw_bold_red "Error: No system Python present\n"
-    _pw_red "  Pigweed's bootstrap process requires a local system Python."
-    _pw_red "  Please install Python on your system, add it to your PATH"
-    _pw_red "  and re-try running bootstrap."
-    return
-  fi
-
-  _PW_OLD_CIPD_PACKAGE_FILES="$PW_CIPD_PACKAGE_FILES"
-  PW_CIPD_PACKAGE_FILES="$PW_ROOT/pw_env_setup/py/pw_env_setup/cipd_setup/pigweed.json:$PW_ROOT/pw_env_setup/py/pw_env_setup/cipd_setup/luci.json:$PW_CIPD_PACKAGE_FILES"
-  export PW_CIPD_PACKAGE_FILES
-
-  _PW_OLD_VIRTUALENV_REQUIREMENTS="$PW_VIRTUALENV_REQUIREMENTS"
-  PW_VIRTUALENV_REQUIREMENTS="$PW_ROOT/pw_env_setup/py/pw_env_setup/virtualenv_setup/requirements.txt:$PW_VIRTUALENV_REQUIREMENTS"
-  export PW_VIRTUALENV_REQUIREMENTS
-
-  _PW_OLD_VIRTUALENV_SETUP_PY_ROOTS="$PW_VIRTUALENV_SETUP_PY_ROOTS"
-  PW_VIRTUALENV_SETUP_PY_ROOTS="$PW_ROOT/*:$PW_VIRTUALENV_SETUP_PY_ROOTS"
-  export PW_VIRTUALENV_SETUP_PY_ROOTS
-
-  _PW_OLD_CARGO_PACKAGE_FILES="$PW_CARGO_PACKAGE_FILES"
-  PW_CARGO_PACKAGE_FILES="$PW_ROOT/pw_env_setup/py/pw_env_setup/cargo_setup/packages.txt:$PW_CARGO_PACKAGE_FILES"
-  export PW_CARGO_PACKAGE_FILES
-
-  if [ -n "$PW_USE_GCS_ENVSETUP" ]; then
-    _PW_ENV_SETUP="$("$PW_ROOT/pw_env_setup/get_pw_env_setup.sh")"
-  fi
-
-  if [ -n "$_PW_ENV_SETUP" ]; then
-    "$_PW_ENV_SETUP" --shell-file "$SETUP_SH" --install-dir "$_PW_ACTUAL_ENVIRONMENT_ROOT"
-  else
-    "$PYTHON" "$PW_ROOT/pw_env_setup/py/pw_env_setup/env_setup.py" --shell-file "$SETUP_SH" --install-dir "$_PW_ACTUAL_ENVIRONMENT_ROOT"
-  fi
-
-  PW_CIPD_PACKAGE_FILES="$_PW_OLD_CIPD_PACKAGE_FILES"
-  PW_VIRTUALENV_REQUIREMENTS="$_PW_OLD_VIRTUALENV_REQUIREMENTS"
-  PW_VIRTUALENV_SETUP_PY_ROOTS="$_PW_OLD_VIRTUALENV_SETUP_PY_ROOTS"
-  PW_CARGO_PACKAGE_FILES="$_PW_OLD_CARGO_PACKAGE_FILES"
+  [ ! -s "$SETUP_SH" ]; then
+  pw_bootstrap --shell-file "$SETUP_SH" --install-dir "$_PW_ACTUAL_ENVIRONMENT_ROOT" --use-pigweed-defaults --json-file "$_PW_ACTUAL_ENVIRONMENT_ROOT/actions.json" --virtualenv-gn-target "$PW_ROOT#:target_support_packages.install"
+  pw_finalize bootstrap "$SETUP_SH"
 else
-  _PW_NAME="activate"
-
-  if [ -z "$PW_ENVSETUP_QUIET" ]; then
-    _pw_green "  ACTIVATOR! This sets your shell environment variables.\n"
-  fi
+  pw_activate
+  pw_finalize activate "$SETUP_SH"
 fi
 
-if [ -f "$SETUP_SH" ]; then
-  . "$SETUP_SH"
-
-  if [ "$?" -eq 0 ]; then
-    if [ "$_PW_IS_BOOTSTRAP" -eq 0 ] && [ -z "$PW_ENVSETUP_QUIET" ]; then
-      echo "To activate this environment in the future, run this in your "
-      echo "terminal:"
-      echo
-      _pw_green "  source ./activate.sh\n"
-    fi
-  else
-    _pw_red "Error during $_PW_NAME--see messages above."
-  fi
-else
-  _pw_red "Error during $_PW_NAME--see messages above."
-fi
-
-unset _PW_ENV_SETUP
-unset _PW_IS_BOOTSTRAP
-unset _PW_NAME
-unset _PW_BANNER
-unset _PW_OLD_CIPD_PACKAGE_FILES
-unset _PW_OLD_VIRTUALENV_REQUIREMENTS
-unset _PW_OLD_VIRTUALENV_SETUP_PY_ROOTS
-unset _PW_OLD_CARGO_PACKAGE_FILES
-unset _pw_abspath
-unset _pw_red
-unset _pw_bold_red
-unset _pw_green
-unset _pw_magenta
 unset _pw_sourced
+unset _BOOTSTRAP_PATH
+unset SETUP_SH
+unset _bootstrap_abspath
+
+pw_cleanup
diff --git a/docker/BUILD.gn b/docker/BUILD.gn
index 601472c..dd021e8 100644
--- a/docker/BUILD.gn
+++ b/docker/BUILD.gn
@@ -12,10 +12,10 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_docgen/docs.gni")
+
 pw_doc_group("docs") {
   sources = [ "docs.rst" ]
 }
diff --git a/docker/docs.rst b/docker/docs.rst
index a5e26c0..ed6a290 100644
--- a/docker/docs.rst
+++ b/docker/docs.rst
@@ -1,5 +1,3 @@
-.. _chapter-docker:
-
 ------
 docker
 ------
diff --git a/docker/tag b/docker/tag
index 8947721..6d54d4b 100644
--- a/docker/tag
+++ b/docker/tag
@@ -1 +1 @@
-us.gcr.io/google.com/pigweed/environment:53a06fbc72122fe37b95d9f91025b0b9c44b4260
\ No newline at end of file
+us.gcr.io/google.com/pigweed/environment:2006150937cebd9b98206c2e7f39ee17cce5fddb
\ No newline at end of file
diff --git a/docs/BUILD.gn b/docs/BUILD.gn
index caac883..50abacc 100644
--- a/docs/BUILD.gn
+++ b/docs/BUILD.gn
@@ -12,14 +12,13 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_docgen/docs.gni")
+
 pw_doc_group("core_docs") {
   inputs = [
     "images/pw_env_setup_demo.gif",
-    "images/pw_presubmit_demo.gif",
     "images/pw_status_test.png",
     "images/pw_watch_build_demo.gif",
     "images/pw_watch_on_device_demo.gif",
@@ -38,6 +37,7 @@
 # Documentation for upstream Pigweed targets.
 group("target_docs") {
   deps = [
+    "$dir_pigweed/targets/arduino:target_docs",
     "$dir_pigweed/targets/docs:target_docs",
     "$dir_pigweed/targets/host:target_docs",
     "$dir_pigweed/targets/lm3s6965evb-qemu:target_docs",
@@ -45,6 +45,70 @@
   ]
 }
 
+group("module_docs") {
+  deps = [
+    "$dir_docker:docs",
+    "$dir_pw_allocator:docs",
+    "$dir_pw_arduino_build:docs",
+    "$dir_pw_assert:docs",
+    "$dir_pw_assert_basic:docs",
+    "$dir_pw_assert_log:docs",
+    "$dir_pw_base64:docs",
+    "$dir_pw_bloat:docs",
+    "$dir_pw_blob_store:docs",
+    "$dir_pw_boot_armv7m:docs",
+    "$dir_pw_build:docs",
+    "$dir_pw_bytes:docs",
+    "$dir_pw_checksum:docs",
+    "$dir_pw_cli:docs",
+    "$dir_pw_containers:docs",
+    "$dir_pw_cpu_exception:docs",
+    "$dir_pw_cpu_exception_armv7m:docs",
+    "$dir_pw_docgen:docs",
+    "$dir_pw_doctor:docs",
+    "$dir_pw_env_setup:docs",
+    "$dir_pw_fuzzer:docs",
+    "$dir_pw_hdlc_lite:docs",
+    "$dir_pw_hex_dump:docs",
+    "$dir_pw_kvs:docs",
+    "$dir_pw_log:docs",
+    "$dir_pw_log_basic:docs",
+    "$dir_pw_log_null:docs",
+    "$dir_pw_log_rpc:docs",
+    "$dir_pw_log_tokenized:docs",
+    "$dir_pw_metric:docs",
+    "$dir_pw_minimal_cpp_stdlib:docs",
+    "$dir_pw_module:docs",
+    "$dir_pw_package:docs",
+    "$dir_pw_polyfill:docs",
+    "$dir_pw_preprocessor:docs",
+    "$dir_pw_presubmit:docs",
+    "$dir_pw_protobuf:docs",
+    "$dir_pw_protobuf_compiler:docs",
+    "$dir_pw_random:docs",
+    "$dir_pw_result:docs",
+    "$dir_pw_ring_buffer:docs",
+    "$dir_pw_rpc:docs",
+    "$dir_pw_span:docs",
+    "$dir_pw_status:docs",
+    "$dir_pw_stream:docs",
+    "$dir_pw_string:docs",
+    "$dir_pw_sys_io:docs",
+    "$dir_pw_sys_io_arduino:docs",
+    "$dir_pw_sys_io_baremetal_stm32f429:docs",
+    "$dir_pw_sys_io_stdio:docs",
+    "$dir_pw_target_runner:docs",
+    "$dir_pw_tokenizer:docs",
+    "$dir_pw_toolchain:docs",
+    "$dir_pw_trace:docs",
+    "$dir_pw_trace_tokenized:docs",
+    "$dir_pw_unit_test:docs",
+    "$dir_pw_varint:docs",
+    "$dir_pw_watch:docs",
+    "$dir_pw_web_ui:docs",
+  ]
+}
+
 pw_doc_gen("docs") {
   conf = "conf.py"
   sources = [
@@ -59,57 +123,7 @@
   output_directory = target_gen_dir
   deps = [
     ":core_docs",
+    ":module_docs",
     ":target_docs",
-    "$dir_docker:docs",
-    "$dir_pw_allocator:docs",
-    "$dir_pw_assert:docs",
-    "$dir_pw_assert_basic:docs",
-    "$dir_pw_base64:docs",
-    "$dir_pw_bloat:docs",
-    "$dir_pw_blob_store:docs",
-    "$dir_pw_boot_armv7m:docs",
-    "$dir_pw_build:docs",
-    "$dir_pw_bytes:docs",
-    "$dir_pw_checksum:docs",
-    "$dir_pw_cli:docs",
-    "$dir_pw_containers:docs",
-    "$dir_pw_cpu_exception:docs",
-    "$dir_pw_cpu_exception_armv7m:docs",
-    "$dir_pw_docgen:docs",
-    "$dir_pw_doctor:docs",
-    "$dir_pw_env_setup:docs",
-    "$dir_pw_fuzzer:docs",
-    "$dir_pw_hex_dump:docs",
-    "$dir_pw_kvs:docs",
-    "$dir_pw_log:docs",
-    "$dir_pw_log_basic:docs",
-    "$dir_pw_log_tokenized:docs",
-    "$dir_pw_minimal_cpp_stdlib:docs",
-    "$dir_pw_module:docs",
-    "$dir_pw_polyfill:docs",
-    "$dir_pw_preprocessor:docs",
-    "$dir_pw_presubmit:docs",
-    "$dir_pw_protobuf:docs",
-    "$dir_pw_protobuf_compiler:docs",
-    "$dir_pw_random:docs",
-    "$dir_pw_result:docs",
-    "$dir_pw_ring_buffer:docs",
-    "$dir_pw_rpc:docs",
-    "$dir_pw_span:docs",
-    "$dir_pw_status:docs",
-    "$dir_pw_stream:docs",
-    "$dir_pw_string:docs",
-    "$dir_pw_sys_io:docs",
-    "$dir_pw_sys_io_baremetal_stm32f429:docs",
-    "$dir_pw_sys_io_stdio:docs",
-    "$dir_pw_target_runner:docs",
-    "$dir_pw_tokenizer:docs",
-    "$dir_pw_toolchain:docs",
-    "$dir_pw_trace:docs",
-    "$dir_pw_trace_tokenized:docs",
-    "$dir_pw_unit_test:docs",
-    "$dir_pw_varint:docs",
-    "$dir_pw_watch:docs",
-    "$dir_pw_web_ui:docs",
   ]
 }
diff --git a/docs/build_system.rst b/docs/build_system.rst
index a9d465c..6938d19 100644
--- a/docs/build_system.rst
+++ b/docs/build_system.rst
@@ -1,17 +1,188 @@
-.. _chapter-build-system:
+.. _docs-build-system:
 
 ============
 Build system
 ============
+Building software for embedded devices is a complex process. Projects often have
+custom toolchains, target different hardware platforms, and require additional
+configuration and post-processing of artifacts.
 
-Pigweed's primary build system is `GN`_, which is used for all upstream
-development. Some other common build systems are supported for integration into
-existing project, which are described in :ref:`chapter-pw-build`. We recommend
-using GN where possible.
+As a modern embedded framework, Pigweed's goal is to collect these embedded use
+cases into a powerful and flexible build system, then extend it with support for
+modern software development practices.
 
-.. _GN: https://gn.googlesource.com/gn/
+What's in a build system?
+=========================
+A quality build system provides a variety of features beyond compiling code.
+Throughout our experience with embedded development, we've found several build
+features to be especially useful, and designed Pigweed's build system with them
+in mind.
 
-This document describes Pigweed's upstream build structure.
+Simple toolchain configuration
+------------------------------
+Embedded projects often use custom build toolchains for their specific hardware.
+Configuring these should be a simple process, both in their initial setup and
+later adjustments.
+
+Multi-target builds
+-------------------
+Virtually every consumer product has firmware that targets different boards or
+MCUs during development. While building for a single board is simple enough, the
+complexity of supporting different targets ranges from changing compiler flags
+to swapping out entire libraries of firmware and drivers. This is often done by
+running multiple builds, configuring each one accordingly. In Pigweed, we've
+designed our build system with first-class multi-target support in mind,
+allowing any number of target configurations to be built simultaneously.
+
+Multi-language support
+----------------------
+Embedded projects are typically written in C, C++, and assembly. However, it is
+possible to have firmware written in other languages, such as Rust.
+Additionally, projects may have host-side tooling written in a wide variety of
+languages. Having all of these build together proves to be a large time saver.
+
+Custom scripting
+----------------
+Embedded projects often require post-processing of build artifacts; these may
+include:
+
+* Extracting ELF sections into a different container
+* Injecting metadata into firmware images
+* Image signing
+* Creating databases of symbols for debugging
+* Extracting string tokens into a database (for example, with
+  :ref:`module-pw_tokenizer`)
+
+These are run as steps during a build, facilitated by the build system.
+
+See also
+^^^^^^^^
+
+* :ref:`module-pw_build-python-action`
+
+Python packaging
+----------------
+Python is a favorite scripting language of many development teams, and here at
+Pigweed, we're no exception. Much of Pigweed's host-side tooling is written in
+Python. While Python works great for local development, problems can arise when
+scripts need to be packaged and distributed for vendors or factory teams. Having
+proper support for packaging Python within a build system allows teams to focus
+on writing code instead of worrying about distribution.
+
+Size reporting
+--------------
+On embedded devices, memory is everything. Most projects have some sort of
+custom tooling to determine how much flash and RAM space their firmware uses.
+Being able to run size reports as part of a build ensures that they are always
+up-to-date and allows space usage to be tracked over time.
+
+See also
+^^^^^^^^
+
+* :ref:`module-pw_bloat`
+
+Documentation
+-------------
+An oft-neglected part of software development, documentation is invaluable for
+future maintainers of a project. As such, Pigweed has integrated documentation
+which builds alongside its code and combines with other build features, such as
+size reports, to provide high quality, up-to-date references for developers.
+
+See also
+^^^^^^^^
+
+* :ref:`module-pw_docgen`
+
+Unit testing
+------------
+Unit tests are essential to ensure that the functionality of code remains
+consistent as changes are made to avoid accidental regressions. Running unit
+tests as part of a build keeps developers constantly aware of the impact of
+their changes.
+
+Host-side unit tests
+^^^^^^^^^^^^^^^^^^^^
+Though Pigweed targets embedded devices, a lot of its code can be run and tested
+on a host desktop by swapping out backends to host platform libraries. This is
+highly beneficial during development, as it allows tests to consistently run
+without having to go through the process of flashing a device.
+
+Device-side unit tests
+^^^^^^^^^^^^^^^^^^^^^^
+As useful as host-side tests are, they are not sufficient for developing actual
+firmware, and it is critical to run tests on the actual hardware. Pigweed has
+invested into creating a test framework and build integration for running tests
+across physical devices as part of a build.
+
+See also
+^^^^^^^^
+
+* :ref:`module-pw_unit_test`
+* :ref:`module-pw_target_runner`
+
+Bonus: pw watch
+---------------
+In web development, it is common to have a file system watcher listening for
+source file changes and triggering a build for quick iteration. When combined
+with a fast incremental build system, this becomes a powerful feature, allowing
+things such as unit tests and size reports to re-run whenever any dependent
+code is modified.
+
+While initially seen as somewhat of a gimmick, Pigweed's watcher has become a
+staple of Pigweed development, with most Pigweed users having it permanently
+running in a terminal window.
+
+See also
+^^^^^^^^
+
+* :ref:`module-pw_watch`
+
+Pigweed's build systems
+=======================
+Pigweed can be used either as a monolith or à la carte, slotting into an
+existing project. To this end, Pigweed supports multiple build systems, allowing
+Pigweed-based projects to choose the most suitable one for them.
+
+Of the supported build systems, GN is the most full-featured, followed by CMake,
+and finally Bazel.
+
+CMake
+-----
+A well-known name in C/C++ development, `CMake`_ is widely used by all kinds of
+projects, including embedded devices. Pigweed's CMake support is provided
+primarily for projects that have an existing CMake build and wish to integrate
+Pigweed modules.
+
+Bazel
+-----
+The open source version of Google's internal build system. `Bazel`_ has been
+growing in popularity within the open source world, as well as being adopted by
+various proprietary projects. Its modular structure makes it a great fit for
+à la carte usage.
+
+GN
+--
+A perhaps unfamiliar name, `GN (Generate Ninja)`_ is a meta-build system that
+outputs `Ninja`_ build files, originally designed for use in Chromium. Pigweed
+first experimented with GN after hearing about it from another team, and we
+quickly came to appreciate its speed and simplicity. GN has become Pigweed's
+primary build system; it is used for all upstream development and strongly
+recommended for Pigweed-based projects where possible.
+
+.. _CMake: https://cmake.org/
+.. _Bazel: https://bazel.build/
+.. _GN (Generate Ninja): https://gn.googlesource.com/gn
+.. _Ninja: https://ninja-build.org/
+
+The GN build
+============
+This section describes Pigweed's GN build structure, how it is used upstream,
+build conventions, and recommendations for Pigweed-based projects. While
+containing some details about how GN works in general, this section is not
+intended to be a guide on how to use GN. To learn more about the tool itself,
+refer to the official `GN reference`_.
+
+.. _GN reference: https://gn.googlesource.com/gn/+/master/docs/reference.md
 
 .. note::
   A quick note on terminology: the word "target" is overloaded within GN (and
@@ -22,44 +193,149 @@
   To avoid confusing the two, we refer to the former as "GN targets" and the
   latter as "Pigweed targets".
 
-.gn
-===
-The entrypoint to the GN build is the ``.gn`` file, which defines the project's
-root directory (henceforth ``//``). In Pigweed, its only purpose is to point GN
-to the location of the BUILDCONFIG file.
+Entrypoint: .gn
+---------------
+The entrypoint to a GN build is the ``.gn`` file, which defines a project's root
+directory (henceforth ``//``).
 
-BUILDCONFIG.gn
-==============
-``//BUILDCONFIG.gn`` configures the GN build by defining global configuration
-options. Most of Pigweed's configuration is left to individual build targets,
-so the BUILDCONFIG file is relatively empty. It sets Pigweed's default
-toolchain, which GN requires before evaluating any BUILD files.
+``.gn`` must point to the location of a ``BUILDCONFIG.gn`` file for the project.
+In Pigweed upstream, this is its only purpose.
 
-//BUILD.gn
-==========
+Downstream projects may additionally use ``.gn`` to set global overrides for
+Pigweed's build arguments, which apply across all Pigweed targets. For example,
+a project could configure the protobuf libraries that it uses. This is done by
+defining a ``default_args`` scope containing the overrides.
+
+.. code::
+
+  # The location of the BUILDCONFIG file.
+  buildconfig = "//BUILDCONFIG.gn"
+
+  # Build arguments set across all Pigweed targets.
+  default_args = {
+    dir_pw_third_party_nanopb = "//third_party/nanopb-0.4.2"
+  }
+
+Configuration: BUILDCONFIG.gn
+-----------------------------
+The file ``BUILDCONFIG.gn`` configures the GN build by defining any desired
+global variables/options. It can be located anywhere in the build tree, but is
+conventionally placed at the root. ``.gn`` points GN to this file.
+
+``BUILDCONFIG.gn`` is evaluated before any other GN files, and variables defined
+within it are placed into GN's global scope, becoming available in every file
+without requiring imports.
+
+The options configured in this file differ from those in ``.gn`` in two ways:
+
+1. ``BUILDCONFIG.gn`` is evaluated for every GN toolchain (and Pigweed target),
+   whereas ``.gn`` is only evaluated once. This allows ``BUILDCONFIG.gn`` to set
+   different options for each Pigweed target.
+2. In ``.gn``, only GN build arguments can be overridden. ``BUILDCONFIG.gn``
+   allows defining arbitrary variables.
+
+Generally, it is preferable to expose configuration options through build args
+instead of globals in ``BUILDCONFIG.gn`` (something Pigweed's build previously
+did), as they are more flexible, greppable, and easier to manage. However, it
+may make sense to define project-specific constants in ``BUILDCONFIG.gn``.
+
+Pigweed's upstream ``BUILDCONFIG.gn`` does not define any variables; it just
+sets Pigweed's default toolchain, which GN requires.
+
+.. _top-level-build:
+
+Top-level GN targets: //BUILD.gn
+--------------------------------
 The root ``BUILD.gn`` file defines all of the libraries, images, tests, and
-binaries built within Pigweed. These are split across a few logical groups,
-which are described below. In order to build a GN target, it *must* be listed in
-one of the groups in this file.
+binaries built by a Pigweed project. This file is evaluated immediately after
+``BUILDCONFIG.gn``, with the active toolchain (which is the default toolchain
+at the start of a build).
 
-``//BUILD.gn`` is also responsible for enumerating each of the Pigweed targets
-that Pigweed supports. These targets are defined as toolchains providing their
-custom configuration options. ``/BUILD.gn`` instantiates a version of its GN
-target groups for each of these toolchains.
+``//BUILD.gn`` is responsible for enumerating each of the Pigweed targets built
+by a project. This is done by instantiating a version of each of the project's
+GN target groups with each Pigweed target's toolchain. For example, in upstream,
+all of Pigweed's GN targets are contained within the ``pigweed_default`` group.
+This group is instantiated multiple times, with different Pigweed target
+toolchains.
+
+These groups include the following:
+
+* ``host`` -- builds ``pigweed_default`` with Clang or GCC, depending on the
+  platform
+* ``host_clang`` -- builds ``pigweed_default`` for the host with Clang
+* ``host_gcc`` -- builds ``pigweed_default`` for the host with GCC
+* ``stm32f429i`` -- builds ``pigweed_default`` for STM32F429i Discovery board
+* ``docs`` -- builds the Pigweed documentation and size reports
+
+Pigweed projects are recommended to follow this pattern, creating a top-level
+group for each of their Pigweed targets that builds a common GN target with the
+appropriate toolchain.
+
+It is important that no dependencies are listed under the default toolchain
+within ``//BUILD.gn``, as it does not configure any build parameters, and
+therefore should not evaluate any other GN files. The pattern that Pigweed uses
+to achieve this is to wrap all dependencies within a condition checking the
+toolchain.
+
+.. code::
+
+  group("my_application_images") {
+    deps = []  # Empty in the default toolchain.
+
+    if (current_toolchain != default_toolchain) {
+      # This is only evaluated by Pigweed target toolchains, which configure
+      # all of the required options to build Pigweed code.
+      deps += [ "//images:evt" ]
+    }
+  }
+
+  # The images group is instantiated for each of the project's Pigweed targets.
+  group("my_pigweed_target") {
+    deps = [ ":my_application_images(//toolchains:my_pigweed_target)" ]
+  }
 
 .. warning::
   Pigweed's default toolchain is never used, so it is set to a dummy toolchain
   which doesn't define any tools. ``//BUILD.gn`` contains conditions which check
   that the current toolchain is not the default before declaring any GN target
   dependencies to prevent the default toolchain from evaluating any other BUILD
-  files. All GN targets added to the build must be placed in one of these
+  files. All GN targets added to the build must be placed under one of these
   conditional scopes.
 
-Groups
-------
+"default" group
+^^^^^^^^^^^^^^^
+The root ``BUILD.gn`` file can define a special group named ``default``. If
+present, Ninja will build this group when invoked without arguments.
+
+.. tip::
+  Defining a ``default`` group makes using ``pw watch`` simple!
+
+Optimization levels
+^^^^^^^^^^^^^^^^^^^
+Pigweed's ``//BUILD.gn`` defines the ``pw_default_optimization_level`` build
+arg, which specifies the optimization level to use for the default groups
+(``host``, ``stm32f429i``, etc.). The supported values for
+``pw_default_optimization_level`` are:
+
+* ``debug`` -- create debugging-friendly binaries (``-Og``)
+* ``size_optimized`` -- optimize for size (``-Os``)
+* ``speed_optimized`` -- optimized for speed, without increasing code size
+  (``-O2``)
+
+Pigweed defines versions of its groups in ``//BUILD.gn`` for each optimization
+level. Rather than relying on ``pw_default_optimization_level``, you may
+directly build a group at the desired optimization level:
+``<group>_<optimization>``. Examples include ``host_clang_debug``,
+``host_gcc_size_optimized``, and ``stm32f429i_speed_optimized``.
+
+Upstream GN target groups
+^^^^^^^^^^^^^^^^^^^^^^^^^
+In upstream, Pigweed splits its top-level GN targets into a few logical groups,
+which are described below. In order to build a GN target, it *must* be listed in
+one of the groups in this file.
 
 apps
-^^^^
+~~~~
 This group defines the application images built in Pigweed. It lists all of the
 common images built across all Pigweed targets, such as modules' example
 executables. Each Pigweed target can additionally provide its own specific
@@ -67,44 +343,112 @@
 this group.
 
 host_tools
-^^^^^^^^^^
+~~~~~~~~~~
 This group defines host-side tooling binaries built for Pigweed.
 
 pw_modules
-^^^^^^^^^^
+~~~~~~~~~~
 This group lists the main libraries for all of Pigweed's modules.
 
 pw_module_tests
-^^^^^^^^^^^^^^^
+~~~~~~~~~~~~~~~
 All modules' unit tests are collected here, so that they can all be run at once.
 
 pigweed_default
-^^^^^^^^^^^^^^^
+~~~~~~~~~~~~~~~
 This group defines everything built in a Pigweed build invocation by collecting
-the above groups and conditionally adding them, based on the Pigweed target
-configuration. Generally, new dependencies should not be added here; instead use
-one of the groups listed above.
+the above groups and conditionally depending on them based on the active Pigweed
+target's configuration. Generally, new dependencies should not be added here;
+instead, use one of the groups listed above.
 
-The ``pigweed_default`` group is instantiated for each of the Pigweed target
-toolchains.
+The ``pigweed_default`` group is instantiated for each upstream Pigweed target's
+toolchain.
 
 Pigweed target instantiations
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 These groups wrap ``pigweed_default`` with a specific target toolchain. They are
 named after the Pigweed target, e.g. ``host_clang``, ``stm32f429i``, etc.
 
+Other BUILD files: //\*\*/BUILD.gn
+----------------------------------
+The rest of the ``BUILD.gn`` files in the tree define libraries, configs, and
+build args for each of the modules in a Pigweed project.
+
+Project configuration: //build_overrides/pigweed.gni
+----------------------------------------------------
+Each Pigweed project must contain a Pigweed configuration file at a known
+location in the GN build tree. Currently, this file only contains a single build
+argument, which must be set to the GN build path to the root of the Pigweed
+repository within the project.
+
+Module variables
+----------------
+As Pigweed is intended to be a subcomponent of a larger project, it cannot
+assume where it or its modules are located. Therefore, Pigweed's upstream
+do not use absolute paths; instead, variables are defined pointing to each of
+Pigweed's modules, set relative to a project-specific ``dir_pigweed``.
+
+To depend on Pigweed modules from GN code, import Pigweed's overrides file and
+reference these module variables.
+
+.. code::
+
+  # This must be imported before .gni files from any other Pigweed modules. To
+  # prevent gn format from reordering this import, it must be separated by a
+  # blank line from other imports.
+
+  import("//build_overrides/pigweed.gni")
+
+GN target type wrappers
+-----------------------
+To facilitate injecting global configuration options, Pigweed defines wrappers
+around builtin GN target types such as ``source_set`` and ``executable``. These
+are defined within ``$dir_pw_build/target_types.gni``.
+
+.. note::
+  To take advantage of Pigweed's flexible target configuration system, use
+  ``pw_*`` target types (e.g. ``pw_source_set``) in your BUILD.gn files instead
+  of GN builtins.
+
 Pigweed targets
-===============
-Each Pigweed target is defined as a GN toolchain which provides its own build
-tool and output binary configs, and custom overrides for Pigweed's build
-configuration arguments. For more information on Pigweed's target system, as
-well as each of the supported targets, refer to :ref:`chapter-targets`.
+---------------
+To build for a specific hardware platform, builds define Pigweed targets. These
+are essentially GN toolchains which set special arguments telling Pigweed how to
+build. For information on Pigweed's target system, refer to
+:ref:`docs-targets`.
 
-Usage examples
-==============
+The dummy toolchain
+-------------------
+Pigweed's ``BUILDCONFIG.gn`` sets the project's default toolchain to a "dummy"
+toolchain which does not specify any compilers or override any build arguments.
+Downstream projects are recommended to do the same, following the steps
+described in :ref:`top-level-build` to configure builds for each of their
+Pigweed targets.
 
-Building a custom executable
-----------------------------
+.. admonition:: Why use a dummy?
+
+  To support some of its advanced (and useful!) build features, Pigweed requires
+  the ability to generate new toolchains on the fly. This requires having
+  knowledge of the full configuration of the current toolchain (which is easy if
+  it's all defined within a scope), something which is impractical to achieve
+  using the default toolchain.
+
+  Additionally, there are some cases where GN treats default and non-default
+  toolchains differently. By not using the default toolchain, we avoid having
+  to deal with these inconsistencies.
+
+  It is possible to build Pigweed using only the default toolchain, but it
+  requires a more complicated setup to get everything working and should be
+  avoided unless necessary (for example, when integrating with a large existing
+  GN-based project).
+
+Upstream development examples
+-----------------------------
+If developing for upstream Pigweed, some common build use cases are described
+below.
+
+Building a custom executable/app image
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 1. Define your executable GN target using the ``pw_executable`` template.
 
diff --git a/docs/embedded_cpp_guide.rst b/docs/embedded_cpp_guide.rst
index b05832c..4d7a669 100644
--- a/docs/embedded_cpp_guide.rst
+++ b/docs/embedded_cpp_guide.rst
@@ -1,8 +1,4 @@
-.. _chapter-embedded-cpp:
-
-.. default-domain:: cpp
-
-.. highlight:: sh
+.. _docs-embedded-cpp:
 
 ==================
 Embedded C++ Guide
diff --git a/docs/faq.rst b/docs/faq.rst
index 090c08e..3b51b4a 100644
--- a/docs/faq.rst
+++ b/docs/faq.rst
@@ -1,4 +1,4 @@
-.. _chapter-faq:
+.. _docs-faq:
 
 --------------------------
 Frequently Asked Questions
@@ -33,7 +33,7 @@
 Isn't C++ bloated and slow?
 ---------------------------
 In general, no, but it is important to follow some guidelines as discussed in
-the :ref:`Embedded C++ Guide <chapter-embedded-cpp>`.
+the :ref:`Embedded C++ Guide <docs-embedded-cpp>`.
 
 At Google, we have made some quantitative analysis of various common embedded
 patterns in C++ to evaluate the cost of various constructs. We will open source
diff --git a/docs/getting_started.md b/docs/getting_started.md
index 4a968c0..59e17de 100644
--- a/docs/getting_started.md
+++ b/docs/getting_started.md
@@ -43,7 +43,7 @@
 (4) Start the watcher. The watcher will invoke Ninja to build all the targets
 
 ```bash
-$ pw watch out default stm32f429i
+$ pw watch out default
 
  ▒█████▄   █▓  ▄███▒  ▒█    ▒█ ░▓████▒ ░▓████▒ ▒▓████▄
   ▒█░  █░ ░█▒ ██▒ ▀█▒ ▒█░ █ ▒█  ▒█   ▀  ▒█   ▀  ▒█  ▀█▌
@@ -52,7 +52,7 @@
   ▒█      ░█░ ░▓███▀   ▒█▓▀▓█░ ░▓████▒ ░▓████▒ ▒▓████▀
 
 20200707 17:24:06 INF Starting Pigweed build watcher
-20200707 17:24:06 INF Will build [1/1]: out default stm32f429i
+20200707 17:24:06 INF Will build [1/1]: out default
 20200707 17:24:06 INF Attaching filesystem watcher to $HOME/wrk/pigweed/...
 20200707 17:24:06 INF Triggering initial build...
 ...
@@ -232,32 +232,24 @@
 
 ## Building for a Device
 
-As mentioned previously, Pigweed builds for host by default. In the context of
-Pigweed, a Pigweed "target" is a build configuration that includes a toolchain,
-default library configurations, and more to result in binaries that run
-natively on the target.
+A Pigweed "target" is a build configuration that includes a toolchain, default
+library configurations, and more to result in binaries that run natively on the
+target. With the default build invocation, you're already building for a device
+target (the STMicroelectronics STM32F429I-DISC1) in parallel with the host
+build!
 
-Switch to the window running `pw_watch`, and quit using `ctrl+c`. To get
-`pw_watch` to build the new STM32F429I-DISC1 target, re-launch by specifying
-which Ninja targets to build:
+If you want to build JUST for the device, you can kick off watch with:
 
 ```bash
-$ pw watch out default stm32f429i
+$ pw watch out stm32f429i
 ```
 
 This is equivalent to the following Ninja invocation:
 
 ```bash
-$ ninja -C out default stm32f429i
+$ ninja -C out stm32f429i
 ```
 
-Or since the "default" target builds host and docs,
-
-```bash
-$ ninja -C out host docs stm32f429i
-```
-
-Now `pw_watch` is building for host and a device!
 
 ## Running Tests on a Device
 
@@ -273,7 +265,7 @@
 the boards and distribute the tests across the devices. More boards = faster
 tests! Keep in mind that you may have to make some environment specific updates
 to ensure you have permissions to use the USB device. For example, on Linux you
-may need to update your udev rules and ensure you're in the plugdev and dailout
+may need to update your udev rules and ensure you're in the plugdev and dialout
 groups.
 
 ![development boards connected via USB](images/stm32f429i-disc1_connected.jpg)
diff --git a/docs/index.rst b/docs/index.rst
index a7230ed..9a02eca 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -1,5 +1,3 @@
-.. _chapter-home:
-
 .. highlight:: sh
 
 .. mdinclude:: README.md
diff --git a/docs/module_guides.rst b/docs/module_guides.rst
index 728deff..f45131e 100644
--- a/docs/module_guides.rst
+++ b/docs/module_guides.rst
@@ -1,3 +1,5 @@
+.. _docs-module-guides:
+
 =============
 Module Guides
 =============
diff --git a/docs/module_structure.rst b/docs/module_structure.rst
index 6774ffd..586fca0 100644
--- a/docs/module_structure.rst
+++ b/docs/module_structure.rst
@@ -1,8 +1,4 @@
-.. default-domain:: cpp
-
-.. highlight:: sh
-
-.. _chapter-module-guide:
+.. _docs-module-structure:
 
 ----------------
 Module Structure
@@ -31,7 +27,7 @@
     public/pw_foo/foo.h
     public/pw_foo/baz.h
 
-    # Exposed public headers go under internal/
+    # Exposed private headers go under internal/
     public/pw_foo/internal/bar.h
     public/pw_foo/internal/qux.h
 
@@ -100,8 +96,8 @@
 your product (e.g. for an Internet of Toast project, perhaps the prefix could
 be ``it_``).
 
-C++ file and directory locations
---------------------------------
+C++ module structure
+--------------------
 
 C++ public headers
 ~~~~~~~~~~~~~~~~~~
@@ -142,8 +138,8 @@
   These headers must not override headers from other modules. For
   that, there is the ``public_overrides/`` directory.
 
-Public override headers
-~~~~~~~~~~~~~~~~~~~~~~~
+C++ public override headers
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
 Located ``{pw_module_dir}/public_overrides/<module>``. In general, the Pigweed
 philosophy is to avoid having "things hiding under rocks", and having header
 files with the same name that can override each other is considered a rock
@@ -190,8 +186,177 @@
     BUILD.gn
     README.md
 
+Compile-time configuration
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+Pigweed modules are intended to be used in a wide variety of environments.
+In support of this, some modules expose compile-time configuration options.
+Pigweed has an established pattern for declaring and overriding module
+configuration.
+
+.. tip::
+
+  Compile-time configuration provides flexibility, but also imposes
+  restrictions. A module can only have one configuration in a given build.
+  This makes testing modules with compile-time configuration more difficult.
+  Where appropriate, consider alternatives such as C++ templates or runtime
+  configuration.
+
+Declaring configuration
+^^^^^^^^^^^^^^^^^^^^^^^
+Configuration values are declared in a header file with macros. If the macro
+value is not already defined, a default definition is provided. Otherwise,
+nothing is done. Configuration headers may include ``static_assert`` statements
+to validate configuration values.
+
+.. code-block:: c++
+
+  // Example configuration header
+
+  #ifndef PW_FOO_INPUT_BUFFER_SIZE_BYTES
+  #define PW_FOO_INPUT_BUFFER_SIZE_BYTES 128
+  #endif  // PW_FOO_INPUT_BUFFER_SIZE_BYTES
+
+  static_assert(PW_FOO_INPUT_BUFFER_SIZE_BYTES >= 64);
+
+The configuration header may go in one of three places in the module, depending
+on whether the header should be exposed by the module or not.
+
+.. code-block::
+
+  pw_foo/...
+
+    # Publicly accessible configuration header
+    public/pw_foo/config.h
+
+    # Internal configuration header that is included by other module headers
+    public/pw_foo/internal/config.h
+
+    # Internal configuration header
+    pw_foo_private/config.h
+
+The configuration header is provided by a build system library. This library
+acts as a :ref:`facade<docs-module-structure-facades>`. The facade uses a
+variable such as ``pw_foo_CONFIG``. In upstream Pigweed, all config facades
+default to the ``pw_build_DEFAULT_MODULE_CONFIG`` backend. In the GN build
+system, the config facade is declared as follows:
+
+.. code-block::
+
+  declare_args() {
+    # The build target that overrides the default configuration options for this
+    # module. This should point to a source set that provides defines through a
+    # public config (which may -include a file or add defines directly).
+    pw_foo_CONFIG = pw_build_DEFAULT_MODULE_CONFIG
+  }
+
+  # An example source set for each potential config header location follows.
+
+  # Publicly accessible configuration header (most common)
+  pw_source_set("config") {
+    public = [ "public/pw_foo/config.h" ]
+    public_configs = [ ":public_include_path" ]
+    public_deps = [ pw_foo_CONFIG ]
+  }
+
+  # Internal configuration header that is included by other module headers
+  pw_source_set("config") {
+    sources = [ "public/pw_foo/internal/config.h" ]
+    public_configs = [ ":public_include_path" ]
+    public_deps = [ pw_foo_CONFIG ]
+    visibility = [":*"]  # Only allow this module to depend on ":config"
+    friend = [":*"]  # Allow this module to access the config.h header.
+  }
+
+  # Internal configuration header
+  pw_source_set("config") {
+    public = [ "pw_foo_private/config.h" ]
+    public_deps = [ pw_foo_CONFIG ]
+    visibility = [":*"]  # Only allow this module to depend on ":config"
+  }
+
+Overriding configuration
+^^^^^^^^^^^^^^^^^^^^^^^^
+As noted above, all module configuration facades default to the same backend
+(``pw_build_DEFAULT_MODULE_CONFIG``). This allows projects to override
+configuration values for multiple modules from a single configuration backend,
+if desired. The configuration values may also be overridden individually by
+setting backends for the individual module configurations (e.g. in GN,
+``pw_foo_CONFIG = "//configuration:my_foo_config"``).
+
+Configurations are overridden by setting compilation options in the config
+backend. These options could be set through macro definitions, such as
+``-DPW_FOO_INPUT_BUFFER_SIZE_BYTES=256``, or in a header file included with the
+``-include`` option.
+
+This example shows two ways to configure a module in the GN build system.
+
+.. code-block::
+
+  # In the toolchain, set either pw_build_DEFAULT_MODULE_CONFIG or pw_foo_CONFIG
+  pw_build_DEFAULT_MODULE_CONFIG = get_path_info(":define_overrides", "abspath")
+
+  # This configuration sets PW_FOO_INPUT_BUFFER_SIZE_BYTES using the -D macro.
+  pw_source_set("define_overrides") {
+    public_configs = [ ":define_options" ]
+  }
+
+  config("define_options") {
+    defines = [ "PW_FOO_INPUT_BUFFER_SIZE_BYTES=256" ]
+  }
+
+  # This configuration sets PW_FOO_INPUT_BUFFER_SIZE_BYTES with a header file.
+  pw_source_set("include_overrides") {
+    public_configs = [ ":header_options" ]
+
+    # Header file with #define PW_FOO_INPUT_BUFFER_SIZE_BYTES 256
+    sources = [ "my_config_overrides.h" ]
+  }
+
+  config("header_options") {
+    cflags = [
+      "-include",
+      "my_config_overrides.h",
+    ]
+  }
+
+.. _docs-module-structure-facades:
+
+Facades
+-------
+In Pigweed, facades represent a dependency that can be swapped at compile time.
+Facades are similar in concept to a virtual interface, but the implementation is
+set by the build system. Runtime polymorphism with facades is not
+possible, and each facade may only have one implementation (backend) per
+toolchain compilation.
+
+In the simplest sense, a facade is just a dependency represented by a variable.
+For example, the ``pw_log`` facade is represented by the ``pw_log_BACKEND``
+build variable. Facades typically are bundled with a build system library that
+depends on the backend.
+
+Facades are essential in some circumstances:
+
+* Low-level, platform-specific features (:ref:`module-pw_cpu_exception`).
+* Features that require a macro or non-virtual function interface
+  (:ref:`module-pw_log`, :ref:`module-pw_assert`).
+* Highly leveraged code where a virtual interface or callback is too costly or
+  cumbersome (:ref:`module-pw_tokenizer`).
+
+.. caution::
+
+  Modules should only use facades when necessary. Facades are permanently locked
+  to a particular implementation at compile time. Multiple backends cannot be
+  used in one build, and runtime dependency injection is not possible, which
+  makes testing difficult. Where appropriate, modules should use other
+  mechanisms, such as virtual interfaces, callbacks, or templates, in place of
+  facades.
+
+The GN build system provides the
+:ref:`pw_facade template<module-pw_build-facade>` as a convenient way to declare
+facades.
+
 Documentation
-~~~~~~~~~~~~~
+-------------
 Documentation should go in the root module folder, typically in the
 ``docs.rst`` file. There must be a docgen entry for the documentation in the
 ``BUILD.gn`` file with the target name ``docs``; so the full target for the
@@ -217,13 +382,18 @@
     docs/image/screenshot.png
     docs/image/diagram.svg
 
-Steps to create a new module for a Pigweed project
---------------------------------------------------
-These instructions are for creating a new module for contribution to the
-Pigweed project. See below for an `example`__ of what the new module folder
-might look like.
+Creating a new Pigweed module
+-----------------------------
+To create a new Pigweed module, follow the below steps.
 
-__ `Example module structure`_
+.. tip::
+
+  Connect with the Pigweed community (by `mailing the Pigweed list
+  <https://groups.google.com/forum/#!forum/pigweed>`_ or `raising your idea
+  in the Pigweed chat <https://discord.gg/M9NSeTA>`_) to discuss your module
+  idea before getting too far into the implementation. This can prevent
+  accidentally duplicating work, or writing code that won't get
+  accepted.
 
 1. Create module folder following `Module name`_ guidelines
 2. Add `C++ public headers`_ files in
@@ -243,7 +413,7 @@
 
 6. Add folder alias for new module variable in ``/modules.gni``
 
-    - dir_pw_new = "$dir_pigweed/pw_new"
+    - ``dir_pw_new = get_path_info("pw_new", "abspath")``
 
 7. Add new module to main GN build
 
@@ -259,7 +429,8 @@
 
     - Add in ``docs/BUILD.gn`` to ``pw_doc_gen("docs")``
 
-11. Run :ref:`chapter-module-module-check`
+11. Run :ref:`module-pw_module-module-check`
 
     - ``$ pw module-check {pw_module_dir}``
 
+12. Contribute your module to upstream Pigweed (optional but encouraged!)
diff --git a/docs/style_guide.rst b/docs/style_guide.rst
index 104fe73..320aac3 100644
--- a/docs/style_guide.rst
+++ b/docs/style_guide.rst
@@ -1,8 +1,4 @@
-.. _chapter-style:
-
-.. default-domain:: cpp
-
-.. highlight:: sh
+.. _docs-pw-style:
 
 ===========================
 Style Guide and Conventions
diff --git a/docs/targets.rst b/docs/targets.rst
index 22784ce..21f4ccc 100644
--- a/docs/targets.rst
+++ b/docs/targets.rst
@@ -1,11 +1,114 @@
-.. _chapter-targets:
+.. _docs-targets:
 
 =======
 Targets
 =======
-Pigweed is designed to support many 32-bit targets. This section contains
-documentation for the targets used for upstream Pigweed development,
-though more may exist outside the main Pigweed repository.
+Pigweed is designed to be portable to many different hardware platforms.
+Pigweed's GN build comes with an extensible target system that allows it to be
+configured to build for any number of platforms, which all build simultaneously.
+
+Defining a target
+=================
+Each Pigweed target built by a project is defined within the GN build as a
+toolchain providing the target's build parameters.
+
+In Pigweed, these target toolchains are defined as GN scopes, which are fed into
+a ``generate_toolchain`` template to create the complete GN toolchain.
+
+Hierarchical target structure
+-----------------------------
+The rationale for scope-based toolchains is to make Pigweed targets extensible.
+Variables from a toolchain can be forwarded into new scopes and then extended
+or overridden. This facilitates the sharing of common configuration options
+between toolchains, and allows for hierarchical structures. Upstream Pigweed
+makes use of this heavily; it defines basic compiler-only configurations, uses
+these as a base for board-specific toolchains, then creates its final targets on
+top of those.
+
+.. blockdiag::
+
+  blockdiag {
+    default_fontsize = 14;
+    orientation = portrait;
+
+    arm_gcc  [label = "arm_gcc"];
+    arm_gcc_cortex_m4  [label = "cortex_m4"];
+    arm_gcc_cortex_m4f  [label = "cortex_m4f"];
+    arm_gcc_cortex_m4f_debug  [label = "cortex_m4f_debug"];
+    arm_gcc_cortex_m4f_size_optimized  [label = "cortex_m4f_size_optimized"];
+    stm32f429i_disc1_debug  [label = "stm32f429i_disc1_debug"];
+    arm_gcc -> arm_gcc_cortex_m4
+    arm_gcc -> arm_gcc_cortex_m4f
+    arm_gcc_cortex_m4f -> arm_gcc_cortex_m4f_debug
+    arm_gcc_cortex_m4f -> arm_gcc_cortex_m4f_size_optimized
+    arm_gcc_cortex_m4f_debug -> stm32f429i_disc1_debug
+  }
+
+Toolchain target variables
+--------------------------
+The core of a toolchain is defining the tools it uses. This is done by setting
+the variables ``ar``, ``cc``, and ``cxx`` to the appropriate compilers. Pigweed
+provides many commonly used compiler configurations in the ``pw_toolchain``
+module.
+
+The rest of a Pigweed target's configuration is listed within a ``defaults``
+scope in its toolchain. Every variable in this scope is an override of a GN
+build argument defined in Pigweed. Some notable arguments include:
+
+* ``default_configs``: A list of GN configs to apply to every ``pw_*`` GN
+  template. This is typically used to set compiler flags, optimization levels,
+  global #defines, etc.
+* ``default_public_deps``: List of GN targets which are added as a dependency
+  to all ``pw_*`` GN targets. This is used to add global module dependencies;
+  for example, in upstream, ``pw_polyfill`` is added here to provide C++17
+  features in C++11/C++14 code.
+* Facade backends: Pigweed defines facades to provide a common interface for
+  core system features such as logging without assuming an implementation.
+  When building a Pigweed target, the implementations for each of these must be
+  chosen. The ``*_BACKEND`` build args that Pigweed defines are used to set
+  these.
+
+There are many other build arguments that can be set, some of which are
+module-specific. A full list can be seen by running ``gn args --list out``,
+and further documentation can be found within their respective modules.
+
+Example Pigweed target
+======================
+The code below demonstrates how a project might configure one of its Pigweed
+targets.
+
+.. code::
+
+  # Prevent gn format from reordering this import.
+  import("//build_overrides/pigweed.gni")
+
+  import("$dir_pw_toolchain/arm_gcc/toolchains.gni")
+  import("$dir_pw_toolchain/generate_toolchain.gni")
+
+  my_target_scope = {
+    # Use Pigweed's Cortex M4 toolchain as a base.
+    _toolchain_base = pw_toolchain_arm_gcc.cortex_m4f_debug
+
+    # Forward everything except the defaults scope from that toolchain.
+    forward_variables_from(_toolchain_base, "*", [ "defaults" ])
+
+    defaults = {
+      # Forward everything from the base toolchain's defaults.
+      forward_variables_from(_toolchain_base.defaults, "*")
+
+      # Extend with custom build arguments for the target.
+      pw_log_BACKEND = dir_pw_log_tokenized
+    }
+  }
+
+  # Create the actual GN toolchain from the scope.
+  generate_toolchain("my_target") {
+    forward_variables_from(my_target_scope, "*")
+  }
+
+Upstream targets
+================
+The following is a list of targets used for upstream Pigweed development.
 
 .. toctree::
   :maxdepth: 1
diff --git a/modules.gni b/modules.gni
index 915a30d..974c2a4 100644
--- a/modules.gni
+++ b/modules.gni
@@ -18,8 +18,10 @@
   # All module variables are prefixed with dir_.
   dir_docker = get_path_info("docker", "abspath")
   dir_pw_allocator = get_path_info("pw_allocator", "abspath")
+  dir_pw_arduino_build = get_path_info("pw_arduino_build", "abspath")
   dir_pw_assert = get_path_info("pw_assert", "abspath")
   dir_pw_assert_basic = get_path_info("pw_assert_basic", "abspath")
+  dir_pw_assert_log = get_path_info("pw_assert_log", "abspath")
   dir_pw_base64 = get_path_info("pw_base64", "abspath")
   dir_pw_bloat = get_path_info("pw_bloat", "abspath")
   dir_pw_blob_store = get_path_info("pw_blob_store", "abspath")
@@ -40,12 +42,16 @@
   dir_pw_kvs = get_path_info("pw_kvs", "abspath")
   dir_pw_log = get_path_info("pw_log", "abspath")
   dir_pw_log_basic = get_path_info("pw_log_basic", "abspath")
+  dir_pw_log_null = get_path_info("pw_log_null", "abspath")
+  dir_pw_log_rpc = get_path_info("pw_log_rpc", "abspath")
   dir_pw_log_tokenized = get_path_info("pw_log_tokenized", "abspath")
   dir_pw_malloc = get_path_info("pw_malloc", "abspath")
   dir_pw_malloc_freelist = get_path_info("pw_malloc_freelist", "abspath")
+  dir_pw_metric = get_path_info("pw_metric", "abspath")
   dir_pw_minimal_cpp_stdlib = get_path_info("pw_minimal_cpp_stdlib", "abspath")
   dir_pw_module = get_path_info("pw_module", "abspath")
   dir_pw_fuzzer = get_path_info("pw_fuzzer", "abspath")
+  dir_pw_package = get_path_info("pw_package", "abspath")
   dir_pw_polyfill = get_path_info("pw_polyfill", "abspath")
   dir_pw_preprocessor = get_path_info("pw_preprocessor", "abspath")
   dir_pw_presubmit = get_path_info("pw_presubmit", "abspath")
@@ -64,8 +70,10 @@
       get_path_info("pw_sys_io_baremetal_lm3s6965evb", "abspath")
   dir_pw_sys_io_baremetal_stm32f429 =
       get_path_info("pw_sys_io_baremetal_stm32f429", "abspath")
+  dir_pw_sys_io_arduino = get_path_info("pw_sys_io_arduino", "abspath")
   dir_pw_sys_io_stdio = get_path_info("pw_sys_io_stdio", "abspath")
   dir_pw_target_runner = get_path_info("pw_target_runner", "abspath")
+  dir_pw_third_party = get_path_info("third_party", "abspath")
   dir_pw_tokenizer = get_path_info("pw_tokenizer", "abspath")
   dir_pw_toolchain = get_path_info("pw_toolchain", "abspath")
   dir_pw_trace = get_path_info("pw_trace", "abspath")
diff --git a/package.json b/package.json
index 55b642c..a37e1c1 100644
--- a/package.json
+++ b/package.json
@@ -5,10 +5,10 @@
   "author": "",
   "license": "Apache-2.0",
   "devDependencies": {
-    "@bazel/jasmine": "^1.7.0",
-    "@bazel/karma": "^1.7.0",
-    "@bazel/rollup": "^1.7.0",
-    "@bazel/typescript": "^1.7.0",
+    "@bazel/jasmine": "^2.2.0",
+    "@bazel/karma": "^2.2.0",
+    "@bazel/rollup": "^2.2.0",
+    "@bazel/typescript": "^2.2.0",
     "@material-ui/core": "^4.10.2",
     "@rollup/plugin-commonjs": "^13.0.0",
     "@rollup/plugin-node-resolve": "^8.0.1",
diff --git a/pw_allocator/BUILD.gn b/pw_allocator/BUILD.gn
index ad852f2..6e8ee54 100644
--- a/pw_allocator/BUILD.gn
+++ b/pw_allocator/BUILD.gn
@@ -12,12 +12,12 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/target_types.gni")
 import("$dir_pw_docgen/docs.gni")
 import("$dir_pw_unit_test/test.gni")
+
 declare_args() {
   pw_allocator_POISON_HEAP = false
 }
@@ -78,7 +78,6 @@
     "$dir_pw_log",
   ]
   sources = [ "freelist_heap.cc" ]
-  sources += public
 }
 
 pw_test_group("tests") {
@@ -108,5 +107,6 @@
 }
 
 pw_doc_group("docs") {
+  inputs = [ "doc_resources/pw_allocator_heap_visualizer_demo.png" ]
   sources = [ "docs.rst" ]
 }
diff --git a/pw_allocator/block.cc b/pw_allocator/block.cc
index f463b13..bbf1a34 100644
--- a/pw_allocator/block.cc
+++ b/pw_allocator/block.cc
@@ -21,11 +21,11 @@
 Status Block::Init(const std::span<std::byte> region, Block** block) {
   // Ensure the region we're given is aligned and sized accordingly
   if (reinterpret_cast<uintptr_t>(region.data()) % alignof(Block) != 0) {
-    return Status::INVALID_ARGUMENT;
+    return Status::InvalidArgument();
   }
 
   if (region.size() < sizeof(Block)) {
-    return Status::INVALID_ARGUMENT;
+    return Status::InvalidArgument();
   }
 
   union {
@@ -43,21 +43,21 @@
 
   aliased.block->prev = nullptr;
   *block = aliased.block;
-#if PW_ALLOCATOR_POISON_ENABLE
+#if defined(PW_ALLOCATOR_POISON_ENABLE) && PW_ALLOCATOR_POISON_ENABLE
   (*block)->PoisonBlock();
 #endif  // PW_ALLOCATOR_POISON_ENABLE
-  return Status::OK;
+  return Status::Ok();
 }
 
 Status Block::Split(size_t head_block_inner_size, Block** new_block) {
   if (new_block == nullptr) {
-    return Status::INVALID_ARGUMENT;
+    return Status::InvalidArgument();
   }
 
   // Don't split used blocks.
   // TODO: Relax this restriction? Flag to enable/disable this check?
   if (Used()) {
-    return Status::FAILED_PRECONDITION;
+    return Status::FailedPrecondition();
   }
 
   // First round the head_block_inner_size up to a alignof(Block) bounary.
@@ -74,7 +74,7 @@
   // (1) Are we trying to allocate a head block larger than the current head
   // block? This may happen because of the alignment above.
   if (aligned_head_block_inner_size > InnerSize()) {
-    return Status::OUT_OF_RANGE;
+    return Status::OutOfRange();
   }
 
   // (2) Does the resulting block have enough space to store the header?
@@ -82,7 +82,7 @@
   // size == sizeof(Block))?
   if (InnerSize() - aligned_head_block_inner_size <
       sizeof(Block) + 2 * PW_ALLOCATOR_POISON_OFFSET) {
-    return Status::RESOURCE_EXHAUSTED;
+    return Status::ResourceExhausted();
   }
 
   // Create the new block inside the current one.
@@ -113,23 +113,23 @@
 
   *new_block = next;
 
-#if PW_ALLOCATOR_POISON_ENABLE
+#if defined(PW_ALLOCATOR_POISON_ENABLE) && PW_ALLOCATOR_POISON_ENABLE
   PoisonBlock();
   (*new_block)->PoisonBlock();
 #endif  // PW_ALLOCATOR_POISON_ENABLE
 
-  return Status::OK;
+  return Status::Ok();
 }
 
 Status Block::MergeNext() {
   // Anything to merge with?
   if (Last()) {
-    return Status::OUT_OF_RANGE;
+    return Status::OutOfRange();
   }
 
   // Is this or the next block in use?
   if (Used() || Next()->Used()) {
-    return Status::FAILED_PRECONDITION;
+    return Status::FailedPrecondition();
   }
 
   // Simply enough, this block's next pointer becomes the next block's
@@ -142,14 +142,14 @@
     Next()->prev = this;
   }
 
-  return Status::OK;
+  return Status::Ok();
 }
 
 Status Block::MergePrev() {
   // We can't merge if we have no previous. After that though, merging with
   // the previous block is just MergeNext from the previous block.
   if (prev == nullptr) {
-    return Status::OUT_OF_RANGE;
+    return Status::OutOfRange();
   }
 
   // WARNING: This class instance will still exist, but technically be invalid
@@ -171,14 +171,14 @@
       break;
     case NEXT_MISMATCHED:
       PW_DCHECK(false,
-                "The 'prev' field in the next block (%p), does not match the "
+                "The 'prev' field in the next block (%p) does not match the "
                 "address of the current block (%p).",
                 Next()->Prev(),
                 this);
       break;
     case PREV_MISMATCHED:
       PW_DCHECK(false,
-                "The 'next' field in the previous block (%p), does not match "
+                "The 'next' field in the previous block (%p) does not match "
                 "the address of the current block (%p).",
                 Prev()->Next(),
                 this);
@@ -208,7 +208,7 @@
     return BlockStatus::PREV_MISMATCHED;
   }
 
-#if PW_ALLOCATOR_POISON_ENABLE
+#if defined(PW_ALLOCATOR_POISON_ENABLE) && PW_ALLOCATOR_POISON_ENABLE
   if (!this->CheckPoisonBytes()) {
     return BlockStatus::POISON_CORRUPTED;
   }
@@ -219,7 +219,7 @@
 // Paint sizeof(void*) bytes before and after the usable space in Block as the
 // randomized function pattern.
 void Block::PoisonBlock() {
-#if PW_ALLOCATOR_POISON_ENABLE
+#if defined(PW_ALLOCATOR_POISON_ENABLE) && PW_ALLOCATOR_POISON_ENABLE
   std::byte* front_region = reinterpret_cast<std::byte*>(this) + sizeof(*this);
   memcpy(front_region, POISON_PATTERN, PW_ALLOCATOR_POISON_OFFSET);
 
@@ -230,7 +230,7 @@
 }
 
 bool Block::CheckPoisonBytes() const {
-#if PW_ALLOCATOR_POISON_ENABLE
+#if defined(PW_ALLOCATOR_POISON_ENABLE) && PW_ALLOCATOR_POISON_ENABLE
   std::byte* front_region = reinterpret_cast<std::byte*>(
       reinterpret_cast<intptr_t>(this) + sizeof(*this));
   if (std::memcmp(front_region, POISON_PATTERN, PW_ALLOCATOR_POISON_OFFSET)) {
diff --git a/pw_allocator/block_test.cc b/pw_allocator/block_test.cc
index c71cd38..0e09579 100644
--- a/pw_allocator/block_test.cc
+++ b/pw_allocator/block_test.cc
@@ -30,7 +30,7 @@
   Block* block = nullptr;
   auto status = Block::Init(std::span(bytes, kN), &block);
 
-  ASSERT_EQ(status, Status::OK);
+  ASSERT_EQ(status, Status::Ok());
   EXPECT_EQ(block->OuterSize(), kN);
   EXPECT_EQ(block->InnerSize(),
             kN - sizeof(Block) - 2 * PW_ALLOCATOR_POISON_OFFSET);
@@ -50,7 +50,7 @@
   Block* block = nullptr;
   auto status = Block::Init(std::span(byte_ptr + 1, kN - 1), &block);
 
-  EXPECT_EQ(status, Status::INVALID_ARGUMENT);
+  EXPECT_EQ(status, Status::InvalidArgument());
 }
 
 TEST(Block, CannotCreateTooSmallBlock) {
@@ -59,7 +59,7 @@
   Block* block = nullptr;
   auto status = Block::Init(std::span(bytes, kN), &block);
 
-  EXPECT_EQ(status, Status::INVALID_ARGUMENT);
+  EXPECT_EQ(status, Status::InvalidArgument());
 }
 
 TEST(Block, CanSplitBlock) {
@@ -73,7 +73,7 @@
   Block* next_block = nullptr;
   auto status = block->Split(kSplitN, &next_block);
 
-  ASSERT_EQ(status, Status::OK);
+  ASSERT_EQ(status, Status::Ok());
   EXPECT_EQ(block->InnerSize(), kSplitN);
   EXPECT_EQ(block->OuterSize(),
             kSplitN + sizeof(Block) + 2 * PW_ALLOCATOR_POISON_OFFSET);
@@ -106,7 +106,7 @@
   Block* next_block = nullptr;
   auto status = block->Split(kSplitN, &next_block);
 
-  ASSERT_EQ(status, Status::OK);
+  ASSERT_EQ(status, Status::Ok());
   EXPECT_EQ(block->InnerSize(), split_len);
   EXPECT_EQ(block->OuterSize(),
             split_len + sizeof(Block) + 2 * PW_ALLOCATOR_POISON_OFFSET);
@@ -159,7 +159,7 @@
   Block* next_block = nullptr;
   auto status = block->Split(kSplitN, &next_block);
 
-  EXPECT_EQ(status, Status::RESOURCE_EXHAUSTED);
+  EXPECT_EQ(status, Status::ResourceExhausted());
   EXPECT_EQ(next_block, nullptr);
 }
 
@@ -172,7 +172,7 @@
   Block::Init(std::span(bytes, kN), &block);
 
   auto status = block->Split(kSplitN, nullptr);
-  EXPECT_EQ(status, Status::INVALID_ARGUMENT);
+  EXPECT_EQ(status, Status::InvalidArgument());
 }
 
 TEST(Block, CannotMakeBlockLargerInSplit) {
@@ -186,7 +186,7 @@
   Block* next_block = nullptr;
   auto status = block->Split(block->InnerSize() + 1, &next_block);
 
-  EXPECT_EQ(status, Status::OUT_OF_RANGE);
+  EXPECT_EQ(status, Status::OutOfRange());
 }
 
 TEST(Block, CannotMakeSecondBlockLargerInSplit) {
@@ -202,7 +202,7 @@
       block->InnerSize() - sizeof(Block) - 2 * PW_ALLOCATOR_POISON_OFFSET + 1,
       &next_block);
 
-  ASSERT_EQ(status, Status::RESOURCE_EXHAUSTED);
+  ASSERT_EQ(status, Status::ResourceExhausted());
   EXPECT_EQ(next_block, nullptr);
 }
 
@@ -217,7 +217,7 @@
   Block* next_block = nullptr;
   auto status = block->Split(0, &next_block);
 
-  ASSERT_EQ(status, Status::OK);
+  ASSERT_EQ(status, Status::Ok());
   EXPECT_EQ(block->InnerSize(), static_cast<size_t>(0));
 }
 
@@ -234,7 +234,7 @@
       block->InnerSize() - sizeof(Block) - 2 * PW_ALLOCATOR_POISON_OFFSET,
       &next_block);
 
-  ASSERT_EQ(status, Status::OK);
+  ASSERT_EQ(status, Status::Ok());
   EXPECT_EQ(next_block->InnerSize(), static_cast<size_t>(0));
 }
 
@@ -267,7 +267,7 @@
 
   Block* next_block = nullptr;
   auto status = block->Split(512, &next_block);
-  EXPECT_EQ(status, Status::FAILED_PRECONDITION);
+  EXPECT_EQ(status, Status::FailedPrecondition());
 }
 
 TEST(Block, CanMergeWithNextBlock) {
@@ -287,7 +287,7 @@
   Block* block3 = nullptr;
   block->Split(kSplit2, &block3);
 
-  EXPECT_EQ(block3->MergeNext(), Status::OK);
+  EXPECT_EQ(block3->MergeNext(), Status::Ok());
 
   EXPECT_EQ(block->Next(), block3);
   EXPECT_EQ(block3->Prev(), block);
@@ -312,8 +312,8 @@
   Block* next_block = nullptr;
   block->Split(512, &next_block);
 
-  EXPECT_EQ(next_block->MergeNext(), Status::OUT_OF_RANGE);
-  EXPECT_EQ(block->MergePrev(), Status::OUT_OF_RANGE);
+  EXPECT_EQ(next_block->MergeNext(), Status::OutOfRange());
+  EXPECT_EQ(block->MergePrev(), Status::OutOfRange());
 }
 
 TEST(Block, CannotMergeUsedBlock) {
@@ -329,8 +329,8 @@
   block->Split(512, &next_block);
 
   block->MarkUsed();
-  EXPECT_EQ(block->MergeNext(), Status::FAILED_PRECONDITION);
-  EXPECT_EQ(next_block->MergePrev(), Status::FAILED_PRECONDITION);
+  EXPECT_EQ(block->MergeNext(), Status::FailedPrecondition());
+  EXPECT_EQ(next_block->MergePrev(), Status::FailedPrecondition());
 }
 
 TEST(Block, CanCheckValidBlock) {
@@ -374,7 +374,7 @@
   EXPECT_EQ(third_block->IsValid(), true);
   EXPECT_EQ(fourth_block->IsValid(), true);
 
-#if PW_ALLOCATOR_POISON_ENABLE
+#if defined(PW_ALLOCATOR_POISON_ENABLE) && PW_ALLOCATOR_POISON_ENABLE
   std::byte fault_poison[PW_ALLOCATOR_POISON_OFFSET] = {std::byte(0)};
   std::byte* front_poison =
       reinterpret_cast<std::byte*>(third_block) + sizeof(*third_block);
@@ -389,7 +389,7 @@
 }
 
 TEST(Block, CanPoisonBlock) {
-#if PW_ALLOCATOR_POISON_ENABLE
+#if defined(PW_ALLOCATOR_POISON_ENABLE) && PW_ALLOCATOR_POISON_ENABLE
   constexpr size_t kN = 1024;
   byte bytes[kN];
 
diff --git a/pw_allocator/doc_resources/pw_allocator_heap_visualizer_demo.png b/pw_allocator/doc_resources/pw_allocator_heap_visualizer_demo.png
new file mode 100644
index 0000000..af14d26
--- /dev/null
+++ b/pw_allocator/doc_resources/pw_allocator_heap_visualizer_demo.png
Binary files differ
diff --git a/pw_allocator/docs.rst b/pw_allocator/docs.rst
index 2149209..853cb08 100644
--- a/pw_allocator/docs.rst
+++ b/pw_allocator/docs.rst
@@ -1,10 +1,8 @@
-.. _chapter-pw-allocator:
+.. _module-pw_allocator:
 
-.. default-domain:: cpp
-
------------
-pw_alloctor
------------
+------------
+pw_allocator
+------------
 
 This module provides various building blocks
 for a dynamic allocator. This is composed of the following parts:
@@ -42,5 +40,66 @@
 will check if the painted space still remains the pattern, and return ``false``
 if the pattern is damaged.
 
+Heap Visualizer
+===============
+
+Functionality
+-------------
+
+``pw_allocator`` supplies a pw command ``pw heap-viewer`` to help visualize
+the state of the heap at the end of a dump file. The heap is represented by
+ASCII characters, where each character represents 4 bytes in the heap.
+
+.. image:: doc_resources/pw_allocator_heap_visualizer_demo.png
+
+Usage
+-----
+
+The heap visualizer can be launched from a shell using the Pigweed environment.
+
+.. code:: sh
+
+  $ pw heap-viewer --dump-file <path of dump file> --heap-low-address
+  <hex address of heap lower address> --heap-high-address <hex address of heap
+  higher address> [options]
+
+The required arguments are:
+
+  - ``--dump-file`` is the path of a file that contains ``malloc/free``
+    information. Each line in the dump file represents a ``malloc/free`` call.
+    ``malloc`` is represented as ``m <size> <memory address>`` and ``free`` is
+    represented as ``f <memory address>``. For example, a dump file should look
+    like:
+
+      .. code:: sh
+
+        m 20 0x20004450  # malloc 20 bytes, the pointer is 0x20004450
+        m 8 0x2000447c   # malloc 8 bytes, the pointer is 0x2000447c
+        f 0x2000447c     # free the pointer at 0x2000447c
+        ...
+
+      Any line not formatted as the above will be ignored.
+
+  - ``--heap-low-address`` is the start of the heap. For example:
+
+      .. code:: sh
+
+        --heap-low-address 0x20004440
+
+  - ``--heap-high-address`` is the end of the heap. For example:
+
+      .. code:: sh
+
+        --heap-high-address 0x20006040
+
+Options include the following:
+
+  - ``--poison-enable``: If heap poisoning is enabled during the
+    allocation or not. The value is ``False`` if the option is not specified and
+    ``True`` otherwise.
+
+  - ``--pointer-size <integer of pointer size>``: The size of a pointer on the
+    machine where ``malloc/free`` is called. The default value is ``4``.
+
 Note, this module, and its documentation, is currently incomplete and
 experimental.
diff --git a/pw_allocator/freelist.cc b/pw_allocator/freelist.cc
index 4629a4f..d46e010 100644
--- a/pw_allocator/freelist.cc
+++ b/pw_allocator/freelist.cc
@@ -19,7 +19,7 @@
 Status FreeList::AddChunk(std::span<std::byte> chunk) {
   // Check that the size is enough to actually store what we need
   if (chunk.size() < sizeof(FreeListNode)) {
-    return Status::OUT_OF_RANGE;
+    return Status::OutOfRange();
   }
 
   union {
@@ -36,7 +36,7 @@
   aliased.node->next = chunks_[chunk_ptr];
   chunks_[chunk_ptr] = aliased.node;
 
-  return Status::OK;
+  return Status::Ok();
 }
 
 std::span<std::byte> FreeList::FindChunk(size_t size) const {
@@ -85,14 +85,14 @@
 
   // Check head first.
   if (chunks_[chunk_ptr] == nullptr) {
-    return Status::NOT_FOUND;
+    return Status::NotFound();
   }
 
   aliased.node = chunks_[chunk_ptr];
   if (aliased.data == chunk.data()) {
     chunks_[chunk_ptr] = aliased.node->next;
 
-    return Status::OK;
+    return Status::Ok();
   }
 
   // No? Walk the nodes.
@@ -103,13 +103,13 @@
     if (aliased_next.data == chunk.data()) {
       // Found it, remove this node out of the chain
       aliased.node->next = aliased_next.node->next;
-      return Status::OK;
+      return Status::Ok();
     }
 
     aliased.node = aliased.node->next;
   }
 
-  return Status::NOT_FOUND;
+  return Status::NotFound();
 }
 
 size_t FreeList::FindChunkPtrForSize(size_t size, bool non_null) const {
diff --git a/pw_allocator/freelist_heap_test.cc b/pw_allocator/freelist_heap_test.cc
index 94a10ef..7746383 100644
--- a/pw_allocator/freelist_heap_test.cc
+++ b/pw_allocator/freelist_heap_test.cc
@@ -111,7 +111,7 @@
   EXPECT_EQ(ptr2_start % alignment, static_cast<size_t>(0));
 }
 
-#if CHECK_TEST_CRASHES
+#if defined(CHECK_TEST_CRASHES) && CHECK_TEST_CRASHES
 
 // TODO(amontanez): Ensure that this test triggers an assert.
 TEST(FreeListHeap, CannotFreeNonOwnedPointer) {
diff --git a/pw_allocator/freelist_test.cc b/pw_allocator/freelist_test.cc
index 0484a7a..865ea3f 100644
--- a/pw_allocator/freelist_test.cc
+++ b/pw_allocator/freelist_test.cc
@@ -43,7 +43,7 @@
   byte data[kN] = {std::byte(0)};
 
   auto status = list.AddChunk(std::span(data, kN));
-  EXPECT_EQ(status, Status::OK);
+  EXPECT_EQ(status, Status::Ok());
 
   auto item = list.FindChunk(kN);
   EXPECT_EQ(item.size(), kN);
@@ -70,7 +70,7 @@
 
   list.AddChunk(std::span(data, kN));
   auto status = list.RemoveChunk(std::span(data, kN));
-  EXPECT_EQ(status, Status::OK);
+  EXPECT_EQ(status, Status::Ok());
 
   auto item = list.FindChunk(kN);
   EXPECT_EQ(item.size(), static_cast<size_t>(0));
@@ -148,7 +148,7 @@
 
   list.AddChunk(std::span(data, kN));
   auto status = list.RemoveChunk(std::span(data2, kN));
-  EXPECT_EQ(status, Status::NOT_FOUND);
+  EXPECT_EQ(status, Status::NotFound());
 }
 
 TEST(FreeList, CanStoreMultipleChunksPerBucket) {
diff --git a/pw_allocator/public/pw_allocator/block.h b/pw_allocator/public/pw_allocator/block.h
index b0cfae4..b82dee9 100644
--- a/pw_allocator/public/pw_allocator/block.h
+++ b/pw_allocator/public/pw_allocator/block.h
@@ -23,7 +23,7 @@
 
 namespace pw::allocator {
 
-#if PW_ALLOCATOR_POISON_ENABLE
+#if defined(PW_ALLOCATOR_POISON_ENABLE) && PW_ALLOCATOR_POISON_ENABLE
 // Add poison offset of sizeof(void*) bytes before and after usable space in all
 // Blocks.
 #define PW_ALLOCATOR_POISON_OFFSET sizeof(void*)
diff --git a/pw_protobuf_compiler/nanopb.gni b/pw_allocator/py/BUILD.gn
similarity index 69%
copy from pw_protobuf_compiler/nanopb.gni
copy to pw_allocator/py/BUILD.gn
index 3c3be32..2baa9eb 100644
--- a/pw_protobuf_compiler/nanopb.gni
+++ b/pw_allocator/py/BUILD.gn
@@ -12,8 +12,15 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-declare_args() {
-  # If compiling protos for nanopb, this variable is set to the path to the
-  # nanopb installation.
-  dir_pw_third_party_nanopb = ""
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/python.gni")
+
+pw_python_package("py") {
+  setup = [ "setup.py" ]
+  sources = [
+    "pw_allocator/__init__.py",
+    "pw_allocator/heap_viewer.py",
+  ]
+  python_deps = [ "$dir_pw_cli/py" ]
 }
diff --git a/pw_allocator/py/pw_allocator/__init__.py b/pw_allocator/py/pw_allocator/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pw_allocator/py/pw_allocator/__init__.py
diff --git a/pw_allocator/py/pw_allocator/heap_viewer.py b/pw_allocator/py/pw_allocator/heap_viewer.py
new file mode 100644
index 0000000..72da6ff
--- /dev/null
+++ b/pw_allocator/py/pw_allocator/heap_viewer.py
@@ -0,0 +1,285 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Heap visualizer of ASCII characters."""
+
+import argparse
+import sys
+import math
+import logging
+from typing import Optional
+from dataclasses import dataclass
+import coloredlogs  # type: ignore
+
+
+@dataclass
+class HeapBlock:
+    """Building blocks for memory chunk allocated at heap."""
+    size: int
+    mem_offset: int
+    next: Optional['HeapBlock'] = None
+
+
+@dataclass
+class HeapUsage:
+    """Contains a linked list of allocated HeapBlocks."""
+    begin: HeapBlock = HeapBlock(0, 0)
+
+    def add_block(self, block):
+        cur_block = self.begin.next
+        prev_block = self.begin
+        while cur_block is not None:
+            if cur_block.mem_offset == block.mem_offset:
+                return
+            if cur_block.mem_offset < block.mem_offset:
+                prev_block = cur_block
+                cur_block = cur_block.next
+            else:
+                block.next = cur_block
+                prev_block.next = block
+                return
+        prev_block.next = block
+
+    def remove_block(self, address):
+        cur_block = self.begin.next
+        prev_block = self.begin
+        while cur_block is not None:
+            if cur_block.mem_offset == address:
+                prev_block.next = cur_block.next
+                return
+            if cur_block.mem_offset < address:
+                prev_block = cur_block
+                cur_block = cur_block.next
+            else:
+                return
+
+
+def add_parser_arguments(parser):
+    parser.add_argument('--dump-file',
+                        help=('dump file that contains a list of malloc and '
+                              'free instructions. The format should be as '
+                              'follows: "m <size> <address>" on a line for '
+                              'each malloc called and "f <address>" on a line '
+                              'for each free called.'),
+                        required=True)
+
+    parser.add_argument('--heap-low-address',
+                        help=('lower address of the heap.'),
+                        type=lambda x: int(x, 0),
+                        required=True)
+
+    parser.add_argument('--heap-high-address',
+                        help=('higher address of the heap.'),
+                        type=lambda x: int(x, 0),
+                        required=True)
+
+    parser.add_argument('--poison-enabled',
+                        help=('if heap poison is enabled or not.'),
+                        default=False,
+                        action='store_true')
+
+    parser.add_argument('--pointer-size',
+                        help=('size of pointer on the machine.'),
+                        default=4,
+                        type=lambda x: int(x, 0))
+
+
+_LEFT_HEADER_CHAR = '['
+_RIGHT_HEADER_CHAR = ']'
+_USED_CHAR = '*'
+_FREE_CHAR = ' '
+_CHARACTERS_PER_LINE = 64
+_BYTES_PER_CHARACTER = 4
+_LOG = logging.getLogger(__name__)
+
+
+def _exit_due_to_file_not_found():
+    _LOG.critical('Dump file location is not provided or dump file is not '
+                  'found. Please specify a valid file in the argument.')
+    sys.exit(1)
+
+
+def _exit_due_to_bad_heap_info():
+    _LOG.critical(
+        'Heap low/high address is missing or invalid. Please put valid '
+        'addresses in the argument.')
+    sys.exit(1)
+
+
+def visualize(dump_file=None,
+              heap_low_address=None,
+              heap_high_address=None,
+              poison_enabled=False,
+              pointer_size=4):
+    """Visualization of heap usage."""
+    # TODO(pwbug/236): Add standardized mechanisms to produce dump file and read
+    # heap information from dump file.
+    aligned_bytes = pointer_size
+    header_size = pointer_size * 2
+
+    try:
+        if heap_high_address < heap_low_address:
+            _exit_due_to_bad_heap_info()
+        heap_size = heap_high_address - heap_low_address
+    except TypeError:
+        _exit_due_to_bad_heap_info()
+
+    if poison_enabled:
+        poison_offset = pointer_size
+    else:
+        poison_offset = 0
+
+    try:
+        allocation_dump = open(dump_file, 'r')
+    except (FileNotFoundError, TypeError):
+        _exit_due_to_file_not_found()
+
+    heap_visualizer = HeapUsage()
+    # Parse the dump file.
+    for line in allocation_dump:
+        info = line[:-1].split(' ')
+        if info[0] == 'm':
+            # Add a HeapBlock when malloc is called
+            block = HeapBlock(
+                int(math.ceil(float(info[1]) / aligned_bytes)) * aligned_bytes,
+                int(info[2], 0) - heap_low_address)
+            heap_visualizer.add_block(block)
+        elif info[0] == 'f':
+            # Remove the HeapBlock when free is called
+            heap_visualizer.remove_block(int(info[1], 0) - heap_low_address)
+
+    # next_block indicates the nearest HeapBlock that hasn't finished
+    # printing.
+    next_block = heap_visualizer.begin
+    if next_block.next is None:
+        next_mem_offset = heap_size + header_size + poison_offset + 1
+        next_size = 0
+    else:
+        next_mem_offset = next_block.next.mem_offset
+        next_size = next_block.next.size
+
+    # Flags to indicate status of the 4 bytes going to be printed.
+    is_left_header = False
+    is_right_header = False
+    is_used = False
+
+    # Print overall heap information
+    _LOG.info('%-40s%-40s', f'The heap starts at {hex(heap_low_address)}.',
+              f'The heap ends at {hex(heap_high_address)}.')
+    _LOG.info('%-40s%-40s', f'Heap size is {heap_size // 1024}k bytes.',
+              f'Heap is aligned by {aligned_bytes} bytes.')
+    if poison_offset != 0:
+        _LOG.info(
+            'Poison is enabled %d bytes before and after the usable '
+            'space of each block.', poison_offset)
+    else:
+        _LOG.info('%-40s', 'Poison is disabled.')
+    _LOG.info(
+        '%-40s', 'Below is the visualization of the heap. '
+        'Each character represents 4 bytes.')
+    _LOG.info('%-40s', f"    '{_FREE_CHAR}' indicates free space.")
+    _LOG.info('%-40s', f"    '{_USED_CHAR}' indicates used space.")
+    _LOG.info(
+        '%-40s', f"    '{_LEFT_HEADER_CHAR}' indicates header or "
+        'poisoned space before the block.')
+    _LOG.info('%-40s', f"    '{_RIGHT_HEADER_CHAR}' poisoned space after "
+              'the block.')
+    print()
+
+    # Go over the heap space where there will be 64 characters each line.
+    for line_base_address in range(0, heap_size, _CHARACTERS_PER_LINE *
+                                   _BYTES_PER_CHARACTER):
+        # Print the heap address of the current line.
+        sys.stdout.write(f"{' ': <13}"
+                         f'{hex(heap_low_address + line_base_address)}'
+                         f"{f' (+{line_base_address}):': <12}")
+        for line_offset in range(0,
+                                 _CHARACTERS_PER_LINE * _BYTES_PER_CHARACTER,
+                                 _BYTES_PER_CHARACTER):
+            # Determine if the current 4 bytes is used, unused, or is a
+            # header.
+            # The case that we have went over the previous block and will
+            # turn to the next block.
+            current_address = line_base_address + line_offset
+            if current_address == next_mem_offset + next_size + poison_offset:
+                next_block = next_block.next
+                # If this is the last block, set next_mem_offset to be over
+                # the last byte of heap so that the rest of the heap will
+                # be printed out as unused.
+                # Otherwise set the next HeapBlock allocated.
+                if next_block.next is None:
+                    next_mem_offset = (heap_size + header_size +
+                                       poison_offset + 1)
+                    next_size = 0
+                else:
+                    next_mem_offset = next_block.next.mem_offset
+                    next_size = next_block.next.size
+
+            # Determine the status of the current 4 bytes.
+            if (next_mem_offset - header_size - poison_offset <=
+                    current_address < next_mem_offset):
+                is_left_header = True
+                is_right_header = False
+                is_used = False
+            elif (next_mem_offset <= current_address <
+                  next_mem_offset + next_size):
+                is_left_header = False
+                is_right_header = False
+                is_used = True
+            elif (next_mem_offset + next_size <= current_address <
+                  next_mem_offset + next_size + poison_offset):
+                is_left_header = False
+                is_right_header = True
+                is_used = False
+            else:
+                is_left_header = False
+                is_right_header = False
+                is_used = False
+
+            if is_left_header:
+                sys.stdout.write(_LEFT_HEADER_CHAR)
+            elif is_right_header:
+                sys.stdout.write(_RIGHT_HEADER_CHAR)
+            elif is_used:
+                sys.stdout.write(_USED_CHAR)
+            else:
+                sys.stdout.write(_FREE_CHAR)
+        sys.stdout.write('\n')
+
+    allocation_dump.close()
+
+
+def main():
+    """A python script to visualize heap usage given a dump file."""
+    parser = argparse.ArgumentParser(description=main.__doc__)
+    add_parser_arguments(parser)
+    # Try to use pw_cli logs, else default to something reasonable.
+    try:
+        import pw_cli.log  # pylint: disable=import-outside-toplevel
+        pw_cli.log.install()
+    except ImportError:
+        coloredlogs.install(level='INFO',
+                            level_styles={
+                                'debug': {
+                                    'color': 244
+                                },
+                                'error': {
+                                    'color': 'red'
+                                }
+                            },
+                            fmt='%(asctime)s %(levelname)s | %(message)s')
+    visualize(**vars(parser.parse_args()))
+
+
+if __name__ == "__main__":
+    main()
diff --git a/pw_allocator/py/pw_allocator/py.typed b/pw_allocator/py/pw_allocator/py.typed
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pw_allocator/py/pw_allocator/py.typed
diff --git a/pw_allocator/py/setup.py b/pw_allocator/py/setup.py
new file mode 100644
index 0000000..9625741
--- /dev/null
+++ b/pw_allocator/py/setup.py
@@ -0,0 +1,30 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""pw_allocator"""
+
+import setuptools  # type: ignore
+
+setuptools.setup(
+    name='pw_allocator',
+    version='0.0.1',
+    author='Pigweed Authors',
+    author_email='pigweed-developers@googlegroups.com',
+    description='Pigweed heap allocator',
+    packages=setuptools.find_packages(),
+    package_data={'pw_allocator': ['py.typed']},
+    zip_safe=False,
+    install_requires=[
+        # 'pw_cli',
+    ],
+)
diff --git a/pw_protobuf_compiler/nanopb.gni b/pw_arduino_build/BUILD
similarity index 62%
copy from pw_protobuf_compiler/nanopb.gni
copy to pw_arduino_build/BUILD
index 3c3be32..a66ce62 100644
--- a/pw_protobuf_compiler/nanopb.gni
+++ b/pw_arduino_build/BUILD
@@ -12,8 +12,20 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-declare_args() {
-  # If compiling protos for nanopb, this variable is set to the path to the
-  # nanopb installation.
-  dir_pw_third_party_nanopb = ""
-}
+load(
+    "//pw_build:pigweed.bzl",
+    "pw_cc_library",
+)
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])  # Apache License 2.0
+
+pw_cc_library(
+    name = "pw_arduino_build",
+    srcs = ["arduino_main_wrapper.cc"],
+    hdrs = ["public/pw_arduino_build/init.h"],
+    deps = [
+        "//pw_sys_io",
+    ],
+)
diff --git a/pw_arduino_build/BUILD.gn b/pw_arduino_build/BUILD.gn
new file mode 100644
index 0000000..db72ea8
--- /dev/null
+++ b/pw_arduino_build/BUILD.gn
@@ -0,0 +1,53 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_arduino_build/arduino.gni")
+import("$dir_pw_build/facade.gni")
+import("$dir_pw_build/target_types.gni")
+import("$dir_pw_docgen/docs.gni")
+
+declare_args() {
+  # Backend for the pw_arduino_init module.
+  pw_arduino_build_INIT_BACKEND = ""
+}
+
+if (dir_pw_third_party_arduino != "") {
+  pw_facade("arduino_init") {
+    backend = pw_arduino_build_INIT_BACKEND
+    public = [ "public/pw_arduino_build/init.h" ]
+    public_configs = [ ":default_config" ]
+  }
+  config("default_config") {
+    include_dirs = [ "public" ]
+  }
+
+  pw_source_set("arduino_main_wrapper") {
+    remove_configs = [ "$dir_pw_build:strict_warnings" ]
+    deps = [
+      ":arduino_init",
+      "$dir_pw_sys_io",
+      "$dir_pw_third_party_arduino:arduino_core_sources",
+    ]
+    sources = [ "arduino_main_wrapper.cc" ]
+  }
+} else {
+  group("arduino_main_wrapper") {
+  }
+}
+
+pw_doc_group("docs") {
+  sources = [ "docs.rst" ]
+}
diff --git a/pw_arduino_build/arduino.gni b/pw_arduino_build/arduino.gni
new file mode 100644
index 0000000..dd1e6f7
--- /dev/null
+++ b/pw_arduino_build/arduino.gni
@@ -0,0 +1,74 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+declare_args() {
+  # Enable/disable Arduino builds via group("arduino").
+  # Set to the full path of ./third_party/arduino
+  dir_pw_third_party_arduino = ""
+
+  # Expected args for an Arduino build:
+  arduino_core_name = "teensy"
+
+  # TODO(tonymd): "teensy/avr" here should match the folders in this dir:
+  # "../third_party/arduino/cores/$arduino_core_name/hardware/*")
+  # For teensy: "teensy/avr", for adafruit-samd: "samd/1.6.2"
+  arduino_package_name = "teensy/avr"
+  arduino_board = "teensy40"
+
+  # Menu options should be a list of strings.
+  arduino_menu_options = [
+    "menu.usb.serial",
+    "menu.keys.en-us",
+  ]
+}
+
+arduino_builder_script =
+    get_path_info("py/pw_arduino_build/__main__.py", "abspath")
+
+_arduino_core_path =
+    rebase_path("../third_party/arduino/cores/$arduino_core_name")
+_compiler_path_override =
+    rebase_path(getenv("_PW_ACTUAL_ENVIRONMENT_ROOT") + "/cipd/pigweed/bin")
+
+arduino_global_args = [
+  "--arduino-package-path",
+  _arduino_core_path,
+  "--arduino-package-name",
+  arduino_package_name,
+  "--compiler-path-override",
+  _compiler_path_override,
+
+  # Save config files to "out/arduino_debug/gen/arduino_builder_config.json"
+  "--config-file",
+  rebase_path(root_gen_dir) + "/arduino_builder_config.json",
+  "--save-config",
+]
+
+arduino_board_args = [
+  "--build-path",
+  rebase_path(root_build_dir),
+  "--board",
+  arduino_board,
+  "--menu-options",
+]
+arduino_board_args += arduino_menu_options
+
+arduino_show_command_args = arduino_global_args + [
+                              "show",
+                              "--delimit-with-newlines",
+                            ] + arduino_board_args
+
+arduino_run_command_args = arduino_global_args + [ "run" ] + arduino_board_args
diff --git a/pw_checksum/ccitt_crc16_test_c.c b/pw_arduino_build/arduino_main_wrapper.cc
similarity index 69%
copy from pw_checksum/ccitt_crc16_test_c.c
copy to pw_arduino_build/arduino_main_wrapper.cc
index 72f3cec..a220630 100644
--- a/pw_checksum/ccitt_crc16_test_c.c
+++ b/pw_arduino_build/arduino_main_wrapper.cc
@@ -12,8 +12,21 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
-#include "pw_checksum/ccitt_crc16.h"
+#include <Arduino.h>
 
-uint16_t CallChecksumCcittCrc16(const void* data, size_t size_bytes) {
-  return pw_ChecksumCcittCrc16(data, size_bytes, 0xFFFF);
+#include "pw_arduino_build/init.h"
+
+extern "C" {
+
+int main();
+
+void setup() {
+  pw_arduino_Init();
+  // Start Pigweed main()
+  main();
 }
+
+// See //pw_arduino_build/docs.rst for caveats on loop() never running.
+void loop() {}
+
+}  // extern "C"
diff --git a/pw_arduino_build/docs.rst b/pw_arduino_build/docs.rst
new file mode 100644
index 0000000..6db47e3
--- /dev/null
+++ b/pw_arduino_build/docs.rst
@@ -0,0 +1,70 @@
+.. _module-pw_arduino_build:
+
+-----------------
+pw_arduino_build
+-----------------
+
+The ``pw_arduino_build`` module contains both the `arduino_builder`_ command
+line utility and an `Arduino Main Wrapper`_.
+
+.. seealso::
+   See the :ref:`target-arduino` target documentation for a list of supported
+   hardware.
+
+Arduino Main Wrapper
+====================
+
+``arduino_main_wrapper.cc`` implements the standard ``setup()`` and ``loop()``
+functions [#f1]_ that are expected in Arduino sketches.
+
+Pigweed executables rely on being able to define the ``main()`` function. This
+is a problem for Arduino code as each core defines its own ``main()``. To get
+around this the Pigweed Arduino target renames ``main()`` to ``ArduinoMain()``
+using a preprocessor macro: ``-Dmain(...)=ArduinoMain()``. This macro only
+applies when compiling Arduino core source files. That frees up ``main()`` to be
+used elsewhere.
+
+Most Arduino cores will do some internal initialization before calling
+``setup()`` followed by ``loop()``. To make sure Pigweed ``main()`` is started
+after that early init we run it within ``setup()``:
+
+.. code-block:: cpp
+
+  void setup() {
+    pw_arduino_Init();
+    // Start Pigweed main()
+    main();
+  }
+
+  void loop() {}
+
+.. note::
+   ``pw_arduino_Init()`` initializes the :ref:`module-pw_sys_io_arduino`
+   module.
+
+.. warning::
+   You may notice ``loop()`` is empty in ``arduino_main_wrapper.cc`` and never
+   called. This will cause any code appearing after ``loop()`` in an Arduino
+   core to never be executed. For most cores this should be ok but may cause
+   issues in some scenarios.
+
+arduino_builder
+===============
+
+``arduino_builder`` is a utility that can extract compile and tooling information
+from an Arduino core. It's used within Pigweed to shovel compiler flags into
+the `GN <https://gn.googlesource.com/gn/>`_ build system. It will also work
+without Pigweed and can be used with other build systems.
+
+Full documentation is pending. For now run ``arduino_builder --help`` for
+details.
+
+.. rubric::
+   Footnotes
+
+.. [#f1]
+   See the Arduino Reference documentation on `setup()
+   <https://www.arduino.cc/reference/en/language/structure/sketch/setup/>`_, and
+   `loop()
+   <https://www.arduino.cc/reference/en/language/structure/sketch/loop/>`_.
+
diff --git a/pw_sys_io_baremetal_lm3s6965evb/early_boot.c b/pw_arduino_build/public/pw_arduino_build/init.h
similarity index 84%
copy from pw_sys_io_baremetal_lm3s6965evb/early_boot.c
copy to pw_arduino_build/public/pw_arduino_build/init.h
index 1670b7d..f98a22c 100644
--- a/pw_sys_io_baremetal_lm3s6965evb/early_boot.c
+++ b/pw_arduino_build/public/pw_arduino_build/init.h
@@ -11,7 +11,12 @@
 // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 // License for the specific language governing permissions and limitations under
 // the License.
+#pragma once
 
-#include "pw_boot_armv7m/boot.h"
+#include "pw_preprocessor/util.h"
 
-void pw_PreStaticConstructorInit() {}
\ No newline at end of file
+PW_EXTERN_C_START
+
+void pw_arduino_Init();
+
+PW_EXTERN_C_END
diff --git a/pw_arduino_build/py/BUILD.gn b/pw_arduino_build/py/BUILD.gn
new file mode 100644
index 0000000..c0f2e00
--- /dev/null
+++ b/pw_arduino_build/py/BUILD.gn
@@ -0,0 +1,38 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/python.gni")
+
+pw_python_package("py") {
+  setup = [ "setup.py" ]
+  sources = [
+    "pw_arduino_build/__init__.py",
+    "pw_arduino_build/__main__.py",
+    "pw_arduino_build/builder.py",
+    "pw_arduino_build/core_installer.py",
+    "pw_arduino_build/file_operations.py",
+    "pw_arduino_build/log.py",
+    "pw_arduino_build/teensy_detector.py",
+    "pw_arduino_build/unit_test_client.py",
+    "pw_arduino_build/unit_test_runner.py",
+    "pw_arduino_build/unit_test_server.py",
+  ]
+  tests = [
+    "builder_test.py",
+    "file_operations_test.py",
+  ]
+  python_deps = [ "$dir_pw_cli/py" ]
+}
diff --git a/pw_arduino_build/py/builder_test.py b/pw_arduino_build/py/builder_test.py
new file mode 100644
index 0000000..c911e89
--- /dev/null
+++ b/pw_arduino_build/py/builder_test.py
@@ -0,0 +1,48 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Tests for arduinobuilder."""
+
+import shlex
+import unittest
+from parameterized import parameterized  # type: ignore
+
+
+class TestShellArgumentSplitting(unittest.TestCase):
+    """Tests to ensure shlex.split handles expected use cases."""
+
+    @parameterized.expand([
+        (
+            "remove-both-quotes",
+            """ -DUSB_CONFIG_POWER=100 """
+            """ '-DUSB_MANUFACTURER="Adafruit LLC"' """
+            """ '-DUSB_PRODUCT="Adafruit PyGamer Advance M4"' """
+            """ "-I$HOME/samd/1.6.2/cores/arduino/TinyUSB" """,
+            [
+                """ -DUSB_CONFIG_POWER=100 """.strip(),
+                """ -DUSB_MANUFACTURER="Adafruit LLC" """.strip(),
+                """ -DUSB_PRODUCT="Adafruit PyGamer Advance M4" """.strip(),
+                """ -I$HOME/samd/1.6.2/cores/arduino/TinyUSB """.strip(),
+            ]
+        )
+    ]) # yapf: disable
+    def test_split_arguments_and_remove_quotes(self, unused_test_name,
+                                               input_string, expected):
+        """Test splitting a string into a list of arguments with quotes removed.
+        """
+        result = shlex.split(input_string)
+        self.assertEqual(result, expected)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/pw_arduino_build/py/file_operations_test.py b/pw_arduino_build/py/file_operations_test.py
new file mode 100644
index 0000000..9d350e6
--- /dev/null
+++ b/pw_arduino_build/py/file_operations_test.py
@@ -0,0 +1,102 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Tests for file_operations module."""
+
+import os
+import shutil
+import tempfile
+import unittest
+from pathlib import Path
+from parameterized import parameterized  # type: ignore
+
+from pw_arduino_build import file_operations
+
+
+def file_set():
+    return [
+        "app.ino",
+        "core/asm.S",
+        "core/asm.s",
+        "core/pwm/pulse.c",
+        "core/pwm/pulse.h",
+        "libraries/a.c",
+        "libraries/b.cpp",
+        "libraries/c.cc",
+        "libraries/c.h",
+    ]
+
+
+def create_files(root_dir, file_names):
+    for file_name in file_names:
+        folder_path = Path(root_dir) / Path(os.path.dirname(file_name))
+        folder_path.mkdir(parents=True, exist_ok=True)
+        file_path = Path(root_dir) / Path(file_name)
+        file_path.touch(exist_ok=True)
+
+
+class TestFileOperations(unittest.TestCase):
+    """Tests to ensure arduino core library source files can be found."""
+    def setUp(self):
+        self.test_dir = tempfile.mkdtemp()
+
+    def tearDown(self):
+        shutil.rmtree(self.test_dir)
+
+    @parameterized.expand([
+        (
+            "sources recursive", file_set(), ["**/*.ino", "**/*.h", "**/*.cpp"],
+            [
+                "app.ino",
+                os.path.join("core", "pwm", "pulse.h"),
+                os.path.join("libraries", "b.cpp"),
+                os.path.join("libraries", "c.h"),
+            ]
+        ),
+        (
+            "directories recursive", file_set(), ["**"],
+            [
+                "core",
+                os.path.join("core", "pwm"),
+                "libraries",
+            ]
+        ),
+        (
+            "directories one level deep", file_set(), ["*"],
+            [
+                "core",
+                "libraries",
+            ]
+        ),
+        (
+            "items one level deep", file_set(), ["*"],
+            [
+                "app.ino",
+                "core",
+                "libraries",
+            ]
+        )
+    ]) # yapf: disable
+    def test_find_files(self, test_case, base_fileset, patterns,
+                        expected_results):
+        """Test find_files on source files and directories."""
+        create_files(self.test_dir, base_fileset)
+        result = file_operations.find_files(self.test_dir,
+                                            patterns,
+                                            directories_only=("directories"
+                                                              in test_case))
+        self.assertSequenceEqual(expected_results, result)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/pw_protobuf_compiler/nanopb.gni b/pw_arduino_build/py/pw_arduino_build/__init__.py
similarity index 78%
copy from pw_protobuf_compiler/nanopb.gni
copy to pw_arduino_build/py/pw_arduino_build/__init__.py
index 3c3be32..6eec505 100644
--- a/pw_protobuf_compiler/nanopb.gni
+++ b/pw_arduino_build/py/pw_arduino_build/__init__.py
@@ -11,9 +11,4 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations under
 # the License.
-
-declare_args() {
-  # If compiling protos for nanopb, this variable is set to the path to the
-  # nanopb installation.
-  dir_pw_third_party_nanopb = ""
-}
+"""This package provides tooling specific to the arduino target."""
diff --git a/pw_arduino_build/py/pw_arduino_build/__main__.py b/pw_arduino_build/py/pw_arduino_build/__main__.py
new file mode 100644
index 0000000..4a17072
--- /dev/null
+++ b/pw_arduino_build/py/pw_arduino_build/__main__.py
@@ -0,0 +1,610 @@
+#!/usr/bin/env python3
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Command line interface for arduino_builder."""
+
+import argparse
+import json
+import logging
+import os
+import pprint
+import shlex
+import subprocess
+import sys
+from collections import OrderedDict
+from typing import List
+
+from pw_arduino_build import core_installer, log
+from pw_arduino_build.builder import ArduinoBuilder
+from pw_arduino_build.file_operations import decode_file_json
+
+_LOG = logging.getLogger(__name__)
+
+_pretty_print = pprint.PrettyPrinter(indent=1, width=120).pprint
+_pretty_format = pprint.PrettyPrinter(indent=1, width=120).pformat
+
+
+class MissingArduinoCore(Exception):
+    """Exception raised when an Arduino core can not be found."""
+
+
+def list_boards_command(unused_args, builder):
+    # list-boards subcommand
+    # (does not need a selected board or default menu options)
+
+    # TODO(tonymd): Print this sorted with auto-ljust columns
+    longest_name_length = 0
+    for board_name, board_dict in builder.board.items():
+        if len(board_name) > longest_name_length:
+            longest_name_length = len(board_name)
+    longest_name_length += 2
+
+    print("Board Name".ljust(longest_name_length), "Description")
+    for board_name, board_dict in builder.board.items():
+        print(board_name.ljust(longest_name_length), board_dict['name'])
+    sys.exit(0)
+
+
+def list_menu_options_command(args, builder):
+    # List all menu options for the selected board.
+    builder.select_board(args.board)
+
+    print("All Options")
+    all_options, all_column_widths = builder.get_menu_options()
+    separator = "-" * (all_column_widths[0] + all_column_widths[1] + 2)
+    print(separator)
+
+    for name, description in all_options:
+        print(name.ljust(all_column_widths[0] + 1), description)
+
+    print("\nDefault Options")
+    print(separator)
+
+    menu_options, unused_col_widths = builder.get_default_menu_options()
+    for name, description in menu_options:
+        print(name.ljust(all_column_widths[0] + 1), description)
+
+
+def show_command_print_string_list(args, string_list: List[str]):
+    if string_list:
+        join_token = "\n" if args.delimit_with_newlines else " "
+        print(join_token.join(string_list))
+
+
+def show_command_print_flag_string(args, flag_string):
+    if args.delimit_with_newlines:
+        flag_string_with_newlines = shlex.split(flag_string)
+        print("\n".join(flag_string_with_newlines))
+    else:
+        print(flag_string)
+
+
+def subtract_flags(flag_list_a: List[str],
+                   flag_list_b: List[str]) -> List[str]:
+    """Given two sets of flags return flags in a that are not in b."""
+    flag_counts = OrderedDict()  # type: OrderedDict[str, int]
+    for flag in flag_list_a + flag_list_b:
+        flag_counts[flag] = flag_counts.get(flag, 0) + 1
+    return [flag for flag in flag_list_a if flag_counts.get(flag, 0) == 1]
+
+
+def run_command_lines(args, command_lines: List[str]):
+    for command_line in command_lines:
+        if not args.quiet:
+            print(command_line)
+        # TODO(tonymd): Exit with sub command exit code.
+        command_line_args = shlex.split(command_line)
+        process = subprocess.run(command_line_args,
+                                 stdout=subprocess.PIPE,
+                                 stderr=subprocess.STDOUT)
+        if process.returncode != 0:
+            _LOG.error('Command failed with exit code %d.', process.returncode)
+            _LOG.error('Full command:')
+            _LOG.error('')
+            _LOG.error('  %s', command_line)
+            _LOG.error('')
+            _LOG.error('Process output:')
+            print(flush=True)
+            sys.stdout.buffer.write(process.stdout)
+            print(flush=True)
+            _LOG.error('')
+
+
+def run_command(args, builder):
+    """Run sub command function.
+
+    Runs Arduino recipes.
+    """
+
+    if args.run_prebuilds:
+        run_command_lines(args, builder.get_prebuild_steps())
+
+    if args.run_link:
+        line = builder.get_link_line()
+        archive_file_path = args.run_link[0]  # pylint: disable=unused-variable
+        object_files = args.run_link[1:]
+        line = line.replace("{object_files}", " ".join(object_files), 1)
+        run_command_lines(args, [line])
+
+    if args.run_objcopy:
+        run_command_lines(args, builder.get_objcopy_steps())
+
+    if args.run_postbuilds:
+        run_command_lines(args, builder.get_postbuild_steps())
+
+    if args.run_upload_command:
+        command = builder.get_upload_line(args.run_upload_command,
+                                          args.serial_port)
+        run_command_lines(args, [command])
+
+
+# pylint: disable=too-many-branches
+def show_command(args, builder):
+    """Show sub command function.
+
+    Prints compiler info and flags.
+    """
+    if args.cc_binary:
+        print(builder.get_cc_binary())
+
+    elif args.cxx_binary:
+        print(builder.get_cxx_binary())
+
+    elif args.objcopy_binary:
+        print(builder.get_objcopy_binary())
+
+    elif args.ar_binary:
+        print(builder.get_ar_binary())
+
+    elif args.size_binary:
+        print(builder.get_size_binary())
+
+    elif args.c_compile:
+        print(builder.get_c_compile_line())
+
+    elif args.cpp_compile:
+        print(builder.get_cpp_compile_line())
+
+    elif args.link:
+        print(builder.get_link_line())
+
+    elif args.objcopy:
+        print(builder.get_objcopy(args.objcopy))
+
+    elif args.objcopy_flags:
+        objcopy_flags = builder.get_objcopy_flags(args.objcopy_flags)
+        show_command_print_flag_string(args, objcopy_flags)
+
+    elif args.c_flags:
+        cflags = builder.get_c_flags()
+        show_command_print_flag_string(args, cflags)
+
+    elif args.s_flags:
+        sflags = builder.get_s_flags()
+        show_command_print_flag_string(args, sflags)
+
+    elif args.s_only_flags:
+        s_only_flags = subtract_flags(shlex.split(builder.get_s_flags()),
+                                      shlex.split(builder.get_c_flags()))
+        show_command_print_flag_string(args, " ".join(s_only_flags))
+
+    elif args.cpp_flags:
+        cppflags = builder.get_cpp_flags()
+        show_command_print_flag_string(args, cppflags)
+
+    elif args.cpp_only_flags:
+        cpp_only_flags = subtract_flags(shlex.split(builder.get_cpp_flags()),
+                                        shlex.split(builder.get_c_flags()))
+        show_command_print_flag_string(args, " ".join(cpp_only_flags))
+
+    elif args.ld_flags:
+        ldflags = builder.get_ld_flags()
+        show_command_print_flag_string(args, ldflags)
+
+    elif args.ld_libs:
+        show_command_print_flag_string(args, builder.get_ld_libs())
+
+    elif args.ld_lib_names:
+        show_command_print_flag_string(args,
+                                       builder.get_ld_libs(name_only=True))
+
+    elif args.ar_flags:
+        ar_flags = builder.get_ar_flags()
+        show_command_print_flag_string(args, ar_flags)
+
+    elif args.core_path:
+        print(builder.get_core_path())
+
+    elif args.prebuilds:
+        show_command_print_string_list(args, builder.get_prebuild_steps())
+
+    elif args.postbuilds:
+        show_command_print_string_list(args, builder.get_postbuild_steps())
+
+    elif args.upload_command:
+        print(builder.get_upload_line(args.upload_command, args.serial_port))
+
+    elif args.upload_tools:
+        tools = builder.get_upload_tool_names()
+        for tool_name in tools:
+            print(tool_name)
+
+    elif args.library_include_dirs:
+        show_command_print_string_list(args, builder.library_include_dirs())
+
+    elif args.library_includes:
+        show_command_print_string_list(args, builder.library_includes())
+
+    elif args.library_c_files:
+        show_command_print_string_list(args, builder.library_c_files())
+
+    elif args.library_s_files:
+        show_command_print_string_list(args, builder.library_s_files())
+
+    elif args.library_cpp_files:
+        show_command_print_string_list(args, builder.library_cpp_files())
+
+    elif args.core_c_files:
+        show_command_print_string_list(args, builder.core_c_files())
+
+    elif args.core_s_files:
+        show_command_print_string_list(args, builder.core_s_files())
+
+    elif args.core_cpp_files:
+        show_command_print_string_list(args, builder.core_cpp_files())
+
+    elif args.variant_c_files:
+        vfiles = builder.variant_c_files()
+        if vfiles:
+            show_command_print_string_list(args, vfiles)
+
+    elif args.variant_s_files:
+        vfiles = builder.variant_s_files()
+        if vfiles:
+            show_command_print_string_list(args, vfiles)
+
+    elif args.variant_cpp_files:
+        vfiles = builder.variant_cpp_files()
+        if vfiles:
+            show_command_print_string_list(args, vfiles)
+
+
+def add_common_parser_args(parser, serial_port, build_path, build_project_name,
+                           project_path, project_source_path):
+    """Add command line options common to the run and show commands."""
+    parser.add_argument(
+        "--serial-port",
+        default=serial_port,
+        help="Serial port for flashing. Default: '{}'".format(serial_port))
+    parser.add_argument(
+        "--build-path",
+        default=build_path,
+        help="Build directory. Default: '{}'".format(build_path))
+    parser.add_argument(
+        "--project-path",
+        default=project_path,
+        help="Project directory. Default: '{}'".format(project_path))
+    parser.add_argument(
+        "--project-source-path",
+        default=project_source_path,
+        help="Project directory. Default: '{}'".format(project_source_path))
+    parser.add_argument("--library-path",
+                        default="libraries",
+                        help="Path to Arduino Library directory.")
+    parser.add_argument(
+        "--build-project-name",
+        default=build_project_name,
+        help="Project name. Default: '{}'".format(build_project_name))
+    parser.add_argument("--board",
+                        required=True,
+                        help="Name of the board to use.")
+    # nargs="+" is one or more args, e.g:
+    #   --menu-options menu.usb.serialhid menu.speed.150
+    parser.add_argument(
+        "--menu-options",
+        nargs="+",
+        type=str,
+        metavar="menu.usb.serial",
+        help="Desired Arduino menu options. See the "
+        "'list-menu-options' subcommand for available options.")
+    parser.add_argument("--set-variable",
+                        action="append",
+                        metavar='some.variable=NEW_VALUE',
+                        help="Override an Arduino recipe variable. May be "
+                        "specified multiple times. For example: "
+                        "--set-variable 'serial.port.label=/dev/ttyACM0' "
+                        "--set-variable 'serial.port.protocol=Teensy'")
+
+
+def check_for_missing_args(args):
+    if args.arduino_package_path is None:
+        raise MissingArduinoCore(
+            "Please specify the location of an Arduino core using "
+            "'--arduino-package-path' and '--arduino-package-name'.")
+
+
+# TODO(tonymd): These defaults don't make sense anymore and should be removed.
+def get_default_options():
+    defaults = {}
+    defaults["build_path"] = os.path.realpath(
+        os.path.expanduser(
+            os.path.expandvars(os.path.join(os.getcwd(), "build"))))
+    defaults["project_path"] = os.path.realpath(
+        os.path.expanduser(os.path.expandvars(os.getcwd())))
+    defaults["project_source_path"] = os.path.join(defaults["project_path"],
+                                                   "src")
+    defaults["build_project_name"] = os.path.basename(defaults["project_path"])
+    defaults["serial_port"] = "UNKNOWN"
+    return defaults
+
+
+def load_config_file(args):
+    """Load a config file and merge with command line options.
+
+    Command line takes precedence over values loaded from a config file."""
+
+    if args.save_config and not args.config_file:
+        raise FileNotFoundError(
+            "'--save-config' requires the '--config-file' option")
+
+    if not args.config_file:
+        return
+
+    default_options = get_default_options()
+
+    commandline_options = {
+        # Global option
+        "arduino_package_path": args.arduino_package_path,
+        "arduino_package_name": args.arduino_package_name,
+        "compiler_path_override": args.compiler_path_override,
+        # These options may not exist unless show or run command
+        "build_path": getattr(args, "build_path", None),
+        "project_path": getattr(args, "project_path", None),
+        "project_source_path": getattr(args, "project_source_path", None),
+        "build_project_name": getattr(args, "build_project_name", None),
+        "board": getattr(args, "board", None),
+        "menu_options": getattr(args, "menu_options", None),
+    }
+
+    # Decode JSON config file.
+    json_file_options, config_file_path = decode_file_json(args.config_file)
+
+    # Merge config file with command line options.
+    merged_options = {}
+    for key, value in commandline_options.items():
+        # Use the command line specified option by default
+        merged_options[key] = value
+
+        # Is this option in the config file?
+        if json_file_options.get(key, None) is not None:
+            # Use the json defined option if it's not set on the command
+            # line (or is a default value).
+            if value is None or value == default_options.get(key, None):
+                merged_options[key] = json_file_options[key]
+
+    # Update args namespace to matched merged_options.
+    for key, value in merged_options.items():
+        setattr(args, key, value)
+
+    # Write merged_options if --save-config.
+    if args.save_config:
+        encoded_json = json.dumps(merged_options, indent=4)
+        # Create parent directories
+        os.makedirs(os.path.dirname(config_file_path), exist_ok=True)
+        # Save json file.
+        with open(config_file_path, "w") as jfile:
+            jfile.write(encoded_json)
+
+
+def _parse_args() -> argparse.Namespace:
+    """Setup argparse and parse command line args."""
+    def log_level(arg: str) -> int:
+        try:
+            return getattr(logging, arg.upper())
+        except AttributeError:
+            raise argparse.ArgumentTypeError(
+                f'{arg.upper()} is not a valid log level')
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-q",
+                        "--quiet",
+                        help="hide run command output",
+                        action="store_true")
+    parser.add_argument('-l',
+                        '--loglevel',
+                        type=log_level,
+                        default=logging.INFO,
+                        help='Set the log level '
+                        '(debug, info, warning, error, critical)')
+
+    default_options = get_default_options()
+
+    # Global command line options
+    parser.add_argument("--arduino-package-path",
+                        help="Path to the arduino IDE install location.")
+    parser.add_argument("--arduino-package-name",
+                        help="Name of the Arduino board package to use.")
+    parser.add_argument("--compiler-path-override",
+                        help="Path to arm-none-eabi-gcc bin folder. "
+                        "Default: Arduino core specified gcc")
+    parser.add_argument("-c", "--config-file", help="Path to a config file.")
+    parser.add_argument("--save-config",
+                        action="store_true",
+                        help="Save command line arguments to the config file.")
+
+    # Subcommands
+    subparsers = parser.add_subparsers(title="subcommand",
+                                       description="valid subcommands",
+                                       help="sub-command help",
+                                       dest="subcommand",
+                                       required=True)
+
+    # install-core command
+    install_core_parser = subparsers.add_parser(
+        "install-core", help="Download and install arduino cores")
+    install_core_parser.set_defaults(func=core_installer.install_core_command)
+    install_core_parser.add_argument("--prefix",
+                                     required=True,
+                                     help="Path to install core files.")
+    install_core_parser.add_argument(
+        "--core-name",
+        required=True,
+        choices=core_installer.supported_cores(),
+        help="Name of the arduino core to install.")
+
+    # list-boards command
+    list_boards_parser = subparsers.add_parser("list-boards",
+                                               help="show supported boards")
+    list_boards_parser.set_defaults(func=list_boards_command)
+
+    # list-menu-options command
+    list_menu_options_parser = subparsers.add_parser(
+        "list-menu-options",
+        help="show available menu options for selected board")
+    list_menu_options_parser.set_defaults(func=list_menu_options_command)
+    list_menu_options_parser.add_argument("--board",
+                                          required=True,
+                                          help="Name of the board to use.")
+
+    # show command
+    show_parser = subparsers.add_parser("show",
+                                        help="Return compiler information.")
+    add_common_parser_args(show_parser, default_options["serial_port"],
+                           default_options["build_path"],
+                           default_options["build_project_name"],
+                           default_options["project_path"],
+                           default_options["project_source_path"])
+    show_parser.add_argument("--delimit-with-newlines",
+                             help="Separate flag output with newlines.",
+                             action="store_true")
+    show_parser.add_argument("--library-names", nargs="+", type=str)
+
+    output_group = show_parser.add_mutually_exclusive_group(required=True)
+    output_group.add_argument("--c-compile", action="store_true")
+    output_group.add_argument("--cpp-compile", action="store_true")
+    output_group.add_argument("--link", action="store_true")
+    output_group.add_argument("--c-flags", action="store_true")
+    output_group.add_argument("--s-flags", action="store_true")
+    output_group.add_argument("--s-only-flags", action="store_true")
+    output_group.add_argument("--cpp-flags", action="store_true")
+    output_group.add_argument("--cpp-only-flags", action="store_true")
+    output_group.add_argument("--ld-flags", action="store_true")
+    output_group.add_argument("--ar-flags", action="store_true")
+    output_group.add_argument("--ld-libs", action="store_true")
+    output_group.add_argument("--ld-lib-names", action="store_true")
+    output_group.add_argument("--objcopy", help="objcopy step for SUFFIX")
+    output_group.add_argument("--objcopy-flags",
+                              help="objcopy flags for SUFFIX")
+    output_group.add_argument("--core-path", action="store_true")
+    output_group.add_argument("--cc-binary", action="store_true")
+    output_group.add_argument("--cxx-binary", action="store_true")
+    output_group.add_argument("--ar-binary", action="store_true")
+    output_group.add_argument("--objcopy-binary", action="store_true")
+    output_group.add_argument("--size-binary", action="store_true")
+    output_group.add_argument("--prebuilds",
+                              action="store_true",
+                              help="Show prebuild step commands.")
+    output_group.add_argument("--postbuilds",
+                              action="store_true",
+                              help="Show postbuild step commands.")
+    output_group.add_argument("--upload-tools", action="store_true")
+    output_group.add_argument("--upload-command")
+    output_group.add_argument("--library-includes", action="store_true")
+    output_group.add_argument("--library-include-dirs", action="store_true")
+    output_group.add_argument("--library-c-files", action="store_true")
+    output_group.add_argument("--library-s-files", action="store_true")
+    output_group.add_argument("--library-cpp-files", action="store_true")
+    output_group.add_argument("--core-c-files", action="store_true")
+    output_group.add_argument("--core-s-files", action="store_true")
+    output_group.add_argument("--core-cpp-files", action="store_true")
+    output_group.add_argument("--variant-c-files", action="store_true")
+    output_group.add_argument("--variant-s-files", action="store_true")
+    output_group.add_argument("--variant-cpp-files", action="store_true")
+
+    show_parser.set_defaults(func=show_command)
+
+    # run command
+    run_parser = subparsers.add_parser("run", help="Run Arduino recipes.")
+    add_common_parser_args(run_parser, default_options["serial_port"],
+                           default_options["build_path"],
+                           default_options["build_project_name"],
+                           default_options["project_path"],
+                           default_options["project_source_path"])
+    run_parser.add_argument("--run-link",
+                            nargs="+",
+                            type=str,
+                            help="Run the link command. Expected arguments: "
+                            "the archive file followed by all obj files.")
+    run_parser.add_argument("--run-objcopy", action="store_true")
+    run_parser.add_argument("--run-prebuilds", action="store_true")
+    run_parser.add_argument("--run-postbuilds", action="store_true")
+    run_parser.add_argument("--run-upload-command")
+
+    run_parser.set_defaults(func=run_command)
+
+    return parser.parse_args()
+
+
+def main():
+    """Main command line function.
+
+    Dispatches command line invocations to sub `*_command()` functions.
+    """
+    # Parse command line arguments.
+    args = _parse_args()
+    _LOG.debug(_pretty_format(args))
+
+    log.install(args.loglevel)
+
+    # Check for and set alternate compiler path.
+    if args.compiler_path_override:
+        # Get absolute path
+        compiler_path_override = os.path.realpath(
+            os.path.expanduser(os.path.expandvars(
+                args.compiler_path_override)))
+        args.compiler_path_override = compiler_path_override
+
+    load_config_file(args)
+
+    if args.subcommand == "install-core":
+        args.func(args)
+    elif args.subcommand in ["list-boards", "list-menu-options"]:
+        check_for_missing_args(args)
+        builder = ArduinoBuilder(args.arduino_package_path,
+                                 args.arduino_package_name)
+        builder.load_board_definitions()
+        args.func(args, builder)
+    else:  # args.subcommand in ["run", "show"]
+        check_for_missing_args(args)
+        builder = ArduinoBuilder(
+            args.arduino_package_path,
+            args.arduino_package_name,
+            build_path=args.build_path,
+            build_project_name=args.build_project_name,
+            project_path=args.project_path,
+            project_source_path=args.project_source_path,
+            library_path=getattr(args, 'library_path', None),
+            library_names=getattr(args, 'library_names', None),
+            compiler_path_override=args.compiler_path_override)
+        builder.load_board_definitions()
+        builder.select_board(args.board, args.menu_options)
+        if args.set_variable:
+            builder.set_variables(args.set_variable)
+        args.func(args, builder)
+
+    sys.exit(0)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/pw_arduino_build/py/pw_arduino_build/builder.py b/pw_arduino_build/py/pw_arduino_build/builder.py
new file mode 100755
index 0000000..fcd77e4
--- /dev/null
+++ b/pw_arduino_build/py/pw_arduino_build/builder.py
@@ -0,0 +1,1073 @@
+#!/usr/bin/env python3
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Extracts build information from Arduino cores."""
+
+import glob
+import logging
+import os
+import platform
+import pprint
+import re
+import sys
+import time
+from collections import OrderedDict
+from pathlib import Path
+from typing import List
+
+from pw_arduino_build import file_operations
+
+_LOG = logging.getLogger(__name__)
+
+_pretty_print = pprint.PrettyPrinter(indent=1, width=120).pprint
+_pretty_format = pprint.PrettyPrinter(indent=1, width=120).pformat
+
+
+def arduino_runtime_os_string():
+    arduno_platform = {
+        "Linux": "linux",
+        "Windows": "windows",
+        "Darwin": "macosx"
+    }
+    return arduno_platform[platform.system()]
+
+
+class ArduinoBuilder:
+    """Used to interpret arduino boards.txt and platform.txt files."""
+    # pylint: disable=too-many-instance-attributes,too-many-public-methods
+
+    BOARD_MENU_REGEX = re.compile(
+        r"^(?P<name>menu\.[^#=]+)=(?P<description>.*)$", re.MULTILINE)
+
+    BOARD_NAME_REGEX = re.compile(
+        r"^(?P<name>[^\s#\.]+)\.name=(?P<description>.*)$", re.MULTILINE)
+
+    VARIABLE_REGEX = re.compile(r"^(?P<name>[^\s#=]+)=(?P<value>.*)$",
+                                re.MULTILINE)
+
+    MENU_OPTION_REGEX = re.compile(
+        r"^menu\."  # starts with "menu"
+        r"(?P<menu_option_name>[^.]+)\."  # first token after .
+        r"(?P<menu_option_value>[^.]+)$")  # second (final) token after .
+
+    TOOL_NAME_REGEX = re.compile(
+        r"^tools\."  # starts with "tools"
+        r"(?P<tool_name>[^.]+)\.")  # first token after .
+
+    INTERPOLATED_VARIABLE_REGEX = re.compile(r"{[^}]+}", re.MULTILINE)
+
+    OBJCOPY_STEP_NAME_REGEX = re.compile(r"^recipe.objcopy.([^.]+).pattern$")
+
+    def __init__(self,
+                 arduino_path,
+                 package_name,
+                 build_path=None,
+                 project_path=None,
+                 project_source_path=None,
+                 library_path=None,
+                 library_names=None,
+                 build_project_name=None,
+                 compiler_path_override=False):
+        self.arduino_path = arduino_path
+        self.arduino_package_name = package_name
+        self.selected_board = None
+        self.build_path = build_path
+        self.project_path = project_path
+        self.project_source_path = project_source_path
+        self.build_project_name = build_project_name
+        self.compiler_path_override = compiler_path_override
+        self.variant_includes = ""
+        self.build_variant_path = False
+        if library_names and library_path:
+            self.library_names = library_names
+            self.library_path = os.path.realpath(
+                os.path.expanduser(os.path.expandvars(library_path)))
+
+        self.compiler_path_override_binaries = []
+        if self.compiler_path_override:
+            self.compiler_path_override_binaries = file_operations.find_files(
+                self.compiler_path_override, "*")
+
+        # Container dicts for boards.txt and platform.txt file data.
+        self.board = OrderedDict()
+        self.platform = OrderedDict()
+        self.menu_options = OrderedDict({
+            "global_options": {},
+            "default_board_values": {},
+            "selected": {}
+        })
+        self.tools_variables = {}
+
+        # Set and check for valid hardware folder.
+        self.hardware_path = os.path.join(self.arduino_path, "hardware")
+
+        if not os.path.exists(self.hardware_path):
+            raise FileNotFoundError(
+                "Arduino package path '{}' does not exist.".format(
+                    self.arduino_path))
+
+        # Set and check for valid package name
+        self.package_path = os.path.join(self.arduino_path, "hardware",
+                                         package_name)
+        # {build.arch} is the first folder name of the package (upcased)
+        self.build_arch = os.path.split(package_name)[0].upper()
+
+        if not os.path.exists(self.package_path):
+            _LOG.error("Error: Arduino package name '%s' does not exist.",
+                       package_name)
+            _LOG.error("Did you mean:\n")
+            # TODO(tonymd): On Windows concatenating "/" may not work
+            possible_alternatives = [
+                d.replace(self.hardware_path + os.sep, "", 1)
+                for d in glob.glob(self.hardware_path + "/*/*")
+            ]
+            _LOG.error("\n".join(possible_alternatives))
+            sys.exit(1)
+
+        # Grab all folder names in the cores directory. These are typically
+        # sub-core source files.
+        self.sub_core_folders = os.listdir(
+            os.path.join(self.package_path, "cores"))
+
+        self._find_tools_variables()
+
+        self.boards_txt = os.path.join(self.package_path, "boards.txt")
+        self.platform_txt = os.path.join(self.package_path, "platform.txt")
+
+    def select_board(self, board_name, menu_option_overrides=False):
+        self.selected_board = board_name
+
+        # Load default menu options for a selected board.
+        if not self.selected_board in self.board.keys():
+            _LOG.error("Error board: '%s' not supported.", self.selected_board)
+            # TODO(tonymd): Print supported boards here
+            sys.exit(1)
+
+        # Override default menu options if any are specified.
+        if menu_option_overrides:
+            for moption in menu_option_overrides:
+                if not self.set_menu_option(moption):
+                    # TODO(tonymd): Print supported menu options here
+                    sys.exit(1)
+
+        self._copy_default_menu_options_to_build_variables()
+        self._apply_recipe_overrides()
+        self._substitute_variables()
+
+    def set_variables(self, variable_list: List[str]):
+        # Convert the string list containing 'name=value' items into a dict
+        variable_source = {}
+        for var in variable_list:
+            var_name, value = var.split("=")
+            variable_source[var_name] = value
+
+        # Replace variables in platform
+        for var, value in self.platform.items():
+            self.platform[var] = self._replace_variables(
+                value, variable_source)
+
    def _apply_recipe_overrides(self):
        """Patch core-specific quirks into the loaded platform.txt recipes.

        Rewrites the link recipe (and for Teensy also the C++ compile
        recipe) so the archive comes from {archive_file_path} rather than
        {build.path}/{archive_file}. Dispatches on self.build_arch.
        """
        # Override link recipes with per-core exceptions
        # Teensyduino cores
        if self.build_arch == 'TEENSY':
            # Change {build.path}/{archive_file}
            # To {archive_file_path} (which should contain the core.a file)
            new_link_line = self.platform["recipe.c.combine.pattern"].replace(
                "{object_files} \"{build.path}/{archive_file}\"",
                "{object_files} {archive_file_path}", 1)
            # Add the teensy provided toolchain lib folder for link access to
            # libarm_cortexM*_math.a
            new_link_line = new_link_line.replace(
                "\"-L{build.path}\"",
                "\"-L{build.path}\" -L{compiler.path}/arm/arm-none-eabi/lib",
                1)
            self.platform["recipe.c.combine.pattern"] = new_link_line
            # Remove the pre-compiled header include
            self.platform["recipe.cpp.o.pattern"] = self.platform[
                "recipe.cpp.o.pattern"].replace("\"-I{build.path}/pch\"", "",
                                                1)

        # Adafruit-samd core
        # TODO(tonymd): This build_arch may clash with Arduino-SAMD core
        elif self.build_arch == 'SAMD':
            new_link_line = self.platform["recipe.c.combine.pattern"].replace(
                "\"{build.path}/{archive_file}\" -Wl,--end-group",
                "{archive_file_path} -Wl,--end-group", 1)
            self.platform["recipe.c.combine.pattern"] = new_link_line

        # STM32L4 Core:
        # https://github.com/GrumpyOldPizza/arduino-STM32L4
        elif self.build_arch == 'STM32L4':
            # TODO(tonymd): {build.path}/{archive_file} for the link step always
            # seems to be core.a (except STM32 core)
            line_to_delete = "-Wl,--start-group \"{build.path}/{archive_file}\""
            new_link_line = self.platform["recipe.c.combine.pattern"].replace(
                line_to_delete, "-Wl,--start-group {archive_file_path}", 1)
            self.platform["recipe.c.combine.pattern"] = new_link_line

        # stm32duino core
        elif self.build_arch == 'STM32':
            # No recipe overrides are needed for the stm32duino core.
            pass
+
+    def _copy_default_menu_options_to_build_variables(self):
+        # Clear existing options
+        self.menu_options["selected"] = {}
+        # Set default menu options for selected board
+        for menu_key, menu_dict in self.menu_options["default_board_values"][
+                self.selected_board].items():
+            for name, var in self.board[self.selected_board].items():
+                starting_key = "{}.{}.".format(menu_key, menu_dict["name"])
+                if name.startswith(starting_key):
+                    new_var_name = name.replace(starting_key, "", 1)
+                    self.menu_options["selected"][new_var_name] = var
+
    def set_menu_option(self, moption):
        """Override one menu option (e.g. 'menu.usb.serial') for the board.

        Returns:
            True on success; False when moption is not a variable of the
            selected board.
        """
        if moption not in self.board[self.selected_board]:
            _LOG.error("Error: '%s' is not a valid menu option.", moption)
            return False

        # Override default menu option with new value.
        menu_match_result = self.MENU_OPTION_REGEX.match(moption)
        if menu_match_result:
            menu_match = menu_match_result.groupdict()
            menu_value = menu_match["menu_option_value"]
            menu_key = "menu.{}".format(menu_match["menu_option_name"])
            self.menu_options["default_board_values"][
                self.selected_board][menu_key]["name"] = menu_value

        # Update build variables
        self._copy_default_menu_options_to_build_variables()
        return True
+
    def _set_global_arduino_variables(self):
        """Set some global variables defined by the Arduino-IDE.

        Mirrors the predefined properties the Arduino IDE injects into every
        board definition (paths, IDE version, tool locations). See Docs:
        https://arduino.github.io/arduino-cli/platform-specification/#global-predefined-properties
        """

        # TODO(tonymd): Make sure these variables are replaced in recipe lines
        # even if they are None: build_path, project_path, project_source_path,
        # build_project_name
        for current_board_name in self.board.keys():
            if self.build_path:
                self.board[current_board_name]["build.path"] = self.build_path
            if self.build_project_name:
                self.board[current_board_name][
                    "build.project_name"] = self.build_project_name
                # {archive_file} is the final *.elf
                archive_file = "{}.elf".format(self.build_project_name)
                self.board[current_board_name]["archive_file"] = archive_file
                # {archive_file_path} is the final core.a archive
                if self.build_path:
                    self.board[current_board_name][
                        "archive_file_path"] = os.path.join(
                            self.build_path, "core.a")
            if self.project_source_path:
                self.board[current_board_name][
                    "build.source.path"] = self.project_source_path

            # Seconds since the epoch, as the IDE would provide it.
            self.board[current_board_name]["extra.time.local"] = str(
                int(time.time()))
            # Mimic Arduino IDE version 1.8.12.
            self.board[current_board_name]["runtime.ide.version"] = "10812"
            self.board[current_board_name][
                "runtime.hardware.path"] = self.hardware_path

            # Copy {runtime.tools.TOOL_NAME.path} vars
            self._set_tools_variables(self.board[current_board_name])

            self.board[current_board_name][
                "runtime.platform.path"] = self.package_path
            if self.platform["name"] == "Teensyduino":
                # Teensyduino is installed into the arduino IDE folder
                # rather than ~/.arduino15/packages/
                self.board[current_board_name][
                    "runtime.hardware.path"] = os.path.join(
                        self.hardware_path, "teensy")

            self.board[current_board_name]["build.system.path"] = os.path.join(
                self.package_path, "system")

            # Set the {build.core.path} variable that points to a sub-core
            # folder. For Teensys this is:
            # 'teensy/hardware/teensy/avr/cores/teensy{3,4}'. For other cores
            # it's typically just the 'arduino' folder. For example:
            # 'arduino-samd/hardware/samd/1.8.8/cores/arduino'
            core_path = Path(self.package_path) / "cores"
            core_path /= self.board[current_board_name].get(
                "build.core", self.sub_core_folders[0])
            self.board[current_board_name][
                "build.core.path"] = core_path.as_posix()

            self.board[current_board_name]["build.arch"] = self.build_arch

            # Inline-expand {build.core.path} in every existing board var.
            for name, var in self.board[current_board_name].items():
                self.board[current_board_name][name] = var.replace(
                    "{build.core.path}", core_path.as_posix())
+
    def load_board_definitions(self):
        """Loads Arduino boards.txt and platform.txt files into dictionaries.

        Populates the following dictionaries:
            self.menu_options
            self.board
            self.platform
        """
        # Load platform.txt
        with open(self.platform_txt, "r") as pfile:
            platform_file = pfile.read()
            platform_var_matches = self.VARIABLE_REGEX.finditer(platform_file)
            for p_match in [m.groupdict() for m in platform_var_matches]:
                self.platform[p_match["name"]] = p_match["value"]

        # Load boards.txt
        with open(self.boards_txt, "r") as bfile:
            board_file = bfile.read()
            # Get all top-level menu options, e.g. menu.usb=USB Type
            board_menu_matches = self.BOARD_MENU_REGEX.finditer(board_file)
            for menuitem in [m.groupdict() for m in board_menu_matches]:
                self.menu_options["global_options"][menuitem["name"]] = {
                    "description": menuitem["description"]
                }

            # Get all board names, e.g. teensy40.name=Teensy 4.0
            board_name_matches = self.BOARD_NAME_REGEX.finditer(board_file)
            for b_match in [m.groupdict() for m in board_name_matches]:
                self.board[b_match["name"]] = OrderedDict()
                self.menu_options["default_board_values"][
                    b_match["name"]] = OrderedDict()

            # Get all board variables, e.g. teensy40.*
            for current_board_name in self.board.keys():
                board_line_matches = re.finditer(
                    fr"^\s*{current_board_name}\."
                    fr"(?P<key>[^#=]+)=(?P<value>.*)$", board_file,
                    re.MULTILINE)
                for b_match in [m.groupdict() for m in board_line_matches]:
                    # Check if this line is a menu option
                    # (e.g. 'menu.usb.serial') and save as default if it's the
                    # first one seen.
                    ArduinoBuilder.save_default_menu_option(
                        current_board_name, b_match["key"], b_match["value"],
                        self.menu_options)
                    self.board[current_board_name][
                        b_match["key"]] = b_match["value"].strip()

            # Fill in IDE-provided globals now that all boards are loaded.
            self._set_global_arduino_variables()
+
+    @staticmethod
+    def save_default_menu_option(current_board_name, key, value, menu_options):
+        """Save a given menu option as the default.
+
+        Saves the key and value into menu_options["default_board_values"]
+        if it doesn't already exist. Assumes menu options are added in the order
+        specified in boards.txt. The first value for a menu key is the default.
+        """
+        # Check if key is a menu option
+        # e.g. menu.usb.serial
+        #      menu.usb.serial.build.usbtype
+        menu_match_result = re.match(
+            r'^menu\.'  # starts with "menu"
+            r'(?P<menu_option_name>[^.]+)\.'  # first token after .
+            r'(?P<menu_option_value>[^.]+)'  # second token after .
+            r'(\.(?P<rest>.+))?',  # optionally any trailing tokens after a .
+            key)
+        if menu_match_result:
+            menu_match = menu_match_result.groupdict()
+            current_menu_key = "menu.{}".format(menu_match["menu_option_name"])
+            # If this is the first menu option seen for current_board_name, save
+            # as the default.
+            if current_menu_key not in menu_options["default_board_values"][
+                    current_board_name]:
+                menu_options["default_board_values"][current_board_name][
+                    current_menu_key] = {
+                        "name": menu_match["menu_option_value"],
+                        "description": value
+                    }
+
+    def _replace_variables(self, line, variable_lookup_source):
+        """Replace {variables} from loaded boards.txt or platform.txt.
+
+        Replace interpolated variables surrounded by curly braces in line with
+        definitions from variable_lookup_source.
+        """
+        new_line = line
+        for current_var_match in self.INTERPOLATED_VARIABLE_REGEX.findall(
+                line):
+            # {build.flags.c} --> build.flags.c
+            current_var = current_var_match.strip("{}")
+
+            # check for matches in board definition
+            if current_var in variable_lookup_source:
+                replacement = variable_lookup_source.get(current_var, "")
+                new_line = new_line.replace(current_var_match, replacement)
+        return new_line
+
    def _find_tools_variables(self):
        """Populate self.tools_variables with 'runtime.tools.*.path' entries.

        Scans Arduino IDE and package tool directories; for tools organized
        into per-version subfolders, one variable per version is created
        plus an alias pointing at the latest version.
        """
        # Gather tool directories in order of increasing precedence
        runtime_tool_paths = []

        # Check for tools installed in ~/.arduino15/packages/arduino/tools/
        # TODO(tonymd): Is this Mac & Linux specific?
        runtime_tool_paths += glob.glob(
            os.path.join(
                os.path.realpath(os.path.expanduser(os.path.expandvars("~"))),
                ".arduino15", "packages", "arduino", "tools", "*"))

        # <ARDUINO_PATH>/tools/<OS_STRING>/<TOOL_NAMES>
        runtime_tool_paths += glob.glob(
            os.path.join(self.arduino_path, "tools",
                         arduino_runtime_os_string(), "*"))
        # <ARDUINO_PATH>/tools/<TOOL_NAMES>
        # This will grab linux/windows/macosx/share as <TOOL_NAMES>.
        runtime_tool_paths += glob.glob(
            os.path.join(self.arduino_path, "tools", "*"))

        # Process package tools after arduino tools.
        # They should overwrite vars & take precedence.

        # <PACKAGE_PATH>/tools/<OS_STRING>/<TOOL_NAMES>
        runtime_tool_paths += glob.glob(
            os.path.join(self.package_path, "tools",
                         arduino_runtime_os_string(), "*"))
        # <PACKAGE_PATH>/tools/<TOOL_NAMES>
        # This will grab linux/windows/macosx/share as <TOOL_NAMES>.
        runtime_tool_paths += glob.glob(
            os.path.join(self.package_path, "tools", "*"))

        for path in runtime_tool_paths:
            # Make sure TOOL_NAME is not an OS string
            if not (path.endswith("linux") or path.endswith("windows")
                    or path.endswith("macosx") or path.endswith("share")):
                # TODO(tonymd): Check if a file & do nothing?

                # Check if it's a directory with subdir(s) as a version string
                #   create all 'runtime.tools.{tool_folder}-{version.path}'
                #     (for each version)
                #   create 'runtime.tools.{tool_folder}.path'
                #     (with latest version)
                if os.path.isdir(path):
                    # Grab the tool name (folder) by itself.
                    tool_folder = os.path.basename(path)
                    # Sort so that [-1] is the latest version.
                    # NOTE(review): this is a lexicographic sort, so '10.0.0'
                    # orders before '9.0.0' -- TODO confirm this is intended.
                    version_paths = sorted(glob.glob(os.path.join(path, "*")))
                    # Check if all sub folders start with a version string.
                    if len(version_paths) == sum(
                            bool(re.match(r"^[0-9.]+", os.path.basename(vp)))
                            for vp in version_paths):
                        for version_path in version_paths:
                            version_string = os.path.basename(version_path)
                            var_name = "runtime.tools.{}-{}.path".format(
                                tool_folder, version_string)
                            self.tools_variables[var_name] = os.path.join(
                                path, version_string)
                        var_name = "runtime.tools.{}.path".format(tool_folder)
                        self.tools_variables[var_name] = os.path.join(
                            path, os.path.basename(version_paths[-1]))
                    # Else set toolpath to path.
                    else:
                        var_name = "runtime.tools.{}.path".format(tool_folder)
                        self.tools_variables[var_name] = path

        _LOG.debug("TOOL VARIABLES: %s", _pretty_format(self.tools_variables))
+
+    # Copy self.tools_variables into destination
+    def _set_tools_variables(self, destination):
+        for key, value in self.tools_variables.items():
+            destination[key] = value
+
    def _substitute_variables(self):
        """Perform variable substitution in board and platform metadata.

        Expansion order: menu options into the board, the board into itself,
        then the board into platform recipes, and finally two platform-into-
        platform passes for variables that reference other platform vars.
        """

        # menu -> board
        # Copy selected menu variables into board definition
        for name, value in self.menu_options["selected"].items():
            self.board[self.selected_board][name] = value

        # board -> board
        # Replace any {vars} in the selected board with values defined within
        # (and from copied in menu options).
        for var, value in self.board[self.selected_board].items():
            self.board[self.selected_board][var] = self._replace_variables(
                value, self.board[self.selected_board])

        # Check for build.variant variable
        # This will be set in selected board after menu options substitution
        build_variant = self.board[self.selected_board].get(
            "build.variant", None)
        if build_variant:
            # Set build.variant.path
            bvp = os.path.join(self.package_path, "variants", build_variant)
            self.build_variant_path = bvp
            self.board[self.selected_board]["build.variant.path"] = bvp
            # Add the variant folder as an include directory
            # (used in stm32l4 core)
            self.variant_includes = "-I{}".format(bvp)

        _LOG.debug("PLATFORM INITIAL: %s", _pretty_format(self.platform))

        # board -> platform
        # Replace {vars} in platform from the selected board definition
        for var, value in self.platform.items():
            self.platform[var] = self._replace_variables(
                value, self.board[self.selected_board])

        # platform -> platform
        # Replace any remaining {vars} in platform from platform
        for var, value in self.platform.items():
            self.platform[var] = self._replace_variables(value, self.platform)

        # Repeat platform -> platform for any lingering variables
        # Example: {build.opt.name} in STM32 core
        for var, value in self.platform.items():
            self.platform[var] = self._replace_variables(value, self.platform)

        _LOG.debug("MENU_OPTIONS: %s", _pretty_format(self.menu_options))
        _LOG.debug("SELECTED_BOARD: %s",
                   _pretty_format(self.board[self.selected_board]))
        _LOG.debug("PLATFORM: %s", _pretty_format(self.platform))
+
+    def selected_board_spec(self):
+        return self.board[self.selected_board]
+
+    def get_menu_options(self):
+        all_options = []
+        max_string_length = [0, 0]
+
+        for key_name, description in self.board[self.selected_board].items():
+            menu_match_result = self.MENU_OPTION_REGEX.match(key_name)
+            if menu_match_result:
+                menu_match = menu_match_result.groupdict()
+                name = "menu.{}.{}".format(menu_match["menu_option_name"],
+                                           menu_match["menu_option_value"])
+                if len(name) > max_string_length[0]:
+                    max_string_length[0] = len(name)
+                if len(description) > max_string_length[1]:
+                    max_string_length[1] = len(description)
+                all_options.append((name, description))
+
+        return all_options, max_string_length
+
+    def get_default_menu_options(self):
+        default_options = []
+        max_string_length = [0, 0]
+
+        for key_name, value in self.menu_options["default_board_values"][
+                self.selected_board].items():
+            full_key = key_name + "." + value["name"]
+            if len(full_key) > max_string_length[0]:
+                max_string_length[0] = len(full_key)
+            if len(value["description"]) > max_string_length[1]:
+                max_string_length[1] = len(value["description"])
+            default_options.append((full_key, value["description"]))
+
+        return default_options, max_string_length
+
+    @staticmethod
+    def split_binary_from_arguments(compile_line):
+        compile_binary = None
+        rest_of_line = compile_line
+
+        compile_binary_match = re.search(r'^("[^"]+") ', compile_line)
+        if compile_binary_match:
+            compile_binary = compile_binary_match[1]
+            rest_of_line = compile_line.replace(compile_binary_match[0], "", 1)
+
+        return compile_binary, rest_of_line
+
+    def _strip_includes_source_file_object_file_vars(self, compile_line):
+        line = compile_line
+        if self.variant_includes:
+            line = compile_line.replace(
+                "{includes} \"{source_file}\" -o \"{object_file}\"",
+                self.variant_includes, 1)
+        else:
+            line = compile_line.replace(
+                "{includes} \"{source_file}\" -o \"{object_file}\"", "", 1)
+        return line
+
+    def _get_tool_name(self, line):
+        tool_match_result = self.TOOL_NAME_REGEX.match(line)
+        if tool_match_result:
+            return tool_match_result[1]
+        return False
+
+    def get_upload_tool_names(self):
+        return [
+            self._get_tool_name(t) for t in self.platform.keys()
+            if self.TOOL_NAME_REGEX.match(t) and 'upload.pattern' in t
+        ]
+
+    # TODO(tonymd): Use these getters in _replace_variables() or
+    # _substitute_variables()
+
+    def _get_platform_variable(self, variable):
+        # TODO(tonymd): Check for '.macos' '.linux' '.windows' in variable name,
+        # compare with platform.system() and return that instead.
+        return self.platform.get(variable, False)
+
    def _get_platform_variable_with_substitutions(self, variable, namespace):
        """Fetch a platform variable and expand its {vars} within namespace.

        Each interpolated {var} is looked up as 'namespace.var', preferring
        an OS-specific 'namespace.var.<os>' override when one exists.
        """
        line = self.platform.get(variable, False)
        # NOTE(review): when `variable` is missing, line is False and the
        # findall() below will raise -- presumably callers always pass a
        # key they have validated; verify.
        # Get all unique variables used in this line in line.
        unique_vars = sorted(
            set(self.INTERPOLATED_VARIABLE_REGEX.findall(line)))
        # Search for each unique_vars in namespace and global.
        for var in unique_vars:
            v_raw_name = var.strip("{}")

            # Check for namespace.variable
            #   eg: 'tools.stm32CubeProg.cmd'
            possible_var_name = "{}.{}".format(namespace, v_raw_name)
            result = self._get_platform_variable(possible_var_name)
            # Check for os overriden variable
            #   eg:
            #     ('tools.stm32CubeProg.cmd', 'stm32CubeProg.sh'),
            #     ('tools.stm32CubeProg.cmd.windows', 'stm32CubeProg.bat'),
            possible_var_name = "{}.{}.{}".format(namespace, v_raw_name,
                                                  arduino_runtime_os_string())
            os_override_result = self._get_platform_variable(possible_var_name)

            if os_override_result:
                line = line.replace(var, os_override_result)
            elif result:
                line = line.replace(var, result)
            # Check for variable at top level?
            # elif self._get_platform_variable(v_raw_name):
            #     line = line.replace(self._get_platform_variable(v_raw_name),
            #                         result)
        return line
+
    def get_upload_line(self, tool_name, serial_port=False):
        """Return the upload command line for the given tool.

        Args:
            tool_name: one of get_upload_tool_names().
            serial_port: device substituted for {serial.port}; pass False or
                "UNKNOWN" to drop the port argument (teensyloader only).

        Exits the process when the tool has no upload.pattern recipe.
        """
        # TODO(tonymd): Error if tool_name does not exist
        tool_namespace = "tools.{}".format(tool_name)
        pattern = "tools.{}.upload.pattern".format(tool_name)

        if not self._get_platform_variable(pattern):
            _LOG.error("Error: upload tool '%s' does not exist.", tool_name)
            tools = self.get_upload_tool_names()
            _LOG.error("Valid tools: %s", ", ".join(tools))
            return sys.exit(1)

        line = self._get_platform_variable_with_substitutions(
            pattern, tool_namespace)

        # TODO(tonymd): Teensy specific tool overrides.
        if tool_name == "teensyloader":
            # Remove un-necessary lines
            # {serial.port.label} and {serial.port.protocol} are returned by
            # the teensy_ports binary.
            line = line.replace("\"-portlabel={serial.port.label}\"", "", 1)
            line = line.replace("\"-portprotocol={serial.port.protocol}\"", "",
                                1)

            if serial_port == "UNKNOWN" or not serial_port:
                line = line.replace('"-port={serial.port}"', "", 1)
            else:
                line = line.replace("{serial.port}", serial_port, 1)

        return line
+
+    def _get_binary_path(self, variable_pattern):
+        compile_line = self.replace_compile_binary_with_override_path(
+            self._get_platform_variable(variable_pattern))
+        compile_binary, _ = ArduinoBuilder.split_binary_from_arguments(
+            compile_line)
+        return compile_binary
+
+    def get_cc_binary(self):
+        return self._get_binary_path("recipe.c.o.pattern")
+
+    def get_cxx_binary(self):
+        return self._get_binary_path("recipe.cpp.o.pattern")
+
+    def get_objcopy_binary(self):
+        objcopy_step_name = self.get_objcopy_step_names()[0]
+        objcopy_binary = self._get_binary_path(objcopy_step_name)
+        return objcopy_binary
+
+    def get_ar_binary(self):
+        return self._get_binary_path("recipe.ar.pattern")
+
+    def get_size_binary(self):
+        return self._get_binary_path("recipe.size.pattern")
+
+    def replace_command_args_with_compiler_override_path(self, compile_line):
+        if not self.compiler_path_override:
+            return compile_line
+        replacement_line = compile_line
+        replacement_line_args = compile_line.split()
+        for arg in replacement_line_args:
+            compile_binary_basename = os.path.basename(arg.strip("\""))
+            if compile_binary_basename in self.compiler_path_override_binaries:
+                new_compiler = os.path.join(self.compiler_path_override,
+                                            compile_binary_basename)
+                replacement_line = replacement_line.replace(
+                    arg, new_compiler, 1)
+        return replacement_line
+
+    def replace_compile_binary_with_override_path(self, compile_line):
+        replacement_compile_line = compile_line
+
+        # Change the compiler path if there's an override path set
+        if self.compiler_path_override:
+            compile_binary, line = ArduinoBuilder.split_binary_from_arguments(
+                compile_line)
+            compile_binary_basename = os.path.basename(
+                compile_binary.strip("\""))
+            new_compiler = os.path.join(self.compiler_path_override,
+                                        compile_binary_basename)
+            if platform.system() == "Windows" and not re.match(
+                    r".*\.exe$", new_compiler, flags=re.IGNORECASE):
+                new_compiler += ".exe"
+
+            if os.path.isfile(new_compiler):
+                replacement_compile_line = "\"{}\" {}".format(
+                    new_compiler, line)
+
+        return replacement_compile_line
+
+    def get_c_compile_line(self):
+        _LOG.debug("ARDUINO_C_COMPILE: %s",
+                   _pretty_format(self.platform["recipe.c.o.pattern"]))
+
+        compile_line = self.platform["recipe.c.o.pattern"]
+        compile_line = self._strip_includes_source_file_object_file_vars(
+            compile_line)
+        compile_line += " -I{}".format(
+            self.board[self.selected_board]["build.core.path"])
+
+        compile_line = self.replace_compile_binary_with_override_path(
+            compile_line)
+        return compile_line
+
+    def get_s_compile_line(self):
+        _LOG.debug("ARDUINO_S_COMPILE %s",
+                   _pretty_format(self.platform["recipe.S.o.pattern"]))
+
+        compile_line = self.platform["recipe.S.o.pattern"]
+        compile_line = self._strip_includes_source_file_object_file_vars(
+            compile_line)
+        compile_line += " -I{}".format(
+            self.board[self.selected_board]["build.core.path"])
+
+        compile_line = self.replace_compile_binary_with_override_path(
+            compile_line)
+        return compile_line
+
+    def get_ar_compile_line(self):
+        _LOG.debug("ARDUINO_AR_COMPILE: %s",
+                   _pretty_format(self.platform["recipe.ar.pattern"]))
+
+        compile_line = self.platform["recipe.ar.pattern"].replace(
+            "\"{object_file}\"", "", 1)
+
+        compile_line = self.replace_compile_binary_with_override_path(
+            compile_line)
+        return compile_line
+
+    def get_cpp_compile_line(self):
+        _LOG.debug("ARDUINO_CPP_COMPILE: %s",
+                   _pretty_format(self.platform["recipe.cpp.o.pattern"]))
+
+        compile_line = self.platform["recipe.cpp.o.pattern"]
+        compile_line = self._strip_includes_source_file_object_file_vars(
+            compile_line)
+        compile_line += " -I{}".format(
+            self.board[self.selected_board]["build.core.path"])
+
+        compile_line = self.replace_compile_binary_with_override_path(
+            compile_line)
+        return compile_line
+
+    def get_link_line(self):
+        _LOG.debug("ARDUINO_LINK: %s",
+                   _pretty_format(self.platform["recipe.c.combine.pattern"]))
+
+        compile_line = self.platform["recipe.c.combine.pattern"]
+
+        compile_line = self.replace_compile_binary_with_override_path(
+            compile_line)
+        return compile_line
+
+    def get_objcopy_step_names(self):
+        names = [
+            name for name, line in self.platform.items()
+            if self.OBJCOPY_STEP_NAME_REGEX.match(name)
+        ]
+        return names
+
+    def get_objcopy_steps(self) -> List[str]:
+        lines = [
+            line for name, line in self.platform.items()
+            if self.OBJCOPY_STEP_NAME_REGEX.match(name)
+        ]
+        lines = [
+            self.replace_compile_binary_with_override_path(line)
+            for line in lines
+        ]
+        return lines
+
    # TODO(tonymd): These recipes are probably run in sorted order
    def get_objcopy(self, suffix):
        """Return the objcopy recipe line for the given output suffix.

        Exits the process when no recipe.objcopy.<suffix>.pattern exists.
        """
        # Expected vars:
        # teensy:
        #   recipe.objcopy.eep.pattern
        #   recipe.objcopy.hex.pattern

        pattern = "recipe.objcopy.{}.pattern".format(suffix)
        objcopy_step_names = self.get_objcopy_step_names()

        # Collect the valid suffixes for the error message below.
        objcopy_suffixes = [
            m[1] for m in [
                self.OBJCOPY_STEP_NAME_REGEX.match(line)
                for line in objcopy_step_names
            ] if m
        ]
        if pattern not in objcopy_step_names:
            _LOG.error("Error: objcopy suffix '%s' does not exist.", suffix)
            _LOG.error("Valid suffixes: %s", ", ".join(objcopy_suffixes))
            return sys.exit(1)

        line = self._get_platform_variable(pattern)

        _LOG.debug("ARDUINO_OBJCOPY_%s: %s", suffix, line)

        line = self.replace_compile_binary_with_override_path(line)

        return line
+
+    def get_objcopy_flags(self, suffix):
+        # TODO(tonymd): Possibly teensy specific variables.
+        flags = ""
+        if suffix == "hex":
+            flags = self.platform.get("compiler.elf2hex.flags", "")
+        elif suffix == "bin":
+            flags = self.platform.get("compiler.elf2bin.flags", "")
+        elif suffix == "eep":
+            flags = self.platform.get("compiler.objcopy.eep.flags", "")
+        return flags
+
+    # TODO(tonymd): There are more recipe hooks besides postbuild.
+    #   They are run in sorted order.
+    # TODO(tonymd): Rename this to get_hooks(hook_name, step).
+    # TODO(tonymd): Add a list-hooks and or run-hooks command
+    def get_postbuild_line(self, step_number):
+        line = self.platform["recipe.hooks.postbuild.{}.pattern".format(
+            step_number)]
+        line = self.replace_command_args_with_compiler_override_path(line)
+        return line
+
+    def get_prebuild_steps(self) -> List[str]:
+        # Teensy core uses recipe.hooks.sketch.prebuild.1.pattern
+        # stm32 core uses recipe.hooks.prebuild.1.pattern
+        # TODO(tonymd): STM32 core uses recipe.hooks.prebuild.1.pattern.windows
+        #   (should override non-windows key)
+        lines = [
+            line for name, line in self.platform.items() if re.match(
+                r"^recipe.hooks.(?:sketch.)?prebuild.[^.]+.pattern$", name)
+        ]
+        # TODO(tonymd): Write a function to fetch/replace OS specific patterns
+        #   (ending in an OS string)
+        lines = [
+            self.replace_compile_binary_with_override_path(line)
+            for line in lines
+        ]
+        return lines
+
+    def get_postbuild_steps(self) -> List[str]:
+        lines = [
+            line for name, line in self.platform.items()
+            if re.match(r"^recipe.hooks.postbuild.[^.]+.pattern$", name)
+        ]
+
+        lines = [
+            self.replace_command_args_with_compiler_override_path(line)
+            for line in lines
+        ]
+        return lines
+
+    def get_s_flags(self):
+        compile_line = self.get_s_compile_line()
+        _, compile_line = ArduinoBuilder.split_binary_from_arguments(
+            compile_line)
+        compile_line = compile_line.replace("-c", "", 1)
+        return compile_line.strip()
+
+    def get_c_flags(self):
+        compile_line = self.get_c_compile_line()
+        _, compile_line = ArduinoBuilder.split_binary_from_arguments(
+            compile_line)
+        compile_line = compile_line.replace("-c", "", 1)
+        return compile_line.strip()
+
+    def get_cpp_flags(self):
+        compile_line = self.get_cpp_compile_line()
+        _, compile_line = ArduinoBuilder.split_binary_from_arguments(
+            compile_line)
+        compile_line = compile_line.replace("-c", "", 1)
+        return compile_line.strip()
+
+    def get_ar_flags(self):
+        compile_line = self.get_ar_compile_line()
+        _, compile_line = ArduinoBuilder.split_binary_from_arguments(
+            compile_line)
+        return compile_line.strip()
+
+    def get_ld_flags(self):
+        compile_line = self.get_link_line()
+        _, compile_line = ArduinoBuilder.split_binary_from_arguments(
+            compile_line)
+
+        # TODO(tonymd): This is teensy specific
+        line_to_delete = "-o \"{build.path}/{build.project_name}.elf\" " \
+            "{object_files} \"-L{build.path}\""
+        if self.build_path:
+            line_to_delete = line_to_delete.replace("{build.path}",
+                                                    self.build_path)
+        if self.build_project_name:
+            line_to_delete = line_to_delete.replace("{build.project_name}",
+                                                    self.build_project_name)
+
+        compile_line = compile_line.replace(line_to_delete, "", 1)
+        libs = re.findall(r'(-l[^ ]+ ?)', compile_line)
+        for lib in libs:
+            compile_line = compile_line.replace(lib, "", 1)
+        libs = [lib.strip() for lib in libs]
+
+        return compile_line.strip()
+
+    def get_ld_libs(self, name_only=False):
+        compile_line = self.get_link_line()
+        libs = re.findall(r'(?P<arg>-l(?P<name>[^ ]+) ?)', compile_line)
+        if name_only:
+            libs = [lib_name.strip() for lib_arg, lib_name in libs]
+        else:
+            libs = [lib_arg.strip() for lib_arg, lib_name in libs]
+        return " ".join(libs)
+
+    def library_folders(self):
+        # Arduino library format documentation:
+        # https://arduino.github.io/arduino-cli/library-specification/#layout-of-folders-and-files
+        # - If src folder exists,
+        #   use that as the root include directory -Ilibraries/libname/src
+        # - Else lib folder as root include -Ilibraries/libname
+        #   (exclude source files in the examples folder in this case)
+
+        if not self.library_names or not self.library_path:
+            return []
+
+        library_path = self.library_path
+        folder_patterns = ["*"]
+        if self.library_names:
+            folder_patterns = self.library_names
+
+        library_folders = file_operations.find_files(library_path,
+                                                     folder_patterns,
+                                                     directories_only=True)
+        library_source_root_folders = []
+        for lib in library_folders:
+            lib_dir = os.path.join(library_path, lib)
+            src_dir = os.path.join(lib_dir, "src")
+            if os.path.exists(src_dir) and os.path.isdir(src_dir):
+                library_source_root_folders.append(src_dir)
+            else:
+                library_source_root_folders.append(lib_dir)
+
+        return library_source_root_folders
+
+    def library_include_dirs(self):
+        return [Path(lib).as_posix() for lib in self.library_folders()]
+
+    def library_includes(self):
+        include_args = []
+        library_folders = self.library_folders()
+        for lib_dir in library_folders:
+            include_args.append("-I{}".format(os.path.relpath(lib_dir)))
+        return include_args
+
+    def library_files(self, pattern):
+        sources = []
+        library_folders = self.library_folders()
+        for lib_dir in library_folders:
+            for file_path in file_operations.find_files(lib_dir, [pattern]):
+                if not file_path.startswith("examples"):
+                    sources.append((Path(lib_dir) / file_path).as_posix())
+        return sources
+
+    def library_c_files(self):
+        return self.library_files("**/*.c")
+
+    def library_s_files(self):
+        return self.library_files("**/*.S")
+
+    def library_cpp_files(self):
+        return self.library_files("**/*.cpp")
+
+    def get_core_path(self):
+        return self.board[self.selected_board]["build.core.path"]
+
+    def core_files(self, pattern):
+        sources = []
+        for file_path in file_operations.find_files(self.get_core_path(),
+                                                    [pattern]):
+            sources.append(os.path.join(self.get_core_path(), file_path))
+        return sources
+
+    def core_c_files(self):
+        return self.core_files("**/*.c")
+
+    def core_s_files(self):
+        return self.core_files("**/*.S")
+
+    def core_cpp_files(self):
+        return self.core_files("**/*.cpp")
+
+    def get_variant_path(self):
+        return self.build_variant_path
+
+    def variant_files(self, pattern):
+        sources = []
+        if self.build_variant_path:
+            for file_path in file_operations.find_files(
+                    self.get_variant_path(), [pattern]):
+                sources.append(os.path.join(self.get_variant_path(),
+                                            file_path))
+        return sources
+
+    def variant_c_files(self):
+        return self.variant_files("**/*.c")
+
+    def variant_s_files(self):
+        return self.variant_files("**/*.S")
+
+    def variant_cpp_files(self):
+        return self.variant_files("**/*.cpp")
+
+    def project_files(self, pattern):
+        sources = []
+        for file_path in file_operations.find_files(self.project_path,
+                                                    [pattern]):
+            if not file_path.startswith(
+                    "examples") and not file_path.startswith("libraries"):
+                sources.append(file_path)
+        return sources
+
+    def project_c_files(self):
+        return self.project_files("**/*.c")
+
+    def project_cpp_files(self):
+        return self.project_files("**/*.cpp")
+
+    def project_ino_files(self):
+        return self.project_files("**/*.ino")
diff --git a/pw_arduino_build/py/pw_arduino_build/core_installer.py b/pw_arduino_build/py/pw_arduino_build/core_installer.py
new file mode 100644
index 0000000..5f26834
--- /dev/null
+++ b/pw_arduino_build/py/pw_arduino_build/core_installer.py
@@ -0,0 +1,381 @@
+#!/usr/bin/env python3
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Arduino Core Installer."""
+
+import argparse
+import logging
+import operator
+import os
+import platform
+import shutil
+import stat
+import subprocess
+import sys
+import time
+from typing import Dict, List
+
+import pw_arduino_build.file_operations as file_operations
+
+_LOG = logging.getLogger(__name__)
+
+
+class ArduinoCoreNotSupported(Exception):
+    """Exception raised when a given core can not be installed."""
+
+
+# yapf: disable
+_ARDUINO_CORE_ARTIFACTS: Dict[str, Dict] = {
+    # pylint: disable=line-too-long
+    "teensy": {
+        "Linux": {
+            "arduino-ide": {
+                "url": "https://downloads.arduino.cc/arduino-1.8.13-linux64.tar.xz",
+                "file_name": "arduino-1.8.13-linux64.tar.xz",
+                "sha256": "1b20d0ec850a2a63488009518725f058668bb6cb48c321f82dcf47dc4299b4ad",
+            },
+            "teensyduino": {
+                "url": "https://www.pjrc.com/teensy/td_154-beta4/TeensyduinoInstall.linux64",
+                "file_name": "TeensyduinoInstall.linux64",
+                "sha256": "76c58babb7253b65a33d73d53f3f239c2e2ccf8602c771d69300a67d82723730",
+            },
+        },
+        # TODO(tonymd): Handle 32-bit Linux Install?
+        "Linux32": {
+            "arduino-ide": {
+                "url": "https://downloads.arduino.cc/arduino-1.8.13-linux32.tar.xz",
+                "file_name": "arduino-1.8.13-linux32.tar.xz",
+                "sha256": "",
+            },
+            "teensyduino": {
+                "url": "https://www.pjrc.com/teensy/td_153/TeensyduinoInstall.linux32",
+                "file_name": "TeensyduinoInstall.linux32",
+                "sha256": "",
+            },
+        },
+        # TODO(tonymd): Handle ARM32 (Raspberry Pi) Install?
+        "LinuxARM32": {
+            "arduino-ide": {
+                "url": "https://downloads.arduino.cc/arduino-1.8.13-linuxarm.tar.xz",
+                "file_name": "arduino-1.8.13-linuxarm.tar.xz",
+                "sha256": "",
+            },
+            "teensyduino": {
+                "url": "https://www.pjrc.com/teensy/td_153/TeensyduinoInstall.linuxarm",
+                "file_name": "TeensyduinoInstall.linuxarm",
+                "sha256": "",
+            },
+        },
+        # TODO(tonymd): Handle ARM64 Install?
+        "LinuxARM64": {
+            "arduino-ide": {
+                "url": "https://downloads.arduino.cc/arduino-1.8.13-linuxaarch64.tar.xz",
+                "file_name": "arduino-1.8.13-linuxaarch64.tar.xz",
+                "sha256": "",
+            },
+            "teensyduino": {
+                "url": "https://www.pjrc.com/teensy/td_153/TeensyduinoInstall.linuxaarch64",
+                "file_name": "TeensyduinoInstall.linuxaarch64",
+                "sha256": "",
+            },
+        },
+        "Darwin": {
+            "teensyduino": {
+                "url": "https://www.pjrc.com/teensy/td_154-beta4/Teensyduino_MacOS_Catalina.zip",
+                "file_name": "Teensyduino_MacOS_Catalina.zip",
+                "sha256": "7ca579c12d8f3a8949dbeec812b8dbef13242d575baa707dc7f02bc452c1f4a1",
+            },
+        },
+        "Windows": {
+            "arduino-ide": {
+                "url": "https://downloads.arduino.cc/arduino-1.8.13-windows.zip",
+                "file_name": "arduino-1.8.13-windows.zip",
+                "sha256": "78d3e96827b9e9b31b43e516e601c38d670d29f12483e88cbf6d91a0f89ef524",
+            },
+            "teensyduino": {
+                "url": "https://www.pjrc.com/teensy/td_154-beta4/TeensyduinoInstall.exe",
+                "file_name": "TeensyduinoInstall.exe",
+                "sha256": "f7bcc2ed45e10a5d7b003bedabcde12fb1b8cf7ef9081e2503cd668569642a90",
+            },
+        }
+    },
+    "adafruit-samd": {
+        "all": {
+            "core": {
+                "version": "1.6.2",
+                "url": "https://github.com/adafruit/ArduinoCore-samd/archive/1.6.2.tar.gz",
+                "sha256": "5875f5bc05904c10e6313f02653f28f2f716db639d3d43f5a1d8a83d15339d64",
+            }
+        },
+        "Linux": {},
+        "Darwin": {},
+        "Windows": {},
+    },
+    "arduino-samd": {
+        "all": {
+            "core": {
+                "version": "1.8.8",
+                "url": "http://downloads.arduino.cc/cores/samd-1.8.8.tar.bz2",
+                "file_name": "samd-1.8.8.tar.bz2",
+                "sha256": "7b93eb705cba9125d9ee52eba09b51fb5fe34520ada351508f4253abbc9f27fa",
+            }
+        },
+        "Linux": {},
+        "Darwin": {},
+        "Windows": {},
+    },
+    "stm32duino": {
+        "all": {
+            "core": {
+                "version": "1.9.0",
+                "url": "https://github.com/stm32duino/Arduino_Core_STM32/archive/1.9.0.tar.gz",
+                "sha256": "4f75ba7a117d90392e8f67c58d31d22393749b9cdd3279bc21e7261ec06c62bf",
+            }
+        },
+        "Linux": {},
+        "Darwin": {},
+        "Windows": {},
+    },
+}
+# yapf: enable
+
+
+def install_core_command(args: argparse.Namespace):
+    install_prefix = os.path.realpath(
+        os.path.expanduser(os.path.expandvars(args.prefix)))
+    install_dir = os.path.join(install_prefix, args.core_name)
+    cache_dir = os.path.join(install_prefix, ".cache", args.core_name)
+
+    if args.core_name in supported_cores():
+        shutil.rmtree(install_dir, ignore_errors=True)
+        os.makedirs(install_dir, exist_ok=True)
+        os.makedirs(cache_dir, exist_ok=True)
+
+    if args.core_name == "teensy":
+        if platform.system() == "Linux":
+            install_teensy_core_linux(install_prefix, install_dir, cache_dir)
+        elif platform.system() == "Darwin":
+            install_teensy_core_mac(install_prefix, install_dir, cache_dir)
+        elif platform.system() == "Windows":
+            install_teensy_core_windows(install_prefix, install_dir, cache_dir)
+    elif args.core_name == "adafruit-samd":
+        install_adafruit_samd_core(install_prefix, install_dir, cache_dir)
+    elif args.core_name == "stm32duino":
+        install_stm32duino_core(install_prefix, install_dir, cache_dir)
+    elif args.core_name == "arduino-samd":
+        install_arduino_samd_core(install_prefix, install_dir, cache_dir)
+    else:
+        raise ArduinoCoreNotSupported(
+            "Invalid core '{}'. Supported cores: {}".format(
+                args.core_name, ", ".join(supported_cores())))
+
+
+def supported_cores():
+    return _ARDUINO_CORE_ARTIFACTS.keys()
+
+
+def get_windows_process_names() -> List[str]:
+    result = subprocess.run("wmic process get description",
+                            capture_output=True)
+    output = result.stdout.decode().splitlines()
+    return [line.strip() for line in output if line]
+
+
+def install_teensy_core_windows(install_prefix, install_dir, cache_dir):
+    """Download and install Teensyduino artifacts for Windows."""
+    teensy_artifacts = _ARDUINO_CORE_ARTIFACTS["teensy"][platform.system()]
+
+    arduino_artifact = teensy_artifacts["arduino-ide"]
+    arduino_zipfile = file_operations.download_to_cache(
+        url=arduino_artifact["url"],
+        expected_sha256sum=arduino_artifact["sha256"],
+        cache_directory=cache_dir)
+
+    teensyduino_artifact = teensy_artifacts["teensyduino"]
+    teensyduino_installer = file_operations.download_to_cache(
+        url=teensyduino_artifact["url"],
+        expected_sha256sum=teensyduino_artifact["sha256"],
+        cache_directory=cache_dir)
+
+    file_operations.extract_archive(arduino_zipfile, install_dir, cache_dir)
+
+    # "teensy" here should match args.core_name
+    teensy_core_dir = os.path.join(install_prefix, "teensy")
+
+    # Change working directory for installation
+    original_working_dir = os.getcwd()
+    os.chdir(install_prefix)
+
+    install_command = [teensyduino_installer, "--dir=teensy"]
+    _LOG.info("  Running: %s", " ".join(install_command))
+    _LOG.info("    Please click yes on the Windows 'User Account Control' "
+              "dialog.")
+    _LOG.info("    You should see: 'Verified publisher: PRJC.COM LLC'")
+
+    def wait_for_process(process_name,
+                         timeout=30,
+                         result_operator=operator.truth):
+        start_time = time.time()
+        while result_operator(process_name in get_windows_process_names()):
+            time.sleep(1)
+            if time.time() > start_time + timeout:
+                _LOG.error(
+                    "Error: Installation Failed.\n"
+                    "Please click yes on the Windows 'User Account Control' "
+                    "dialog.")
+                sys.exit(1)
+
+    # Run Teensyduino installer with admin rights (non-blocking)
+    # User Account Control (UAC) will prompt the user for consent
+    import ctypes  # pylint: disable=import-outside-toplevel
+    ctypes.windll.shell32.ShellExecuteW(
+        None,  # parent window handle
+        "runas",  # operation
+        teensyduino_installer,  # file to run
+        subprocess.list2cmdline(install_command),  # command parameters
+        install_prefix,  # working directory
+        1)  # Display mode (SW_SHOWNORMAL: Activates and displays a window)
+
+    # Wait for teensyduino_installer to start running
+    wait_for_process("TeensyduinoInstall.exe", result_operator=operator.not_)
+
+    _LOG.info("Waiting for TeensyduinoInstall.exe to finish.")
+    # Wait till teensyduino_installer is finished
+    wait_for_process("TeensyduinoInstall.exe", timeout=360)
+
+    if not os.path.exists(os.path.join(teensy_core_dir, "hardware", "teensy")):
+        _LOG.error(
+            "Error: Installation Failed.\n"
+            "Please try again and ensure Teensyduino is installed in "
+            "the folder:\n"
+            "%s", teensy_core_dir)
+        sys.exit(1)
+    else:
+        _LOG.info("Install complete!")
+
+    file_operations.remove_empty_directories(install_dir)
+    os.chdir(original_working_dir)
+
+
+def install_teensy_core_mac(unused_install_prefix, install_dir, cache_dir):
+    """Download and install Teensyduino artifacts for Mac."""
+    teensy_artifacts = _ARDUINO_CORE_ARTIFACTS["teensy"][platform.system()]
+
+    teensyduino_artifact = teensy_artifacts["teensyduino"]
+    teensyduino_zip = file_operations.download_to_cache(
+        url=teensyduino_artifact["url"],
+        expected_sha256sum=teensyduino_artifact["sha256"],
+        cache_directory=cache_dir)
+
+    extracted_files = file_operations.extract_archive(
+        teensyduino_zip,
+        install_dir,
+        cache_dir,
+        remove_single_toplevel_folder=False)
+    toplevel_folder = sorted(extracted_files)[0]
+    os.symlink(os.path.join(toplevel_folder, "Contents", "Java", "hardware"),
+               os.path.join(install_dir, "hardware"),
+               target_is_directory=True)
+
+
+def install_teensy_core_linux(install_prefix, install_dir, cache_dir):
+    """Download and install Teensyduino artifacts for Linux."""
+    teensy_artifacts = _ARDUINO_CORE_ARTIFACTS["teensy"][platform.system()]
+
+    arduino_artifact = teensy_artifacts["arduino-ide"]
+    arduino_tarfile = file_operations.download_to_cache(
+        url=arduino_artifact["url"],
+        expected_sha256sum=arduino_artifact["sha256"],
+        cache_directory=cache_dir)
+
+    teensyduino_artifact = teensy_artifacts["teensyduino"]
+    teensyduino_installer = file_operations.download_to_cache(
+        url=teensyduino_artifact["url"],
+        expected_sha256sum=teensyduino_artifact["sha256"],
+        cache_directory=cache_dir)
+
+    file_operations.extract_archive(arduino_tarfile, install_dir, cache_dir)
+    os.chmod(teensyduino_installer,
+             os.stat(teensyduino_installer).st_mode | stat.S_IEXEC)
+
+    original_working_dir = os.getcwd()
+    os.chdir(install_prefix)
+    # "teensy" here should match args.core_name
+    install_command = [teensyduino_installer, "--dir=teensy"]
+    subprocess.run(install_command)
+
+    file_operations.remove_empty_directories(install_dir)
+    os.chdir(original_working_dir)
+
+
+def install_arduino_samd_core(install_prefix: str, install_dir: str,
+                              cache_dir: str):
+    artifacts = _ARDUINO_CORE_ARTIFACTS["arduino-samd"]["all"]["core"]
+    core_tarfile = file_operations.download_to_cache(
+        url=artifacts["url"],
+        expected_sha256sum=artifacts["sha256"],
+        cache_directory=cache_dir)
+
+    package_path = os.path.join(install_dir, "hardware", "samd",
+                                artifacts["version"])
+    os.makedirs(package_path, exist_ok=True)
+    file_operations.extract_archive(core_tarfile, package_path, cache_dir)
+    original_working_dir = os.getcwd()
+    os.chdir(install_prefix)
+    # TODO(tonymd): Fetch core/tools as specified by:
+    # http://downloads.arduino.cc/packages/package_index.json
+    os.chdir(original_working_dir)
+    return True
+
+
+def install_adafruit_samd_core(install_prefix: str, install_dir: str,
+                               cache_dir: str):
+    artifacts = _ARDUINO_CORE_ARTIFACTS["adafruit-samd"]["all"]["core"]
+    core_tarfile = file_operations.download_to_cache(
+        url=artifacts["url"],
+        expected_sha256sum=artifacts["sha256"],
+        cache_directory=cache_dir)
+
+    package_path = os.path.join(install_dir, "hardware", "samd",
+                                artifacts["version"])
+    os.makedirs(package_path, exist_ok=True)
+    file_operations.extract_archive(core_tarfile, package_path, cache_dir)
+
+    original_working_dir = os.getcwd()
+    os.chdir(install_prefix)
+    # TODO(tonymd): Fetch platform specific tools as specified by:
+    # https://adafruit.github.io/arduino-board-index/package_adafruit_index.json
+    # Specifically:
+    #   https://github.com/ARM-software/CMSIS_5/archive/5.4.0.tar.gz
+    os.chdir(original_working_dir)
+    return True
+
+
+def install_stm32duino_core(install_prefix, install_dir, cache_dir):
+    artifacts = _ARDUINO_CORE_ARTIFACTS["stm32duino"]["all"]["core"]
+    core_tarfile = file_operations.download_to_cache(
+        url=artifacts["url"],
+        expected_sha256sum=artifacts["sha256"],
+        cache_directory=cache_dir)
+
+    package_path = os.path.join(install_dir, "hardware", "stm32",
+                                artifacts["version"])
+    os.makedirs(package_path, exist_ok=True)
+    file_operations.extract_archive(core_tarfile, package_path, cache_dir)
+    original_working_dir = os.getcwd()
+    os.chdir(install_prefix)
+    # TODO(tonymd): Fetch platform specific tools as specified by:
+    # https://github.com/stm32duino/BoardManagerFiles/raw/master/STM32/package_stm_index.json
+    os.chdir(original_working_dir)
+    return True
diff --git a/pw_arduino_build/py/pw_arduino_build/file_operations.py b/pw_arduino_build/py/pw_arduino_build/file_operations.py
new file mode 100644
index 0000000..61b728d
--- /dev/null
+++ b/pw_arduino_build/py/pw_arduino_build/file_operations.py
@@ -0,0 +1,212 @@
+#!/usr/bin/env python3
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""File Helper Functions."""
+
+import glob
+import hashlib
+import json
+import logging
+import os
+import shutil
+import sys
+import tarfile
+import urllib.request
+import zipfile
+from pathlib import Path
+from typing import List
+
+_LOG = logging.getLogger(__name__)
+
+
+class InvalidChecksumError(Exception):
+    pass
+
+
+def find_files(starting_dir: str,
+               patterns: List[str],
+               directories_only=False) -> List[str]:
+    original_working_dir = os.getcwd()
+    if not (os.path.exists(starting_dir) and os.path.isdir(starting_dir)):
+        raise FileNotFoundError(
+            "Directory '{}' does not exist.".format(starting_dir))
+
+    os.chdir(starting_dir)
+    files = []
+    for pattern in patterns:
+        for file_path in glob.glob(pattern, recursive=True):
+            if not directories_only or (directories_only
+                                        and os.path.isdir(file_path)):
+                files.append(file_path)
+    os.chdir(original_working_dir)
+    return sorted(files)
+
+
+def sha256_sum(file_name):
+    hash_sha256 = hashlib.sha256()
+    with open(file_name, "rb") as file_handle:
+        for chunk in iter(lambda: file_handle.read(4096), b""):
+            hash_sha256.update(chunk)
+    return hash_sha256.hexdigest()
+
+
+def md5_sum(file_name):
+    hash_md5 = hashlib.md5()
+    with open(file_name, "rb") as file_handle:
+        for chunk in iter(lambda: file_handle.read(4096), b""):
+            hash_md5.update(chunk)
+    return hash_md5.hexdigest()
+
+
+def verify_file_checksum(file_path,
+                         expected_checksum,
+                         sum_function=sha256_sum):
+    downloaded_checksum = sum_function(file_path)
+    if downloaded_checksum != expected_checksum:
+        raise InvalidChecksumError(
+            f"Invalid {sum_function.__name__}\n"
+            f"{downloaded_checksum} {os.path.basename(file_path)}\n"
+            f"{expected_checksum} (expected)")
+
+    _LOG.debug("  %s:", sum_function.__name__)
+    _LOG.debug("  %s %s", downloaded_checksum, os.path.basename(file_path))
+    return True
+
+
+def download_to_cache(url: str,
+                      expected_md5sum=None,
+                      expected_sha256sum=None,
+                      cache_directory=".cache") -> str:
+
+    cache_dir = os.path.realpath(
+        os.path.expanduser(os.path.expandvars(cache_directory)))
+    downloaded_file = os.path.join(cache_dir, url.split("/")[-1])
+
+    if not os.path.exists(downloaded_file):
+        _LOG.info("Downloading: %s", url)
+        _LOG.info("Please wait...")
+        urllib.request.urlretrieve(url, filename=downloaded_file)
+
+    if os.path.exists(downloaded_file):
+        _LOG.info("Downloaded: %s",
+                  Path(downloaded_file).relative_to(os.getcwd()))
+        if expected_sha256sum:
+            verify_file_checksum(downloaded_file,
+                                 expected_sha256sum,
+                                 sum_function=sha256_sum)
+        elif expected_md5sum:
+            verify_file_checksum(downloaded_file,
+                                 expected_md5sum,
+                                 sum_function=md5_sum)
+
+    return downloaded_file
+
+
+def extract_zipfile(archive_file: str, dest_dir: str):
+    """Extract a zipfile preserving permissions."""
+    destination_path = Path(dest_dir)
+    with zipfile.ZipFile(archive_file) as archive:
+        for info in archive.infolist():
+            archive.extract(info.filename, path=dest_dir)
+            permissions = info.external_attr >> 16
+            out_path = destination_path / info.filename
+            out_path.chmod(permissions)
+
+
+def extract_tarfile(archive_file: str, dest_dir: str):
+    with tarfile.open(archive_file, 'r') as archive:
+        archive.extractall(path=dest_dir)
+
+
+def extract_archive(archive_file: str,
+                    dest_dir: str,
+                    cache_dir: str,
+                    remove_single_toplevel_folder=True):
+    """Extract a tar or zip file.
+
+    Args:
+        archive_file (str): Absolute path to the archive file.
+        dest_dir (str): Extraction destination directory.
+        cache_dir (str): Directory where temp files can be created.
+        remove_single_toplevel_folder (bool): If the archive contains only a
+            single folder move the contents of that into the destination
+            directory.
+    """
+    # Make a temporary directory to extract files into
+    temp_extract_dir = os.path.join(cache_dir,
+                                    "." + os.path.basename(archive_file))
+    os.makedirs(temp_extract_dir, exist_ok=True)
+
+    _LOG.info("Extracting: %s", Path(archive_file).relative_to(os.getcwd()))
+    if zipfile.is_zipfile(archive_file):
+        extract_zipfile(archive_file, temp_extract_dir)
+    elif tarfile.is_tarfile(archive_file):
+        extract_tarfile(archive_file, temp_extract_dir)
+    else:
+        _LOG.error("Unknown archive format: %s", archive_file)
+        return sys.exit(1)
+
+    _LOG.info("Installing into: %s", Path(dest_dir).relative_to(os.getcwd()))
+    path_to_extracted_files = temp_extract_dir
+
+    extracted_top_level_files = os.listdir(temp_extract_dir)
+    # Check if tarfile has only one folder
+    # If yes, make that the new path_to_extracted_files
+    if remove_single_toplevel_folder and len(extracted_top_level_files) == 1:
+        path_to_extracted_files = os.path.join(temp_extract_dir,
+                                               extracted_top_level_files[0])
+
+    # Move extracted files to dest_dir
+    extracted_files = os.listdir(path_to_extracted_files)
+    for file_name in extracted_files:
+        source_file = os.path.join(path_to_extracted_files, file_name)
+        dest_file = os.path.join(dest_dir, file_name)
+        shutil.move(source_file, dest_file)
+
+    # rm -rf temp_extract_dir
+    shutil.rmtree(temp_extract_dir, ignore_errors=True)
+
+    # Return List of extracted files
+    return list(Path(dest_dir).rglob("*"))
+
+
+def remove_empty_directories(directory):
+    """Recursively remove empty directories."""
+
+    for path in sorted(Path(directory).rglob("*"), reverse=True):
+        # If broken symlink
+        if path.is_symlink() and not path.exists():
+            path.unlink()
+        # if empty directory
+        elif path.is_dir() and len(os.listdir(path)) == 0:
+            path.rmdir()
+
+
+def decode_file_json(file_name):
+    """Decode JSON values from a file.
+
+    Does not raise an error if the file cannot be decoded."""
+
+    # Get absolute path to the file.
+    file_path = os.path.realpath(
+        os.path.expanduser(os.path.expandvars(file_name)))
+
+    json_file_options = {}
+    try:
+        with open(file_path, "r") as jfile:
+            json_file_options = json.loads(jfile.read())
+    except (FileNotFoundError, json.JSONDecodeError):
+        _LOG.warning("Unable to read file '%s'", file_path)
+
+    return json_file_options, file_path
diff --git a/pw_arduino_build/py/pw_arduino_build/log.py b/pw_arduino_build/py/pw_arduino_build/log.py
new file mode 100644
index 0000000..d5afb30
--- /dev/null
+++ b/pw_arduino_build/py/pw_arduino_build/log.py
@@ -0,0 +1,43 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Configure the system logger for the default pw command log format."""
+
+import logging
+
+_LOG = logging.getLogger(__name__)
+_STDERR_HANDLER = logging.StreamHandler()
+
+
+def install(level: int = logging.INFO) -> None:
+    """Configure the system logger for the arduino_builder log format."""
+
+    try:
+        import pw_cli.log  # pylint: disable=import-outside-toplevel
+        pw_cli.log.install(level=level)
+    except ImportError:
+        # Set log level on root logger to debug, otherwise any higher levels
+        # elsewhere are ignored.
+        root = logging.getLogger()
+        root.setLevel(logging.DEBUG)
+
+        _STDERR_HANDLER.setLevel(level)
+        _STDERR_HANDLER.setFormatter(
+            logging.Formatter("[%(asctime)s] "
+                              "%(levelname)s %(message)s", "%Y%m%d %H:%M:%S"))
+        root.addHandler(_STDERR_HANDLER)
+
+
+def set_level(log_level: int):
+    """Sets the log level for logs to stderr."""
+    _STDERR_HANDLER.setLevel(log_level)
diff --git a/pw_arduino_build/py/pw_arduino_build/py.typed b/pw_arduino_build/py/pw_arduino_build/py.typed
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pw_arduino_build/py/pw_arduino_build/py.typed
diff --git a/pw_arduino_build/py/pw_arduino_build/teensy_detector.py b/pw_arduino_build/py/pw_arduino_build/teensy_detector.py
new file mode 100644
index 0000000..74e9892
--- /dev/null
+++ b/pw_arduino_build/py/pw_arduino_build/teensy_detector.py
@@ -0,0 +1,133 @@
+#!/usr/bin/env python3
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Detects attached Teensy boards connected via usb."""
+
+import logging
+import re
+import subprocess
+import typing
+
+from pathlib import Path
+from typing import List
+
+import pw_arduino_build.log
+
+_LOG = logging.getLogger('teensy_detector')
+
+
+class UnknownArduinoCore(Exception):
+    """Exception raised when a given core can not be found."""
+
+
+def log_subprocess_output(level, output):
+    """Logs subprocess output line-by-line."""
+
+    lines = output.decode('utf-8', errors='replace').splitlines()
+    for line in lines:
+        _LOG.log(level, line)
+
+
+class BoardInfo(typing.NamedTuple):
+    """Information about a connected dev board."""
+    dev_name: str
+    usb_device_path: str
+    protocol: str
+    label: str
+    arduino_upload_tool_name: str
+
+    def test_runner_args(self) -> List[str]:
+        return [
+            "--set-variable", f"serial.port.protocol={self.protocol}",
+            "--set-variable", f"serial.port={self.usb_device_path}",
+            "--set-variable", f"serial.port.label={self.dev_name}"
+        ]
+
+
+def detect_boards(arduino_package_path=False) -> list:
+    """Detect attached boards, returning a list of Board objects."""
+
+    teensy_core = Path()
+    if arduino_package_path:
+        teensy_core = Path(arduino_package_path)
+    else:
+        teensy_core = Path("third_party/arduino/cores/teensy")
+        if not teensy_core.exists():
+            teensy_core = Path(
+                "third_party/pigweed/third_party/arduino/cores/teensy")
+
+    if not teensy_core.exists():
+        raise UnknownArduinoCore
+
+    teensy_device_line_regex = re.compile(
+        r"^(?P<address>[^ ]+) (?P<dev_name>[^ ]+) "
+        r"\((?P<label>[^)]+)\) ?(?P<rest>.*)$")
+
+    boards = []
+    detect_command = [(teensy_core / "hardware" / "tools" /
+                       "teensy_ports").absolute().as_posix(), "-L"]
+
+    # TODO(tonymd): teensy_ports -L on windows does not return the right port
+    # string Example:
+    #
+    #   $ teensy_ports -L
+    #   Port_#0001.Hub_#0003 COM3 (Teensy 3.6) Serial
+    #
+    # So we get "-port=Port_#0001.Hub_#0003"
+    # But it should be "-port=usb:0/140000/0/1"
+
+    process = subprocess.run(detect_command,
+                             stdout=subprocess.PIPE,
+                             stderr=subprocess.STDOUT)
+    if process.returncode != 0:
+        _LOG.error("Command failed with exit code %d.", process.returncode)
+        _LOG.error("Full command:")
+        _LOG.error("")
+        _LOG.error("  %s", " ".join(detect_command))
+        _LOG.error("")
+        _LOG.error("Process output:")
+        log_subprocess_output(logging.ERROR, process.stdout)
+        _LOG.error('')
+    for line in process.stdout.decode("utf-8", errors="replace").splitlines():
+        device_match_result = teensy_device_line_regex.match(line)
+        if device_match_result:
+            teensy_device = device_match_result.groupdict()
+            boards.append(
+                BoardInfo(dev_name=teensy_device["dev_name"],
+                          usb_device_path=teensy_device["address"],
+                          protocol="Teensy",
+                          label=teensy_device["label"],
+                          arduino_upload_tool_name="teensyloader"))
+    return boards
+
+
+def main():
+    """This detects and then displays all attached Teensy boards."""
+
+    pw_arduino_build.log.install(logging.INFO)
+
+    boards = detect_boards()
+    if not boards:
+        _LOG.info("No attached boards detected")
+    for idx, board in enumerate(boards):
+        _LOG.info("Board %d:", idx)
+        _LOG.info("  - Name: %s", board.label)
+        _LOG.info("  - Port: %s", board.dev_name)
+        _LOG.info("  - Address: %s", board.usb_device_path)
+        _LOG.info("  - Test runner args: %s",
+                  " ".join(board.test_runner_args()))
+
+
+if __name__ == "__main__":
+    main()
diff --git a/pw_arduino_build/py/pw_arduino_build/unit_test_client.py b/pw_arduino_build/py/pw_arduino_build/unit_test_client.py
new file mode 100755
index 0000000..38227f5
--- /dev/null
+++ b/pw_arduino_build/py/pw_arduino_build/unit_test_client.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python3
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Launch a pw_target_runner client that sends a test request."""
+
+import argparse
+import subprocess
+import sys
+from typing import Optional
+
+_TARGET_CLIENT_COMMAND = 'pw_target_runner_client'
+
+
+def parse_args():
+    """Parses command-line arguments."""
+
+    parser = argparse.ArgumentParser(description=__doc__)
+    parser.add_argument('binary', help='The target test binary to run')
+    parser.add_argument('--server-port',
+                        type=int,
+                        default=8081,
+                        help='Port the test server is located on')
+    parser.add_argument('runner_args',
+                        nargs=argparse.REMAINDER,
+                        help='Arguments to forward to the test runner')
+
+    return parser.parse_args()
+
+
+def launch_client(binary: str, server_port: Optional[int]) -> int:
+    """Sends a test request to the specified server port."""
+    cmd = [_TARGET_CLIENT_COMMAND, '-binary', binary]
+
+    if server_port is not None:
+        cmd.extend(['-port', str(server_port)])
+
+    return subprocess.call(cmd)
+
+
+def main() -> int:
+    """Launch a test by sending a request to a pw_target_runner_server."""
+    args = parse_args()
+    return launch_client(args.binary, args.server_port)
+
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/pw_arduino_build/py/pw_arduino_build/unit_test_runner.py b/pw_arduino_build/py/pw_arduino_build/unit_test_runner.py
new file mode 100755
index 0000000..4fa6011
--- /dev/null
+++ b/pw_arduino_build/py/pw_arduino_build/unit_test_runner.py
@@ -0,0 +1,368 @@
+#!/usr/bin/env python3
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""This script flashes and runs unit tests onto Arduino boards."""
+
+import argparse
+import logging
+import os
+import platform
+import re
+import subprocess
+import sys
+import time
+from pathlib import Path
+from typing import List
+
+import serial  # type: ignore
+import serial.tools.list_ports  # type: ignore
+import pw_arduino_build.log
+from pw_arduino_build import teensy_detector
+from pw_arduino_build.file_operations import decode_file_json
+
+_LOG = logging.getLogger('unit_test_runner')
+
+# Verification of test pass/failure depends on these strings. If the formatting
+# or output of the simple_printing_event_handler changes, this may need to be
+# updated.
+_TESTS_STARTING_STRING = b'[==========] Running all tests.'
+_TESTS_DONE_STRING = b'[==========] Done running all tests.'
+_TEST_FAILURE_STRING = b'[  FAILED  ]'
+
+# How long to wait for the first byte of a test to be emitted. This is longer
+# than the user-configurable timeout as there's a delay while the device is
+# flashed.
+_FLASH_TIMEOUT = 5.0
+
+
+class TestingFailure(Exception):
+    """A simple exception to be raised when a testing step fails."""
+
+
+class DeviceNotFound(Exception):
+    """A simple exception to be raised when unable to connect to a device."""
+
+
+class ArduinoCoreNotSupported(Exception):
+    """Exception raised when a given core does not support unit testing."""
+
+
+def valid_file_name(arg):
+    file_path = Path(os.path.expandvars(arg)).absolute()
+    if not file_path.is_file():
+        raise argparse.ArgumentTypeError(f"'{arg}' does not exist.")
+    return file_path
+
+
+def parse_args():
+    """Parses command-line arguments."""
+
+    parser = argparse.ArgumentParser(description=__doc__)
+    parser.add_argument('binary',
+                        help='The target test binary to run',
+                        type=valid_file_name)
+    parser.add_argument('--port',
+                        help='The name of the serial port to connect to when '
+                        'running tests')
+    parser.add_argument('--baud',
+                        type=int,
+                        default=115200,
+                        help='Target baud rate to use for serial communication'
+                        ' with target device')
+    parser.add_argument('--test-timeout',
+                        type=float,
+                        default=5.0,
+                        help='Maximum communication delay in seconds before a '
+                        'test is considered unresponsive and aborted')
+    parser.add_argument('--verbose',
+                        '-v',
+                        dest='verbose',
+                        action='store_true',
+                        help='Output additional logs as the script runs')
+
+    parser.add_argument('--flash-only',
+                        action='store_true',
+                        help="Don't check for test output after flashing.")
+
+    # arduino_builder arguments
+    # TODO(tonymd): Get these args from __main__.py or elsewhere.
+    parser.add_argument("-c",
+                        "--config-file",
+                        required=True,
+                        help="Path to a config file.")
+    parser.add_argument("--arduino-package-path",
+                        help="Path to the arduino IDE install location.")
+    parser.add_argument("--arduino-package-name",
+                        help="Name of the Arduino board package to use.")
+    parser.add_argument("--compiler-path-override",
+                        help="Path to arm-none-eabi-gcc bin folder. "
+                        "Default: Arduino core specified gcc")
+    parser.add_argument("--board", help="Name of the Arduino board to use.")
+    parser.add_argument("--upload-tool",
+                        required=True,
+                        help="Name of the Arduino upload tool to use.")
+    parser.add_argument("--set-variable",
+                        action="append",
+                        metavar='some.variable=NEW_VALUE',
+                        help="Override an Arduino recipe variable. May be "
+                        "specified multiple times. For example: "
+                        "--set-variable 'serial.port.label=/dev/ttyACM0' "
+                        "--set-variable 'serial.port.protocol=Teensy'")
+    return parser.parse_args()
+
+
+def log_subprocess_output(level, output):
+    """Logs subprocess output line-by-line."""
+
+    lines = output.decode('utf-8', errors='replace').splitlines()
+    for line in lines:
+        _LOG.log(level, line)
+
+
+def read_serial(port, baud_rate, test_timeout) -> bytes:
+    """Reads lines from a serial port until a line read times out.
+
+    Returns bytes object containing the read serial data.
+    """
+
+    serial_data = bytearray()
+    device = serial.Serial(baudrate=baud_rate,
+                           port=port,
+                           timeout=_FLASH_TIMEOUT)
+    if not device.is_open:
+        raise TestingFailure('Failed to open device')
+
+    # Flush input buffer and reset the device to begin the test.
+    device.reset_input_buffer()
+
+    # Block and wait for the first byte.
+    serial_data += device.read()
+    if not serial_data:
+        raise TestingFailure('Device not producing output')
+
+    device.timeout = test_timeout
+
+    # Read with a reasonable timeout until we stop getting characters.
+    while True:
+        bytes_read = device.readline()
+        if not bytes_read:
+            break
+        serial_data += bytes_read
+        if serial_data.rfind(_TESTS_DONE_STRING) != -1:
+            # Set to much more aggressive timeout since the last one or two
+            # lines should print out immediately. (one line if all fails or all
+            # passes, two lines if mixed.)
+            device.timeout = 0.01
+
+    # Remove carriage returns.
+    serial_data = serial_data.replace(b'\r', b'')
+
+    # Try to trim captured results to only contain most recent test run.
+    test_start_index = serial_data.rfind(_TESTS_STARTING_STRING)
+    return serial_data if test_start_index == -1 else serial_data[
+        test_start_index:]
+
+
+def wait_for_port(port):
+    """Wait for the serial port to be available."""
+    while port not in [sp.device for sp in serial.tools.list_ports.comports()]:
+        time.sleep(1)
+
+
+def flash_device(test_runner_args, upload_tool):
+    """Flash binary to a connected device using the provided configuration."""
+
+    # TODO(tonymd): Create a library function to call rather than launching
+    # the arduino_builder script.
+    flash_tool = 'arduino_builder'
+    cmd = [flash_tool, "--quiet"] + test_runner_args + [
+        "--run-objcopy", "--run-postbuilds", "--run-upload", upload_tool
+    ]
+    _LOG.info('Flashing firmware to device')
+    _LOG.debug('Running: %s', " ".join(cmd))
+
+    env = os.environ.copy()
+    process = subprocess.run(cmd,
+                             stdout=subprocess.PIPE,
+                             stderr=subprocess.STDOUT,
+                             env=env)
+    if process.returncode:
+        log_subprocess_output(logging.ERROR, process.stdout)
+        raise TestingFailure('Failed to flash target device')
+
+    log_subprocess_output(logging.DEBUG, process.stdout)
+
+    _LOG.debug('Successfully flashed firmware to device')
+
+
+def handle_test_results(test_output):
+    """Parses test output to determine whether tests passed or failed."""
+
+    if test_output.find(_TESTS_STARTING_STRING) == -1:
+        raise TestingFailure('Failed to find test start')
+
+    if test_output.rfind(_TESTS_DONE_STRING) == -1:
+        log_subprocess_output(logging.INFO, test_output)
+        raise TestingFailure('Tests did not complete')
+
+    if test_output.rfind(_TEST_FAILURE_STRING) != -1:
+        log_subprocess_output(logging.INFO, test_output)
+        raise TestingFailure('Test suite had one or more failures')
+
+    log_subprocess_output(logging.DEBUG, test_output)
+
+    _LOG.info('Test passed!')
+
+
+def run_device_test(binary, flash_only, port, baud, test_timeout, upload_tool,
+                    arduino_package_path, test_runner_args) -> bool:
+    """Flashes, runs, and checks an on-device test binary.
+
+    Returns true on test pass.
+    """
+    if test_runner_args is None:
+        test_runner_args = []
+
+    if "teensy" not in arduino_package_path:
+        raise ArduinoCoreNotSupported(arduino_package_path)
+
+    if port is None or "--set-variable" not in test_runner_args:
+        _LOG.debug('Attempting to automatically detect dev board')
+        boards = teensy_detector.detect_boards(arduino_package_path)
+        if not boards:
+            error = 'Could not find an attached device'
+            _LOG.error(error)
+            raise DeviceNotFound(error)
+        test_runner_args += boards[0].test_runner_args()
+        upload_tool = boards[0].arduino_upload_tool_name
+        if port is None:
+            port = boards[0].dev_name
+
+    # TODO(tonymd): Remove this when teensy_ports is working in teensy_detector
+    if platform.system() == "Windows":
+        # Delete the incorrect serial port.
+        index_of_port = [
+            i for i, l in enumerate(test_runner_args)
+            if l.startswith('serial.port=')
+        ]
+        if index_of_port:
+            # Delete the '--set-variable' arg
+            del test_runner_args[index_of_port[0] - 1]
+            # Delete the 'serial.port=*' arg
+            del test_runner_args[index_of_port[0] - 1]
+
+    _LOG.debug('Launching test binary %s', binary)
+    try:
+        result: List[bytes] = []
+        _LOG.info('Running test')
+        # Warning: A race condition is possible here. This assumes the host is
+        # able to connect to the port and that there isn't a test running on
+        # this serial port.
+        flash_device(test_runner_args, upload_tool)
+        wait_for_port(port)
+        if flash_only:
+            return True
+        result.append(read_serial(port, baud, test_timeout))
+        if result:
+            handle_test_results(result[0])
+    except TestingFailure as err:
+        _LOG.error(err)
+        return False
+
+    return True
+
+
+def get_option(key, config_file_values, args, required=False):
+    command_line_option = getattr(args, key, None)
+    final_option = config_file_values.get(key, command_line_option)
+    if required and command_line_option is None and final_option is None:
+        # Print a similar error message to argparse
+        executable = os.path.basename(sys.argv[0])
+        option = "--" + key.replace("_", "-")
+        print(f"{executable}: error: the following arguments are required: "
+              f"{option}")
+        sys.exit(1)
+    return final_option
+
+
+def main():
+    """Set up runner, and then flash/run device test."""
+    args = parse_args()
+
+    json_file_options, unused_config_path = decode_file_json(args.config_file)
+
+    log_level = logging.DEBUG if args.verbose else logging.INFO
+    pw_arduino_build.log.install(log_level)
+
+    # Construct arduino_builder flash arguments for a given .elf binary.
+    arduino_package_path = get_option("arduino_package_path",
+                                      json_file_options,
+                                      args,
+                                      required=True)
+    # Arduino core args.
+    arduino_builder_args = [
+        "--arduino-package-path",
+        arduino_package_path,
+        "--arduino-package-name",
+        get_option("arduino_package_name",
+                   json_file_options,
+                   args,
+                   required=True),
+    ]
+
+    # Use CIPD installed compilers.
+    compiler_path_override = get_option("compiler_path_override",
+                                        json_file_options, args)
+    if compiler_path_override:
+        arduino_builder_args += [
+            "--compiler-path-override", compiler_path_override
+        ]
+
+    # Run subcommand with board selection arg.
+    arduino_builder_args += [
+        "run", "--board",
+        get_option("board", json_file_options, args, required=True)
+    ]
+
+    # .elf file location args.
+    binary = args.binary
+    build_path = binary.parent.as_posix()
+    arduino_builder_args += ["--build-path", build_path]
+    build_project_name = binary.name
+    # Remove '.elf' extension.
+    match_result = re.match(r'(.*?)\.elf$', binary.name, re.IGNORECASE)
+    if match_result:
+        build_project_name = match_result[1]
+        arduino_builder_args += ["--build-project-name", build_project_name]
+
+    # USB port is passed to arduino_builder_args via --set-variable args.
+    if args.set_variable:
+        for var in args.set_variable:
+            arduino_builder_args += ["--set-variable", var]
+
+    if run_device_test(binary.as_posix(),
+                       args.flash_only,
+                       args.port,
+                       args.baud,
+                       args.test_timeout,
+                       args.upload_tool,
+                       arduino_package_path,
+                       test_runner_args=arduino_builder_args):
+        sys.exit(0)
+    else:
+        sys.exit(1)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/pw_arduino_build/py/pw_arduino_build/unit_test_server.py b/pw_arduino_build/py/pw_arduino_build/unit_test_server.py
new file mode 100644
index 0000000..0fafd71
--- /dev/null
+++ b/pw_arduino_build/py/pw_arduino_build/unit_test_server.py
@@ -0,0 +1,164 @@
+#!/usr/bin/env python3
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Launch a pw_test_server server to use for multi-device testing."""
+
+import argparse
+import logging
+import sys
+import tempfile
+from typing import IO, List, Optional
+
+import pw_cli.process
+import pw_arduino_build.log
+from pw_arduino_build import teensy_detector
+from pw_arduino_build.file_operations import decode_file_json
+from pw_arduino_build.unit_test_runner import ArduinoCoreNotSupported
+
+_LOG = logging.getLogger('unit_test_server')
+
+_TEST_RUNNER_COMMAND = 'arduino_unit_test_runner'
+
+_TEST_SERVER_COMMAND = 'pw_target_runner_server'
+
+
+class UnknownArduinoCore(Exception):
+    """Exception raised when no Arduino core can be found."""
+
+
+def parse_args():
+    """Parses command-line arguments."""
+
+    parser = argparse.ArgumentParser(description=__doc__)
+    parser.add_argument('--server-port',
+                        type=int,
+                        default=8081,
+                        help='Port to launch the pw_target_runner_server on')
+    parser.add_argument('--server-config',
+                        type=argparse.FileType('r'),
+                        help='Path to server config file')
+    parser.add_argument('--verbose',
+                        '-v',
+                        dest='verbose',
+                        action="store_true",
+                        help='Output additional logs as the script runs')
+    parser.add_argument("-c",
+                        "--config-file",
+                        required=True,
+                        help="Path to an arduino_builder config file.")
+    # TODO(tonymd): Explicitly split args using "--". See example in:
+    # //pw_unit_test/py/pw_unit_test/test_runner.py:326
+    parser.add_argument('runner_args',
+                        nargs=argparse.REMAINDER,
+                        help='Arguments to forward to the test runner')
+
+    return parser.parse_args()
+
+
+def generate_runner(command: str, arguments: List[str]) -> str:
+    """Generates a text-proto style pw_target_runner_server configuration."""
+    # TODO(amontanez): Use a real proto library to generate this when we have
+    # one set up.
+    for i, arg in enumerate(arguments):
+        arguments[i] = f'  args: "{arg}"'
+    runner = ['runner {', f'  command:"{command}"']
+    runner.extend(arguments)
+    runner.append('}\n')
+    return '\n'.join(runner)
+
+
+def generate_server_config(runner_args: Optional[List[str]],
+                           arduino_package_path: str) -> IO[bytes]:
+    """Returns a temporary generated file for use as the server config."""
+
+    if "teensy" not in arduino_package_path:
+        raise ArduinoCoreNotSupported(arduino_package_path)
+
+    boards = teensy_detector.detect_boards(arduino_package_path)
+    if not boards:
+        _LOG.critical('No attached boards detected')
+        sys.exit(1)
+    config_file = tempfile.NamedTemporaryFile()
+    _LOG.debug('Generating test server config at %s', config_file.name)
+    _LOG.debug('Found %d attached devices', len(boards))
+    for board in boards:
+        test_runner_args = []
+        if runner_args:
+            test_runner_args += runner_args
+        test_runner_args += ["-v"] + board.test_runner_args()
+        test_runner_args += ["--port", board.dev_name]
+        test_runner_args += ["--upload-tool", board.arduino_upload_tool_name]
+        config_file.write(
+            generate_runner(_TEST_RUNNER_COMMAND,
+                            test_runner_args).encode('utf-8'))
+    config_file.flush()
+    return config_file
+
+
+def launch_server(server_config: Optional[IO[bytes]],
+                  server_port: Optional[int], runner_args: Optional[List[str]],
+                  arduino_package_path: str) -> int:
+    """Launch a device test server with the provided arguments."""
+    if server_config is None:
+        # Auto-detect attached boards if no config is provided.
+        server_config = generate_server_config(runner_args,
+                                               arduino_package_path)
+
+    cmd = [_TEST_SERVER_COMMAND, '-config', server_config.name]
+
+    if server_port is not None:
+        cmd.extend(['-port', str(server_port)])
+
+    return pw_cli.process.run(*cmd, log_output=True).returncode
+
+
+def main():
+    """Launch a device test server with the provided arguments."""
+    args = parse_args()
+
+    if "--" in args.runner_args:
+        args.runner_args.remove("--")
+
+    log_level = logging.DEBUG if args.verbose else logging.INFO
+    pw_arduino_build.log.install(log_level)
+
+    # Get arduino_package_path from either the config file or command line args.
+    arduino_package_path = None
+    if args.config_file:
+        json_file_options, unused_config_path = decode_file_json(
+            args.config_file)
+        arduino_package_path = json_file_options.get("arduino_package_path",
+                                                     None)
+        # Must pass --config-file option in the runner_args.
+        if "--config-file" not in args.runner_args:
+            args.runner_args.append("--config-file")
+            args.runner_args.append(args.config_file)
+
+    # Check for arduino_package_path in the runner_args
+    try:
+        arduino_package_path = args.runner_args[
+            args.runner_args.index("--arduino-package-path") + 1]
+    except (ValueError, IndexError):
+        # Only raise an error if arduino_package_path not set from the json.
+        if arduino_package_path is None:
+            raise UnknownArduinoCore("Test runner arguments: '{}'".format(
+                " ".join(args.runner_args)))
+
+    exit_code = launch_server(args.server_config, args.server_port,
+                              args.runner_args, arduino_package_path)
+    sys.exit(exit_code)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/pw_arduino_build/py/setup.py b/pw_arduino_build/py/setup.py
new file mode 100644
index 0000000..13af5fc
--- /dev/null
+++ b/pw_arduino_build/py/setup.py
@@ -0,0 +1,43 @@
+# Copyright 2019 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""pw_arduino_build"""
+
+import setuptools  # type: ignore
+
+setuptools.setup(
+    name='pw_arduino_build',
+    version='0.0.1',
+    author='Pigweed Authors',
+    author_email='pigweed-developers@googlegroups.com',
+    description='Target-specific python scripts for the arduino target',
+    packages=setuptools.find_packages(),
+    package_data={'pw_arduino_build': ['py.typed']},
+    zip_safe=False,
+    entry_points={
+        'console_scripts': [
+            'arduino_builder = pw_arduino_build.__main__:main',
+            'teensy_detector = pw_arduino_build.teensy_detector:main',
+            'arduino_unit_test_runner = '
+            '    pw_arduino_build.unit_test_runner:main',
+            'arduino_test_server = '
+            '    pw_arduino_build.unit_test_server:main',
+            'arduino_test_client = '
+            '    pw_arduino_build.unit_test_client:main',
+        ]
+    },
+    install_requires=[
+        'pyserial',
+        'coloredlogs',
+        'parameterized',
+    ])
diff --git a/pw_assert/BUILD b/pw_assert/BUILD
index 16e8cec..6b799f1 100644
--- a/pw_assert/BUILD
+++ b/pw_assert/BUILD
@@ -29,6 +29,8 @@
     name = "facade",
     hdrs = [
         "public/pw_assert/assert.h",
+        "public/pw_assert/light.h",
+        "public/pw_assert/options.h",
         "public/pw_assert/internal/assert_impl.h",
     ],
     includes = ["public"],
@@ -58,6 +60,7 @@
     srcs = [
         "assert_facade_test.cc",
         "fake_backend.cc",
+        "light_test.cc",
         "public/pw_assert/internal/assert_impl.h",
         "pw_assert_test/fake_backend.h",
     ],
diff --git a/pw_assert/BUILD.gn b/pw_assert/BUILD.gn
index d8d1ad1..084aa00 100644
--- a/pw_assert/BUILD.gn
+++ b/pw_assert/BUILD.gn
@@ -12,12 +12,12 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/facade.gni")
 import("$dir_pw_docgen/docs.gni")
 import("$dir_pw_unit_test/test.gni")
+
 declare_args() {
   # Backend for the pw_assert module.
   pw_assert_BACKEND = ""
@@ -34,18 +34,51 @@
     "public/pw_assert/assert.h",
     "public/pw_assert/internal/assert_impl.h",
   ]
+  public_deps = [
+    dir_pw_preprocessor,
+
+    # Also expose light.h to all users of pw_assert.
+    ":light",
+  ]
+}
+
+# Provide a way include "pw_assert/light.h" without depending on the full
+# assert facade. This enables relying on light asserts from low-level headers
+# like polyfill or span that might trigger circular includes due to the
+# backend.
+#
+# See the docs for more discussion around where to use which assert system.
+pw_source_set("light") {
+  public_configs = [ ":default_config" ]
+  public = [
+    "public/pw_assert/light.h",
+
+    # Needed for PW_ASSERT_ENABLE_DEBUG. Note that depending on :pw_assert to
+    # get options.h won't work here since it will trigger the circular include
+    # problem that light asserts are designed to solve.
+    "public/pw_assert/options.h",
+  ]
   public_deps = [ dir_pw_preprocessor ]
 }
 
+# Note: While this is technically a test, it doesn't verify any of the output
+# is more of a compile test. The results can be visually verified if desired.
+pw_test("light_test") {
+  configs = [ ":default_config" ]
+  sources = [ "light_test.cc" ]
+  deps = [ ":pw_assert" ]
+}
+
 pw_test_group("tests") {
   tests = [
     ":assert_backend_compile_test",
     ":assert_facade_test",
+    ":light_test",
   ]
 }
 
 # The assert facade test doesn't require a backend since a fake one is
-# provided.  However, since this doesn't depend on the backend it re-includes
+# provided. However, since this doesn't depend on the backend it re-includes
 # the facade headers.
 pw_test("assert_facade_test") {
   configs = [ ":default_config" ]  # For internal/assert_impl.h
@@ -55,7 +88,10 @@
     "public/pw_assert/internal/assert_impl.h",
     "pw_assert_test/fake_backend.h",
   ]
-  deps = [ dir_pw_status ]
+  deps = [
+    ":light",
+    dir_pw_status,
+  ]
 
   # TODO(frolv): Fix this test on the QEMU target.
   enable_if = pw_build_EXECUTABLE_TARGET_TYPE != "lm3s6965evb_executable"
diff --git a/pw_assert/CMakeLists.txt b/pw_assert/CMakeLists.txt
index 508393f..9d9a259 100644
--- a/pw_assert/CMakeLists.txt
+++ b/pw_assert/CMakeLists.txt
@@ -12,6 +12,8 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+
 pw_add_facade(pw_assert
   PUBLIC_DEPS
     pw_preprocessor
diff --git a/pw_assert/assert_backend_compile_test.cc b/pw_assert/assert_backend_compile_test.cc
index da19e34..6e1ac2e 100644
--- a/pw_assert/assert_backend_compile_test.cc
+++ b/pw_assert/assert_backend_compile_test.cc
@@ -203,7 +203,7 @@
 
 TEST(Check, CheckOkMacrosCompile) {
   MAYBE_SKIP_TEST;
-  pw::Status status = pw::Status::UNKNOWN;
+  pw::Status status = pw::Status::Unknown();
 
   // Typical case with long names.
   PW_CHECK_OK(status);
@@ -216,10 +216,10 @@
   CHECK_OK(status, "msg: %d", 5);
 
   // Status from a literal.
-  PW_CHECK_OK(pw::Status::OK);
+  PW_CHECK_OK(pw::Status::Ok());
 
   // Status from a function.
-  PW_CHECK_OK(MakeStatus(pw::Status::OK));
+  PW_CHECK_OK(MakeStatus(pw::Status::Ok()));
 
   // Status from C enums.
   PW_CHECK_OK(PW_STATUS_OK);
diff --git a/pw_assert/assert_backend_compile_test_c.c b/pw_assert/assert_backend_compile_test_c.c
index cecc756..c602f37 100644
--- a/pw_assert/assert_backend_compile_test_c.c
+++ b/pw_assert/assert_backend_compile_test_c.c
@@ -24,6 +24,17 @@
 #define PW_ASSERT_USE_SHORT_NAMES 1
 
 #include "pw_assert/assert.h"
+
+static void EnsureNullIsIncluded() {
+  // This is a compile check to ensure NULL is defined. It comes before the
+  // status.h include to ensure we don't accidentally get NULL from status.h.
+  PW_CHECK_NOTNULL(0xa);
+  PW_CHECK_NOTNULL(0x0);
+}
+
+#include <stdbool.h>
+
+#include "pw_assert/light.h"
 #include "pw_status/status.h"
 
 #ifdef __cplusplus
@@ -115,6 +126,9 @@
     PW_CHECK_PTR_GE(x_ptr, y_ptr);
     PW_CHECK_PTR_GE(x_ptr, y_ptr, "PTR: " FAIL_IF_HIDDEN);
     PW_CHECK_PTR_GE(x_ptr, y_ptr, "PTR: " FAIL_IF_HIDDEN_ARGS, z);
+
+    PW_CHECK_NOTNULL(0xa);
+    PW_CHECK_NOTNULL(0x0);
   }
 
   {  // TEST(Check, FloatComparison)
@@ -185,7 +199,7 @@
     CHECK_INT_LE(x_int, y_int, "INT: " FAIL_IF_DISPLAYED_ARGS, z);
   }
 
-  {  // Compile tests for PW_ASSERT_OK().
+  {  // Compile tests for PW_CHECK_OK().
     PW_CHECK_OK(PW_STATUS_OK);
     PW_CHECK_OK(PW_STATUS_OK, "msg");
     PW_CHECK_OK(PW_STATUS_OK, "msg: %d", 5);
@@ -193,4 +207,20 @@
     PW_DCHECK_OK(PW_STATUS_OK, "msg");
     PW_DCHECK_OK(PW_STATUS_OK, "msg: %d", 5);
   }
+
+  {  // TEST(Assert, Basic)
+    MAYBE_SKIP_TEST;
+    PW_ASSERT(false);
+    PW_ASSERT(123 == 456);
+  }
+
+  {  // Compile tests for PW_ASSERT().
+    PW_ASSERT(true);
+    PW_ASSERT(123 != 456);
+
+    PW_DASSERT(true);
+    PW_DASSERT(123 != 456);
+  }
+
+  EnsureNullIsIncluded();
 }
diff --git a/pw_assert/assert_facade_test.cc b/pw_assert/assert_facade_test.cc
index fc48268..083d206 100644
--- a/pw_assert/assert_facade_test.cc
+++ b/pw_assert/assert_facade_test.cc
@@ -227,6 +227,16 @@
 TEST_F(AssertPass, PtrNotNull) { PW_CHECK_NOTNULL(0xa); }
 TEST_F(AssertFail, PtrNotNull) { PW_CHECK_NOTNULL(0x0); }
 
+// Note: Due to platform inconsistencies, the below test for the NOTNULL
+// message doesn't work. Some platforms print NULL formatted as %p as "(nil)",
+// others "0x0". Leaving this here for reference.
+//
+//   TEST_F(AssertFail, PtrNotNullDescription) {
+//     intptr_t intptr = 0;
+//     PW_CHECK_NOTNULL(intptr);
+//     EXPECT_MESSAGE("Check failed: intptr (=0x0) != nullptr (=0x0). ");
+//   }
+
 // PW_CHECK_FLOAT_*(...)
 // Binary checks with floats, comparisons: EXACT_LT, EXACT_LE, NEAR, EXACT_EQ,
 // EXACT_NE, EXACT_GE, EXACT_GT.
@@ -374,7 +384,7 @@
 
 // Verify side effects of debug checks work as expected.
 // Only check a couple of cases, since the logic is all the same.
-#if PW_ASSERT_ENABLE_DCHECK
+#if PW_ASSERT_ENABLE_DEBUG
 // When DCHECKs are enabled, they behave the same as normal checks.
 TEST(AssertPass, DCheckEnabledSingleSideEffectingCall) {
   global_state_for_multi_evaluate_test = 0;
@@ -407,41 +417,41 @@
   EXPECT_EQ(global_state_for_multi_evaluate_test, 2);
 }
 
-#else  // PW_ASSERT_ENABLE_DCHECK
+#else  // PW_ASSERT_ENABLE_DEBUG
 
 // When DCHECKs are disabled, they should not trip, and their arguments
 // shouldn't be evaluated.
-TEST(AssertPass, DCheckDisabledSingleSideEffectingCall) {
+TEST(AssertPass, DCheckDisabledSingleSideEffectingCall_1) {
   global_state_for_multi_evaluate_test = 0;
   PW_DCHECK(IncrementsGlobal() == 0);
   EXPECT_EQ(global_state_for_multi_evaluate_test, 0);
 }
-TEST(AssertPass, DCheckDisabledSingleSideEffectingCall) {
+TEST(AssertPass, DCheckDisabledSingleSideEffectingCall_2) {
   global_state_for_multi_evaluate_test = 0;
   PW_DCHECK(IncrementsGlobal() == 1);
   EXPECT_EQ(global_state_for_multi_evaluate_test, 0);
 }
-TEST(AssertPass, DCheckDisabledBinaryOpSingleSideEffectingCall) {
+TEST(AssertPass, DCheckDisabledBinaryOpSingleSideEffectingCall_1) {
   global_state_for_multi_evaluate_test = 0;
   PW_DCHECK_INT_EQ(0, IncrementsGlobal());
   EXPECT_EQ(global_state_for_multi_evaluate_test, 0);
 }
-TEST(AssertPass, DCheckDisabledBinaryOpTwoSideEffectingCalls) {
+TEST(AssertPass, DCheckDisabledBinaryOpTwoSideEffectingCalls_1) {
   global_state_for_multi_evaluate_test = 0;
   PW_DCHECK_INT_EQ(IncrementsGlobal(), IncrementsGlobal());
   EXPECT_EQ(global_state_for_multi_evaluate_test, 0);
 }
-TEST(AssertPass, DCheckDisabledBinaryOpSingleSideEffectingCall) {
+TEST(AssertPass, DCheckDisabledBinaryOpSingleSideEffectingCall_2) {
   global_state_for_multi_evaluate_test = 0;
   PW_DCHECK_INT_EQ(12314, IncrementsGlobal());
   EXPECT_EQ(global_state_for_multi_evaluate_test, 0);
 }
-TEST(AssertPass, DCheckDisabledBinaryOpTwoSideEffectingCalls) {
+TEST(AssertPass, DCheckDisabledBinaryOpTwoSideEffectingCalls_2) {
   global_state_for_multi_evaluate_test = 0;
   PW_DCHECK_INT_EQ(IncrementsGlobal() + 10, IncrementsGlobal());
   EXPECT_EQ(global_state_for_multi_evaluate_test, 0);
 }
-#endif  // PW_ASSERT_ENABLE_DCHECK
+#endif  // PW_ASSERT_ENABLE_DEBUG
 
 // Note: This requires enabling PW_ASSERT_USE_SHORT_NAMES 1 above.
 TEST(Check, ShortNamesWork) {
@@ -465,25 +475,25 @@
 
 // Verify PW_CHECK_OK, including message handling.
 TEST_F(AssertFail, StatusNotOK) {
-  pw::Status status = pw::Status::UNKNOWN;
+  pw::Status status = pw::Status::Unknown();
   PW_CHECK_OK(status);
   EXPECT_MESSAGE("Check failed: status (=UNKNOWN) == Status::OK (=OK). ");
 }
 
 TEST_F(AssertFail, StatusNotOKMessageNoArguments) {
-  pw::Status status = pw::Status::UNKNOWN;
+  pw::Status status = pw::Status::Unknown();
   PW_CHECK_OK(status, "msg");
   EXPECT_MESSAGE("Check failed: status (=UNKNOWN) == Status::OK (=OK). msg");
 }
 
 TEST_F(AssertFail, StatusNotOKMessageArguments) {
-  pw::Status status = pw::Status::UNKNOWN;
+  pw::Status status = pw::Status::Unknown();
   PW_CHECK_OK(status, "msg: %d", 5);
   EXPECT_MESSAGE("Check failed: status (=UNKNOWN) == Status::OK (=OK). msg: 5");
 }
 
 // Example expression for the test below.
-pw::Status DoTheThing() { return pw::Status::RESOURCE_EXHAUSTED; }
+pw::Status DoTheThing() { return pw::Status::ResourceExhausted(); }
 
 TEST_F(AssertFail, NonTrivialExpression) {
   PW_CHECK_OK(DoTheThing());
@@ -494,33 +504,26 @@
 // Note: This function seems pointless but it is not, since pw::Status::FOO
 // constants are not actually status objects, but code objects. This way we can
 // ensure the macros work with both real status objects and literals.
-pw::Status MakeStatus(pw::Status status) { return status; }
-TEST_F(AssertPass, Constant) { PW_CHECK_OK(pw::Status::OK); }
-TEST_F(AssertPass, Dynamic) { PW_CHECK_OK(MakeStatus(pw::Status::OK)); }
+TEST_F(AssertPass, Function) { PW_CHECK_OK(pw::Status::Ok()); }
 TEST_F(AssertPass, Enum) { PW_CHECK_OK(PW_STATUS_OK); }
-TEST_F(AssertFail, Constant) { PW_CHECK_OK(pw::Status::UNKNOWN); }
-TEST_F(AssertFail, Dynamic) { PW_CHECK_OK(MakeStatus(pw::Status::UNKNOWN)); }
+TEST_F(AssertFail, Function) { PW_CHECK_OK(pw::Status::Unknown()); }
 TEST_F(AssertFail, Enum) { PW_CHECK_OK(PW_STATUS_UNKNOWN); }
 
-#if PW_ASSERT_ENABLE_DCHECK
+#if PW_ASSERT_ENABLE_DEBUG
 
 // In debug mode, the asserts should check their arguments.
-TEST_F(AssertPass, DCheckConstant) { PW_DCHECK_OK(pw::Status::OK); }
-TEST_F(AssertPass, DCheckDynamic) { PW_DCHECK_OK(MakeStatus(pw::Status::OK)); }
-TEST_F(AssertFail, DCheckConstant) { PW_DCHECK_OK(pw::Status::UNKNOWN); }
-TEST_F(AssertFail, DCheckDynamic) {
-  PW_DCHECK_OK(MakeStatus(pw::Status::UNKNOWN));
-}
-#else  // PW_ASSERT_ENABLE_DCHECK
+TEST_F(AssertPass, DCheckFunction) { PW_DCHECK_OK(pw::Status::Ok()); }
+TEST_F(AssertPass, DCheckEnum) { PW_DCHECK_OK(PW_STATUS_OK); }
+TEST_F(AssertFail, DCheckFunction) { PW_DCHECK_OK(pw::Status::Unknown()); }
+TEST_F(AssertFail, DCheckEnum) { PW_DCHECK_OK(PW_STATUS_UNKNOWN); }
+#else  // PW_ASSERT_ENABLE_DEBUG
 
 // In release mode, all the asserts should pass.
-TEST_F(AssertPass, DCheckConstant) { PW_DCHECK_OK(pw::Status::OK); }
-TEST_F(AssertPass, DCheckDynamic) { PW_DCHECK_OK(MakeStatus(pw::Status::OK)); }
-TEST_F(AssertPass, DCheckConstant) { PW_DCHECK_OK(pw::Status::UNKNOWN); }
-TEST_F(AssertPass, DCheckDynamic) {
-  PW_DCHECK_OK(MakeStatus(pw::Status::UNKNOWN));
-}
-#endif  // PW_ASSERT_ENABLE_DCHECK
+TEST_F(AssertPass, DCheckFunction_Ok) { PW_DCHECK_OK(pw::Status::Ok()); }
+TEST_F(AssertPass, DCheckEnum_Ok) { PW_DCHECK_OK(PW_STATUS_OK); }
+TEST_F(AssertPass, DCheckFunction_Err) { PW_DCHECK_OK(pw::Status::Unknown()); }
+TEST_F(AssertPass, DCheckEnum_Err) { PW_DCHECK_OK(PW_STATUS_UNKNOWN); }
+#endif  // PW_ASSERT_ENABLE_DEBUG
 
 // TODO: Figure out how to run some of these tests is C.
 
diff --git a/pw_assert/docs.rst b/pw_assert/docs.rst
index 19298a6..2d96c60 100644
--- a/pw_assert/docs.rst
+++ b/pw_assert/docs.rst
@@ -1,8 +1,4 @@
-.. _chapter-pw-assert:
-
-.. default-domain:: cpp
-
-.. highlight:: cpp
+.. _module-pw_assert:
 
 =========
 pw_assert
@@ -29,6 +25,7 @@
   a message.
 - **PW_CHECK_<type>_<cmp>(a, b[, fmt, ...])** - Assert that the expression ``a
   <cmp> b`` is true, optionally with a message.
+- **PW_ASSERT(condition)** - Header- and constexpr- assert.
 
 .. tip::
 
@@ -73,6 +70,11 @@
     // The functions ItemCount() and GetStateStr() are never called.
     PW_DCHECK_INT_LE(ItemCount(), 100, "System state: %s", GetStateStr());
 
+.. tip::
+
+  Use ``PW_ASSERT`` from ``pw_assert/light.h`` for asserts in headers or
+  asserting in ``constexpr`` contexts.
+
 Structure of assert modules
 ---------------------------
 The module is split into two components:
@@ -97,12 +99,12 @@
 
 See the Backend API section below for more details.
 
---------------------
-Facade API reference
---------------------
+----------
+Facade API
+----------
 
 The below functions describe the assert API functions that applications should
-invoke to assert.
+invoke to assert. These macros are found in the ``pw_assert/assert.h`` header.
 
 .. cpp:function:: PW_CRASH(format, ...)
 
@@ -391,9 +393,90 @@
     code; for example ``status == RESOURCE_EXHAUSTED`` instead of ``status ==
     5``.
 
----------------------
-Backend API reference
----------------------
+---------
+Light API
+---------
+The normal ``PW_CHECK_*`` and ``PW_DCHECK_*`` family of macros are intended to
+provide rich debug information, like the file, line number, value of operands
+in boolean comparisons, and more. However, this comes at a cost: these macros
+depend directly on the backend headers, and may perform complicated call-site
+transformations like tokenization.
+
+There are several issues with the normal ``PW_CHECK_*`` suite of macros:
+
+1. ``PW_CHECK_*`` in headers can cause ODR violations in the case of tokenized
+   asserts, due to differing module choices.
+2. ``PW_CHECK_*`` is not constexpr-safe.
+3. ``PW_CHECK_*`` can cause code bloat with some backends; this is the tradeoff
+   to get rich assert information.
+4. ``PW_CHECK_*`` can trigger circular dependencies when asserts are used from
+   low-level contexts, like in ``<span>``.
+
+**Light asserts** solve all four of the above problems: No risk of ODR
+violations, are constexpr safe, and have a tiny call site footprint; and there
+is no header dependency on the backend preventing circular include issues.
+However, there are **no format messages, no captured line number, no captured
+file, no captured expression, or anything other than a binary indication of
+failure**.
+
+Example
+-------
+
+.. code-block:: cpp
+
+  // This example demonstrates asserting in a header.
+
+  #include "pw_assert/light.h"
+
+  class InlinedSubsystem {
+   public:
+    void DoSomething() {
+      // GOOD: No problem; PW_ASSERT is fine to inline and place in a header.
+      PW_ASSERT(IsEnabled());
+    }
+    void DoSomethingElse() {
+      // BAD: Generally avoid using PW_DCHECK() or PW_CHECK in headers. If you
+      // want rich asserts or logs, move the function into the .cc file, and
+      // then use PW_CHECK there.
+      PW_DCHECK(IsEnabled());  // DON'T DO THIS
+    }
+  };
+
+Light API reference
+-------------------
+.. cpp:function:: PW_ASSERT(condition)
+
+  A header- and constexpr-safe version of ``PW_CHECK()``.
+
+  If the given condition is false, crash the system. Otherwise, do nothing.
+  The condition is guaranteed to be evaluated. This assert implementation is
+  guaranteed to be constexpr-safe.
+
+.. cpp:function:: PW_DASSERT(condition)
+
+  A header- and constexpr-safe version of ``PW_DCHECK()``.
+
+  Same as ``PW_ASSERT()``, except that if ``PW_ASSERT_ENABLE_DEBUG == 0``, the
+  assert is disabled and condition is not evaluated.
+
+.. attention::
+
+  Unlike the ``PW_CHECK_*()`` suite of macros, ``PW_ASSERT()`` and
+  ``PW_DASSERT()`` capture no rich information like line numbers, the file,
+  expression arguments, or the stringified expression. Use these macros **only
+  when absolutely necessary**--in headers, constexpr contexts, or in rare cases
+  where the call site overhead of a full PW_CHECK must be avoided.
+
+  Use ``PW_CHECK_*()`` whenever possible.
+
+Light API backend
+-----------------
+The light API ultimately calls the C function ``pw_assert_HandleFailure()``,
+which must be provided by the assert backend.
+
+-----------
+Backend API
+-----------
 
 The backend controls what to do in the case of an assertion failure. In the
 most basic cases, the backend could display the assertion failure on something
@@ -401,7 +484,7 @@
 the backend could store crash details like the current thread's stack to flash.
 
 This facade module (``pw_assert``) does not provide a backend. See
-:ref:`chapter-pw-assert-basic` for a basic implementation.
+:ref:`module-pw_assert_basic` for a basic implementation.
 
 .. attention::
 
@@ -462,9 +545,20 @@
 
   .. tip::
 
-    See :ref:`chapter-pw-assert-basic` for one way to combine these arguments
+    See :ref:`module-pw_assert_basic` for one way to combine these arguments
     into a meaningful error message.
 
+Additionally, the backend must provide a link-time function for the light
+assert handler. This does not need to appear in the backend header, but instead
+is in a ``.cc`` file.
+
+.. cpp:function:: pw_assert_HandleFailure()
+
+  Handle a low-level crash. This crash entry happens through
+  ``pw_assert/light.h``. In this crash handler, there is no access to line,
+  file, expression, or other rich assert information. Backends should do
+  something reasonable in this case; typically, capturing the stack is useful.
+
 --------------------------
 Frequently asked questions
 --------------------------
@@ -507,14 +601,15 @@
 
   **Do not return error status codes for obvious API misuse**
 
-  Returning an error code may mask the earliest sign of a bug; notifying the
-  developer of the problem depends on correct propagation of the error to upper
-  levels of the system. Instead, prefer to use the ``CHECK_*`` or ``DCHECK_*``
-  macros to ensure a prompt termination and warning to the developer.
+  Returning an error code may **mask the earliest sign of a bug** because
+  notifying the developer of the problem depends on correct propagation of the
+  error to upper levels of the system. Instead, prefer to use the ``CHECK_*``
+  or ``DCHECK_*`` macros to ensure a prompt termination and warning to the
+  developer.
 
-  Error status codes should be reserved for system misbehaviour or expected
-  exceptional cases, like a sensor is not yet ready, or a storage subsystem is
-  full when writing. Doing ``CHECK_*`` assertions in those cases would be a
+  **Error status codes should be reserved for system misbehaviour or expected
+  exceptional cases**, like a sensor is not yet ready, or a storage subsystem
+  is full when writing. Doing ``CHECK_*`` assertions in those cases would be a
   mistake; so use error codes in those cases instead.
 
 How should objects be asserted against or compared?
@@ -576,4 +671,45 @@
 -------------
 Compatibility
 -------------
-The facade is compatible with C and C++.
+The facade is compatible with both C and C++.
+
+----------------
+Roadmap & Status
+----------------
+The Pigweed assert subsystem consists of several modules that work in
+coordination. This module is the facade (API), then a number of backends are
+available to handle assert failures. Products can also define their own
+backends. In some cases, the backends will have backends (like
+``pw_log_tokenized``).
+
+Below is a brief summary of what modules are ready for use:
+
+Available assert backends
+-------------------------
+- ``pw_assert`` - **Stable** - The assert facade (this module). This module is
+  stable, and in production use. The documentation is comprehensive and covers
+  the functionality. There are (a) tests for the facade macro processing logic,
+  using a fake assert backend; and (b) compile tests to verify that the
+  selected backend compiles with all supported assert constructions and types.
+- ``pw_assert_basic`` - **Stable** - The assert basic module is a simple assert
+  handler that displays the failed assert line and the values of captured
+  arguments. Output is directed to ``pw_sys_io``. This module is a great
+  ready-to-roll module when bringing up a system, but is likely not the best
+  choice for production.
+- ``pw_assert_log`` - **Stable** - This assert backend redirects to logging,
+  but with a logging flag set that indicates an assert failure. This is our
+  advised approach to get **tokenized asserts**--by using tokenized logging,
+  then using the ``pw_assert_log`` backend.
+
+Note: If one desires a null assert module (where asserts are removed), use
+``pw_assert_log`` in combination with ``pw_log_null``. This will direct asserts
+to logs, then the logs are removed due to the null backend.
+
+Missing functionality
+---------------------
+- **Stack traces** - Pigweed doesn't have a reliable stack walker, which makes
+  displaying a stack trace on crash harder. We plan to add this eventually.
+- **Snapshot integration** - Pigweed doesn't yet have a rich system state
+  capture system that can capture state like number of tasks, available memory,
+  and so on. Snapshot facilities are the obvious ones to run inside an assert
+  handler. It'll happen someday.
diff --git a/pw_assert/light_test.cc b/pw_assert/light_test.cc
new file mode 100644
index 0000000..1710e4f
--- /dev/null
+++ b/pw_assert/light_test.cc
@@ -0,0 +1,53 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_assert/light.h"
+
+#include "gtest/gtest.h"
+#include "pw_assert/assert.h"
+
+// PW_ASSERT() should always be enabled, and always evaluate the expression.
+TEST(Light, AssertTrue) {
+  int evaluated = 1;
+  PW_ASSERT(++evaluated);
+  EXPECT_EQ(evaluated, 2);
+}
+
+// PW_DASSERT() might be disabled sometimes.
+TEST(Light, DebugAssertTrue) {
+  int evaluated = 1;
+  PW_DASSERT(++evaluated);
+  if (PW_ASSERT_ENABLE_DEBUG == 1) {
+    EXPECT_EQ(evaluated, 2);
+  } else {
+    EXPECT_EQ(evaluated, 1);
+  }
+}
+
+// Unfortunately, we don't have the infrastructure to test failure handling
+// automatically, since the harness crashes in the process of running this
+// test. The unsatisfying alternative is to test the functionality manually,
+// then disable the test.
+
+TEST(Light, AssertFalse) {
+  if (0) {
+    PW_ASSERT(false);
+  }
+}
+
+TEST(Light, DebugAssertFalse) {
+  if (0) {
+    PW_DASSERT(false);
+  }
+}
diff --git a/pw_assert/public/pw_assert/assert.h b/pw_assert/public/pw_assert/assert.h
index 36082c5..2a4c999 100644
--- a/pw_assert/public/pw_assert/assert.h
+++ b/pw_assert/public/pw_assert/assert.h
@@ -19,7 +19,7 @@
 //
 #pragma once
 
-#include "pw_preprocessor/macro_arg_count.h"
+#include "pw_preprocessor/arguments.h"
 
 // The pw_assert public API:
 //
diff --git a/pw_assert/public/pw_assert/internal/assert_impl.h b/pw_assert/public/pw_assert/internal/assert_impl.h
index 4b1d594..35eab1e 100644
--- a/pw_assert/public/pw_assert/internal/assert_impl.h
+++ b/pw_assert/public/pw_assert/internal/assert_impl.h
@@ -13,24 +13,18 @@
 // the License.
 #pragma once
 
+#ifndef __cplusplus
+#include <stddef.h>
+#endif  // __cplusplus
+
 // Note: This file depends on the backend header already being included.
 
-#include "pw_preprocessor/macro_arg_count.h"
-
-// Define PW_ASSERT_ENABLE_DCHECK, which controls whether DCHECKs are enabled.
-#if !defined(PW_ASSERT_ENABLE_DCHECK)
-#if defined(NDEBUG)
-// Release mode; remove all DCHECK*() asserts.
-#define PW_ASSERT_ENABLE_DCHECK 0
-#else
-// Debug mode; keep all DCHECK*() asserts.
-#define PW_ASSERT_ENABLE_DCHECK 1
-#endif  // defined (NDEBUG)
-#endif  // !defined(PW_ASSERT_ENABLE_DCHECK)
+#include "pw_assert/options.h"
+#include "pw_preprocessor/arguments.h"
+#include "pw_preprocessor/compiler.h"
 
 // PW_CRASH - Crash the system, with a message.
-#define PW_CRASH(message, ...) \
-  PW_HANDLE_CRASH(message PW_COMMA_ARGS(__VA_ARGS__))
+#define PW_CRASH PW_HANDLE_CRASH
 
 // PW_CHECK - If condition evaluates to false, crash. Message optional.
 #define PW_CHECK(condition, ...)                              \
@@ -41,9 +35,12 @@
     }                                                         \
   } while (0)
 
-#define PW_DCHECK(...)         \
-  if (PW_ASSERT_ENABLE_DCHECK) \
-  PW_CHECK(__VA_ARGS__)
+#define PW_DCHECK(...)            \
+  do {                            \
+    if (PW_ASSERT_ENABLE_DEBUG) { \
+      PW_CHECK(__VA_ARGS__);      \
+    }                             \
+  } while (0)
 
 // PW_D?CHECK_<type>_<comparison> macros - Binary comparison asserts.
 //
@@ -63,12 +60,12 @@
 #define PW_CHECK_INT_NE(arga, argb, ...) _PW_CHECK_BINARY_CMP_IMPL(arga, !=, argb, int, "%d", __VA_ARGS__)
 
 // Debug checks for int: LE, LT, GE, GT, EQ.
-#define PW_DCHECK_INT_LE(...) if (PW_ASSERT_ENABLE_DCHECK) PW_CHECK_INT_LE(__VA_ARGS__)
-#define PW_DCHECK_INT_LT(...) if (PW_ASSERT_ENABLE_DCHECK) PW_CHECK_INT_LT(__VA_ARGS__)
-#define PW_DCHECK_INT_GE(...) if (PW_ASSERT_ENABLE_DCHECK) PW_CHECK_INT_GE(__VA_ARGS__)
-#define PW_DCHECK_INT_GT(...) if (PW_ASSERT_ENABLE_DCHECK) PW_CHECK_INT_GT(__VA_ARGS__)
-#define PW_DCHECK_INT_EQ(...) if (PW_ASSERT_ENABLE_DCHECK) PW_CHECK_INT_EQ(__VA_ARGS__)
-#define PW_DCHECK_INT_NE(...) if (PW_ASSERT_ENABLE_DCHECK) PW_CHECK_INT_NE(__VA_ARGS__)
+#define PW_DCHECK_INT_LE(...) if (!(PW_ASSERT_ENABLE_DEBUG)) {} else PW_CHECK_INT_LE(__VA_ARGS__)
+#define PW_DCHECK_INT_LT(...) if (!(PW_ASSERT_ENABLE_DEBUG)) {} else PW_CHECK_INT_LT(__VA_ARGS__)
+#define PW_DCHECK_INT_GE(...) if (!(PW_ASSERT_ENABLE_DEBUG)) {} else PW_CHECK_INT_GE(__VA_ARGS__)
+#define PW_DCHECK_INT_GT(...) if (!(PW_ASSERT_ENABLE_DEBUG)) {} else PW_CHECK_INT_GT(__VA_ARGS__)
+#define PW_DCHECK_INT_EQ(...) if (!(PW_ASSERT_ENABLE_DEBUG)) {} else PW_CHECK_INT_EQ(__VA_ARGS__)
+#define PW_DCHECK_INT_NE(...) if (!(PW_ASSERT_ENABLE_DEBUG)) {} else PW_CHECK_INT_NE(__VA_ARGS__)
 
 // Checks for unsigned int: LE, LT, GE, GT, EQ.
 #define PW_CHECK_UINT_LE(arga, argb, ...) _PW_CHECK_BINARY_CMP_IMPL(arga, <=, argb, unsigned int, "%u", __VA_ARGS__)
@@ -79,32 +76,38 @@
 #define PW_CHECK_UINT_NE(arga, argb, ...) _PW_CHECK_BINARY_CMP_IMPL(arga, !=, argb, unsigned int, "%u", __VA_ARGS__)
 
 // Debug checks for unsigned int: LE, LT, GE, GT, EQ.
-#define PW_DCHECK_UINT_LE(...) if (PW_ASSERT_ENABLE_DCHECK) PW_CHECK_UINT_LE(__VA_ARGS__)
-#define PW_DCHECK_UINT_LT(...) if (PW_ASSERT_ENABLE_DCHECK) PW_CHECK_UINT_LT(__VA_ARGS__)
-#define PW_DCHECK_UINT_GE(...) if (PW_ASSERT_ENABLE_DCHECK) PW_CHECK_UINT_GE(__VA_ARGS__)
-#define PW_DCHECK_UINT_GT(...) if (PW_ASSERT_ENABLE_DCHECK) PW_CHECK_UINT_GT(__VA_ARGS__)
-#define PW_DCHECK_UINT_EQ(...) if (PW_ASSERT_ENABLE_DCHECK) PW_CHECK_UINT_EQ(__VA_ARGS__)
-#define PW_DCHECK_UINT_NE(...) if (PW_ASSERT_ENABLE_DCHECK) PW_CHECK_UINT_NE(__VA_ARGS__)
+#define PW_DCHECK_UINT_LE(...) if (!(PW_ASSERT_ENABLE_DEBUG)) {} else PW_CHECK_UINT_LE(__VA_ARGS__)
+#define PW_DCHECK_UINT_LT(...) if (!(PW_ASSERT_ENABLE_DEBUG)) {} else PW_CHECK_UINT_LT(__VA_ARGS__)
+#define PW_DCHECK_UINT_GE(...) if (!(PW_ASSERT_ENABLE_DEBUG)) {} else PW_CHECK_UINT_GE(__VA_ARGS__)
+#define PW_DCHECK_UINT_GT(...) if (!(PW_ASSERT_ENABLE_DEBUG)) {} else PW_CHECK_UINT_GT(__VA_ARGS__)
+#define PW_DCHECK_UINT_EQ(...) if (!(PW_ASSERT_ENABLE_DEBUG)) {} else PW_CHECK_UINT_EQ(__VA_ARGS__)
+#define PW_DCHECK_UINT_NE(...) if (!(PW_ASSERT_ENABLE_DEBUG)) {} else PW_CHECK_UINT_NE(__VA_ARGS__)
 
-// Checks for pointer: LE, LT, GE, GT, EQ, NE, and NOTNULL.
-#define PW_CHECK_PTR_LE(arga, argb, ...) _PW_CHECK_BINARY_CMP_IMPL(arga, <=, argb, void*, "%p", __VA_ARGS__)
-#define PW_CHECK_PTR_LT(arga, argb, ...) _PW_CHECK_BINARY_CMP_IMPL(arga, < , argb, void*, "%p", __VA_ARGS__)
-#define PW_CHECK_PTR_GE(arga, argb, ...) _PW_CHECK_BINARY_CMP_IMPL(arga, >=, argb, void*, "%p", __VA_ARGS__)
-#define PW_CHECK_PTR_GT(arga, argb, ...) _PW_CHECK_BINARY_CMP_IMPL(arga, > , argb, void*, "%p", __VA_ARGS__)
-#define PW_CHECK_PTR_EQ(arga, argb, ...) _PW_CHECK_BINARY_CMP_IMPL(arga, ==, argb, void*, "%p", __VA_ARGS__)
-#define PW_CHECK_PTR_NE(arga, argb, ...) _PW_CHECK_BINARY_CMP_IMPL(arga, !=, argb, void*, "%p", __VA_ARGS__)
+// Checks for pointer: LE, LT, GE, GT, EQ, NE.
+#define PW_CHECK_PTR_LE(arga, argb, ...) _PW_CHECK_BINARY_CMP_IMPL(arga, <=, argb, const void*, "%p", __VA_ARGS__)
+#define PW_CHECK_PTR_LT(arga, argb, ...) _PW_CHECK_BINARY_CMP_IMPL(arga, < , argb, const void*, "%p", __VA_ARGS__)
+#define PW_CHECK_PTR_GE(arga, argb, ...) _PW_CHECK_BINARY_CMP_IMPL(arga, >=, argb, const void*, "%p", __VA_ARGS__)
+#define PW_CHECK_PTR_GT(arga, argb, ...) _PW_CHECK_BINARY_CMP_IMPL(arga, > , argb, const void*, "%p", __VA_ARGS__)
+#define PW_CHECK_PTR_EQ(arga, argb, ...) _PW_CHECK_BINARY_CMP_IMPL(arga, ==, argb, const void*, "%p", __VA_ARGS__)
+#define PW_CHECK_PTR_NE(arga, argb, ...) _PW_CHECK_BINARY_CMP_IMPL(arga, !=, argb, const void*, "%p", __VA_ARGS__)
+
+// Check for pointer: NOTNULL. Use "nullptr" in C++, "NULL" in C.
+#ifdef __cplusplus
 #define PW_CHECK_NOTNULL(arga, ...) \
-  _PW_CHECK_BINARY_CMP_IMPL(arga, !=, NULL, void*, "%p", __VA_ARGS__)
+  _PW_CHECK_BINARY_CMP_IMPL(arga, !=, nullptr, const void*, "%p", __VA_ARGS__)
+#else  // __cplusplus
+#define PW_CHECK_NOTNULL(arga, ...) \
+  _PW_CHECK_BINARY_CMP_IMPL(arga, !=, NULL, const void*, "%p", __VA_ARGS__)
+#endif  // __cplusplus
 
 // Debug checks for pointer: LE, LT, GE, GT, EQ, NE, and NOTNULL.
-#define PW_DCHECK_PTR_LE(...) if (PW_ASSERT_ENABLE_DCHECK) PW_CHECK_PTR_LE(__VA_ARGS__)
-#define PW_DCHECK_PTR_LT(...) if (PW_ASSERT_ENABLE_DCHECK) PW_CHECK_PTR_LT(__VA_ARGS__)
-#define PW_DCHECK_PTR_GE(...) if (PW_ASSERT_ENABLE_DCHECK) PW_CHECK_PTR_GE(__VA_ARGS__)
-#define PW_DCHECK_PTR_GT(...) if (PW_ASSERT_ENABLE_DCHECK) PW_CHECK_PTR_GT(__VA_ARGS__)
-#define PW_DCHECK_PTR_EQ(...) if (PW_ASSERT_ENABLE_DCHECK) PW_CHECK_PTR_EQ(__VA_ARGS__)
-#define PW_DCHECK_PTR_NE(...) if (PW_ASSERT_ENABLE_DCHECK) PW_CHECK_PTR_NE(__VA_ARGS__)
-#define PW_DCHECK_NOTNULL(...) \
-  if (PW_ASSERT_ENABLE_DCHECK) PW_CHECK_NOTNULL(__VA_ARGS__)
+#define PW_DCHECK_PTR_LE(...)  if (!(PW_ASSERT_ENABLE_DEBUG)) {} else PW_CHECK_PTR_LE(__VA_ARGS__)
+#define PW_DCHECK_PTR_LT(...)  if (!(PW_ASSERT_ENABLE_DEBUG)) {} else PW_CHECK_PTR_LT(__VA_ARGS__)
+#define PW_DCHECK_PTR_GE(...)  if (!(PW_ASSERT_ENABLE_DEBUG)) {} else PW_CHECK_PTR_GE(__VA_ARGS__)
+#define PW_DCHECK_PTR_GT(...)  if (!(PW_ASSERT_ENABLE_DEBUG)) {} else PW_CHECK_PTR_GT(__VA_ARGS__)
+#define PW_DCHECK_PTR_EQ(...)  if (!(PW_ASSERT_ENABLE_DEBUG)) {} else PW_CHECK_PTR_EQ(__VA_ARGS__)
+#define PW_DCHECK_PTR_NE(...)  if (!(PW_ASSERT_ENABLE_DEBUG)) {} else PW_CHECK_PTR_NE(__VA_ARGS__)
+#define PW_DCHECK_NOTNULL(...) if (!(PW_ASSERT_ENABLE_DEBUG)) {} else PW_CHECK_NOTNULL(__VA_ARGS__)
 
 // Checks for float: EXACT_LE, EXACT_LT, EXACT_GE, EXACT_GT, EXACT_EQ, EXACT_NE,
 // NEAR.
@@ -119,14 +122,13 @@
 
 // Debug checks for float: NEAR, EXACT_LE, EXACT_LT, EXACT_GE, EXACT_GT,
 // EXACT_EQ.
-#define PW_DCHECK_FLOAT_NEAR(...) \
-  if (PW_ASSERT_ENABLE_DCHECK) PW_CHECK_FLOAT_NEAR(__VA_ARGS__)
-#define PW_DCHECK_FLOAT_EXACT_LE(...) if (PW_ASSERT_ENABLE_DCHECK) PW_CHECK_FLOAT_EXACT_LE(__VA_ARGS__)
-#define PW_DCHECK_FLOAT_EXACT_LT(...) if (PW_ASSERT_ENABLE_DCHECK) PW_CHECK_FLOAT_EXACT_LT(__VA_ARGS__)
-#define PW_DCHECK_FLOAT_EXACT_GE(...) if (PW_ASSERT_ENABLE_DCHECK) PW_CHECK_FLOAT_EXACT_GE(__VA_ARGS__)
-#define PW_DCHECK_FLOAT_EXACT_GT(...) if (PW_ASSERT_ENABLE_DCHECK) PW_CHECK_FLOAT_EXACT_GT(__VA_ARGS__)
-#define PW_DCHECK_FLOAT_EXACT_EQ(...) if (PW_ASSERT_ENABLE_DCHECK) PW_CHECK_FLOAT_EXACT_EQ(__VA_ARGS__)
-#define PW_DCHECK_FLOAT_EXACT_NE(...) if (PW_ASSERT_ENABLE_DCHECK) PW_CHECK_FLOAT_EXACT_NE(__VA_ARGS__)
+#define PW_DCHECK_FLOAT_NEAR(...)     if (!(PW_ASSERT_ENABLE_DEBUG)) {} else PW_CHECK_FLOAT_NEAR(__VA_ARGS__)
+#define PW_DCHECK_FLOAT_EXACT_LE(...) if (!(PW_ASSERT_ENABLE_DEBUG)) {} else PW_CHECK_FLOAT_EXACT_LE(__VA_ARGS__)
+#define PW_DCHECK_FLOAT_EXACT_LT(...) if (!(PW_ASSERT_ENABLE_DEBUG)) {} else PW_CHECK_FLOAT_EXACT_LT(__VA_ARGS__)
+#define PW_DCHECK_FLOAT_EXACT_GE(...) if (!(PW_ASSERT_ENABLE_DEBUG)) {} else PW_CHECK_FLOAT_EXACT_GE(__VA_ARGS__)
+#define PW_DCHECK_FLOAT_EXACT_GT(...) if (!(PW_ASSERT_ENABLE_DEBUG)) {} else PW_CHECK_FLOAT_EXACT_GT(__VA_ARGS__)
+#define PW_DCHECK_FLOAT_EXACT_EQ(...) if (!(PW_ASSERT_ENABLE_DEBUG)) {} else PW_CHECK_FLOAT_EXACT_EQ(__VA_ARGS__)
+#define PW_DCHECK_FLOAT_EXACT_NE(...) if (!(PW_ASSERT_ENABLE_DEBUG)) {} else PW_CHECK_FLOAT_EXACT_NE(__VA_ARGS__)
 
 // clang-format on
 
@@ -141,9 +143,10 @@
     }                                                     \
   } while (0)
 
-#define PW_DCHECK_OK(...)      \
-  if (PW_ASSERT_ENABLE_DCHECK) \
-  PW_CHECK_OK(__VA_ARGS__)
+#define PW_DCHECK_OK(...)          \
+  if (!(PW_ASSERT_ENABLE_DEBUG)) { \
+  } else                           \
+    PW_CHECK_OK(__VA_ARGS__)
 
 // =========================================================================
 // Implementation for PW_CHECK
diff --git a/pw_assert/public/pw_assert/light.h b/pw_assert/public/pw_assert/light.h
new file mode 100644
index 0000000..95457a8
--- /dev/null
+++ b/pw_assert/public/pw_assert/light.h
@@ -0,0 +1,58 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include "pw_assert/options.h"  // For PW_ASSERT_ENABLE_DEBUG
+#include "pw_preprocessor/util.h"
+
+PW_EXTERN_C_START
+
+void pw_assert_HandleFailure(void);
+
+PW_EXTERN_C_END
+
+// A header- and constexpr-safe version of PW_CHECK().
+//
+// If the given condition is false, crash the system. Otherwise, do nothing.
+// The condition is guaranteed to be evaluated. This assert implementation is
+// guaranteed to be constexpr-safe.
+//
+// IMPORTANT: Unlike the PW_CHECK_*() suite of macros, this API captures no
+// rich information like line numbers, the file, expression arguments, or the
+// stringified expression. Use these macros only when absolutely necessary --
+// in headers, constexr contexts, or in rare cases where the call site overhead
+// of a full PW_CHECK must be avoided. Use PW_CHECK_*() whenever possible.
+#define PW_ASSERT(condition)     \
+  do {                           \
+    if (!(condition)) {          \
+      pw_assert_HandleFailure(); \
+    }                            \
+  } while (0)
+
+// A header- and constexpr-safe version of PW_DCHECK().
+//
+// Same as PW_ASSERT(), except that if PW_ASSERT_ENABLE_DEBUG == 0, the assert
+// is disabled and the condition is not evaluated.
+//
+// IMPORTANT: Unlike the PW_CHECK_*() suite of macros, this API captures no
+// rich information like line numbers, the file, expression arguments, or the
+// stringified expression. Use these macros only when absolutely necessary --
+// in headers, constexpr contexts, or in rare cases where the call site overhead
+// of a full PW_CHECK must be avoided. Use PW_DCHECK_*() whenever possible.
+#define PW_DASSERT(condition)                            \
+  do {                                                   \
+    if ((PW_ASSERT_ENABLE_DEBUG == 1) && !(condition)) { \
+      pw_assert_HandleFailure();                         \
+    }                                                    \
+  } while (0)
diff --git a/pw_assert/public/pw_assert/options.h b/pw_assert/public/pw_assert/options.h
new file mode 100644
index 0000000..e7ea02f
--- /dev/null
+++ b/pw_assert/public/pw_assert/options.h
@@ -0,0 +1,28 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+// PW_ASSERT_ENABLE_DEBUG controls whether DCHECKs and DASSERTs are enabled.
+//
+// This block defines PW_ASSERT_ENABLE_DEBUG if it is not already, taking into
+// account traditional NDEBUG macro.
+#if !defined(PW_ASSERT_ENABLE_DEBUG)
+#if defined(NDEBUG)
+// Release mode; remove all DCHECK*() and DASSERT() asserts.
+#define PW_ASSERT_ENABLE_DEBUG 0
+#else
+// Debug mode; keep all DCHECK*() and DASSERT() asserts.
+#define PW_ASSERT_ENABLE_DEBUG 1
+#endif  // defined (NDEBUG)
+#endif  // !defined(PW_ASSERT_ENABLE_DEBUG)
diff --git a/pw_assert_basic/BUILD.gn b/pw_assert_basic/BUILD.gn
index 12ddf2e..a43c95c 100644
--- a/pw_assert_basic/BUILD.gn
+++ b/pw_assert_basic/BUILD.gn
@@ -12,11 +12,11 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/target_types.gni")
 import("$dir_pw_docgen/docs.gni")
+
 config("default_config") {
   include_dirs = [ "public" ]
 }
@@ -32,7 +32,6 @@
   ]
   deps = [ ":core" ]
   public = [ "public_overrides/pw_assert_backend/assert_backend.h" ]
-  sources = public
 }
 
 pw_source_set("core") {
@@ -44,7 +43,7 @@
     "$dir_pw_sys_io",
   ]
   public = [ "public/pw_assert_basic/assert_basic.h" ]
-  sources = public + [ "assert_basic.cc" ]
+  sources = [ "assert_basic.cc" ]
 }
 
 pw_doc_group("docs") {
diff --git a/pw_assert_basic/CMakeLists.txt b/pw_assert_basic/CMakeLists.txt
index 7e6c46a..ec2121e 100644
--- a/pw_assert_basic/CMakeLists.txt
+++ b/pw_assert_basic/CMakeLists.txt
@@ -12,6 +12,8 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+
 pw_auto_add_simple_module(pw_assert_basic
   IMPLEMENTS_FACADE
     pw_assert
@@ -20,9 +22,3 @@
     pw_string
     pw_sys_io
 )
-
-target_include_directories(pw_assert_basic PUBLIC public_overrides)
-
-# TODO(hepler): Declare pw_assert_basic as the pw_assert backend for now.
-add_library(pw_assert.backend INTERFACE)
-target_link_libraries(pw_assert.backend INTERFACE pw_assert_basic)
diff --git a/pw_assert_basic/assert_basic.cc b/pw_assert_basic/assert_basic.cc
index 837ce52..e6a64f5 100644
--- a/pw_assert_basic/assert_basic.cc
+++ b/pw_assert_basic/assert_basic.cc
@@ -21,6 +21,7 @@
 
 #include <cstring>
 
+#include "pw_assert/options.h"
 #include "pw_preprocessor/util.h"
 #include "pw_string/string_builder.h"
 #include "pw_sys_io/sys_io.h"
@@ -155,3 +156,11 @@
     WriteLine("");
   }
 }
+
+extern "C" void pw_assert_HandleFailure(void) {
+#if PW_ASSERT_ENABLE_DEBUG
+  pw_Crash("", 0, "", "Crash: PW_ASSERT() or PW_DASSERT() failure");
+#else
+  pw_Crash("", 0, "", "Crash: PW_ASSERT() failure. Note: PW_DASSERT disabled");
+#endif  // PW_ASSERT_ENABLE_DEBUG
+}
diff --git a/pw_assert_basic/docs.rst b/pw_assert_basic/docs.rst
index bc649d2..45ff8f7 100644
--- a/pw_assert_basic/docs.rst
+++ b/pw_assert_basic/docs.rst
@@ -1,8 +1,4 @@
-.. default-domain:: cpp
-
-.. highlight:: cpp
-
-.. _chapter-pw-assert-basic:
+.. _module-pw_assert_basic:
 
 ===============
 pw_assert_basic
diff --git a/pw_assert_log/BUILD b/pw_assert_log/BUILD
new file mode 100644
index 0000000..25768d1
--- /dev/null
+++ b/pw_assert_log/BUILD
@@ -0,0 +1,49 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+load(
+    "//pw_build:pigweed.bzl",
+    "pw_cc_library",
+)
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])  # Apache License 2.0
+
+pw_cc_library(
+    name = "headers",
+    hdrs = [
+        "public/pw_assert_log/assert_log.h",
+        "public_overrides/pw_assert_backend/assert_backend.h",
+    ],
+    includes = [
+        "public",
+        "public_overrides",
+    ],
+    srcs = [
+        "assert_log.cc",
+    ],
+    deps = [
+        "//pw_preprocessor",
+    ],
+)
+
+pw_cc_library(
+    name = "pw_assert_log",
+    deps = [
+        ":headers",
+        "//pw_assert:facade",
+        "//pw_preprocessor",
+    ],
+)
diff --git a/pw_assert_log/BUILD.gn b/pw_assert_log/BUILD.gn
new file mode 100644
index 0000000..083c5e8
--- /dev/null
+++ b/pw_assert_log/BUILD.gn
@@ -0,0 +1,51 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+import("$dir_pw_docgen/docs.gni")
+
+config("default_config") {
+  include_dirs = [ "public" ]
+}
+
+config("backend_config") {
+  include_dirs = [ "public_overrides" ]
+}
+
+pw_source_set("pw_assert_log") {
+  public_configs = [
+    ":backend_config",
+    ":default_config",
+  ]
+  deps = [ ":core" ]
+  public_deps = [ "$dir_pw_log" ]
+  public = [ "public_overrides/pw_assert_backend/assert_backend.h" ]
+}
+
+pw_source_set("core") {
+  public_configs = [ ":default_config" ]
+  public_deps = [ "$dir_pw_log" ]
+  deps = [
+    "$dir_pw_assert:facade",
+    "$dir_pw_preprocessor",
+  ]
+  public = [ "public/pw_assert_log/assert_log.h" ]
+  sources = [ "assert_log.cc" ]
+}
+
+pw_doc_group("docs") {
+  sources = [ "docs.rst" ]
+}
diff --git a/pw_protobuf_compiler/nanopb.gni b/pw_assert_log/CMakeLists.txt
similarity index 75%
copy from pw_protobuf_compiler/nanopb.gni
copy to pw_assert_log/CMakeLists.txt
index 3c3be32..96776e0 100644
--- a/pw_protobuf_compiler/nanopb.gni
+++ b/pw_assert_log/CMakeLists.txt
@@ -12,8 +12,13 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-declare_args() {
-  # If compiling protos for nanopb, this variable is set to the path to the
-  # nanopb installation.
-  dir_pw_third_party_nanopb = ""
-}
+include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+
+pw_auto_add_simple_module(pw_assert_log
+  IMPLEMENTS_FACADE
+    pw_assert
+  PUBLIC_DEPS
+    pw_log
+  PRIVATE_DEPS
+    pw_preprocessor
+)
diff --git a/pw_assert_log/assert_log.cc b/pw_assert_log/assert_log.cc
new file mode 100644
index 0000000..9d8b9ba
--- /dev/null
+++ b/pw_assert_log/assert_log.cc
@@ -0,0 +1,30 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_assert_log/assert_log.h"
+
+#include "pw_assert/options.h"
+
+extern "C" void pw_assert_HandleFailure(void) {
+#if PW_ASSERT_ENABLE_DEBUG
+  PW_LOG(PW_LOG_LEVEL_CRITICAL,
+         PW_LOG_ASSERT_FAILED_FLAG,
+         "Crash: PW_ASSERT() or PW_DASSERT() failure");
+#else
+  PW_LOG(PW_LOG_LEVEL_CRITICAL,
+         PW_LOG_ASSERT_FAILED_FLAG,
+         "Crash: PW_ASSERT() failure. Note: PW_DASSERT disabled");
+#endif  // PW_ASSERT_ENABLE_DEBUG
+  PW_UNREACHABLE;
+}
diff --git a/pw_assert_log/docs.rst b/pw_assert_log/docs.rst
new file mode 100644
index 0000000..012e1c8
--- /dev/null
+++ b/pw_assert_log/docs.rst
@@ -0,0 +1,19 @@
+.. _module-pw_assert_log:
+
+=============
+pw_assert_log
+=============
+
+--------
+Overview
+--------
+This assert backend implements the ``pw_assert`` facade, by routing the assert
+message into the logger with the ``PW_LOG_ASSERT_FAILED_FLAG`` flag set. This is an
+easy way to tokenize your assert messages, by using the ``pw_log_tokenized``
+log backend for logging, then using ``pw_assert_log`` to route the tokenized
+messages into the tokenized log handler.
+
+To use this module:
+
+1. Set your assert backend: ``pw_assert_BACKEND = dir_pw_assert_log``
+2. Ensure your logging backend knows how to handle the assert failure flag
diff --git a/pw_assert_log/public/pw_assert_log/assert_log.h b/pw_assert_log/public/pw_assert_log/assert_log.h
new file mode 100644
index 0000000..d26d339
--- /dev/null
+++ b/pw_assert_log/public/pw_assert_log/assert_log.h
@@ -0,0 +1,77 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include "pw_log/levels.h"
+#include "pw_log/log.h"
+#include "pw_log/options.h"
+#include "pw_preprocessor/compiler.h"
+#include "pw_preprocessor/util.h"
+
+// Use the highest available log flag to indicate an assert failure.
+#define PW_LOG_ASSERT_FAILED_FLAG (1u << (PW_LOG_FLAG_BITS - 1u))
+
+// Die with a message with several attributes included. This crash frontend
+// funnels everything into the logger, which must then handle the true crash
+// behaviour.
+#define PW_HANDLE_CRASH(message, ...)       \
+  do {                                      \
+    PW_LOG(PW_LOG_LEVEL_CRITICAL,           \
+           PW_LOG_ASSERT_FAILED_FLAG,       \
+           __FILE__ ":%d: Crash: " message, \
+           __LINE__,                        \
+           __VA_ARGS__);                    \
+    PW_UNREACHABLE;                         \
+  } while (0)
+
+// Die with a message with several attributes included. This assert frontend
+// funnels everything into the logger, which is responsible for displaying the
+// log, then crashing/rebooting the device.
+#define PW_HANDLE_ASSERT_FAILURE(condition_string, message, ...)         \
+  do {                                                                   \
+    PW_LOG(PW_LOG_LEVEL_CRITICAL,                                        \
+           PW_LOG_ASSERT_FAILED_FLAG,                                    \
+           __FILE__ ":%d: Check failed: " condition_string ". " message, \
+           __LINE__,                                                     \
+           __VA_ARGS__);                                                 \
+    PW_UNREACHABLE;                                                      \
+  } while (0)
+
+// Sample assert failure message produced by the below implementation:
+//
+//   foo.cc:25: Check failed: old_x (=610) < new_x (=50). Details: foo=10, bar.
+//
+// Putting the value next to the operand makes the string easier to read.
+
+// clang-format off
+// This is too hairy for clang format to handle and retain readability.
+#define PW_HANDLE_ASSERT_BINARY_COMPARE_FAILURE(arg_a_str,                \
+                                                arg_a_val,                \
+                                                comparison_op_str,        \
+                                                arg_b_str,                \
+                                                arg_b_val,                \
+                                                type_fmt,                 \
+                                                message, ...)             \
+  do {                                                                    \
+    PW_LOG(PW_LOG_LEVEL_CRITICAL,                                         \
+           PW_LOG_ASSERT_FAILED_FLAG,                                     \
+           __FILE__ ":%d: Check failed: "                                 \
+                 arg_a_str " (=" type_fmt ") "                            \
+                 comparison_op_str " "                                    \
+                 arg_b_str " (=" type_fmt ")"                             \
+                 ". " message,                                            \
+              __LINE__, arg_a_val, arg_b_val, __VA_ARGS__);               \
+    PW_UNREACHABLE;                                                       \
+  } while(0)
+// clang-format on
diff --git a/pw_sys_io_baremetal_lm3s6965evb/early_boot.c b/pw_assert_log/public_overrides/pw_assert_backend/assert_backend.h
similarity index 68%
copy from pw_sys_io_baremetal_lm3s6965evb/early_boot.c
copy to pw_assert_log/public_overrides/pw_assert_backend/assert_backend.h
index 1670b7d..376f51e 100644
--- a/pw_sys_io_baremetal_lm3s6965evb/early_boot.c
+++ b/pw_assert_log/public_overrides/pw_assert_backend/assert_backend.h
@@ -12,6 +12,9 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
-#include "pw_boot_armv7m/boot.h"
+// This override header merely points to the true backend, in this case the
+// log-based one. The reason to redirect is to permit the use of multiple
+// backends (though pw_assert/assert.h can only point to one backend at a time).
+#pragma once
 
-void pw_PreStaticConstructorInit() {}
\ No newline at end of file
+#include "pw_assert_log/assert_log.h"
diff --git a/pw_base64/BUILD.gn b/pw_base64/BUILD.gn
index 6fdc1dc..006b427 100644
--- a/pw_base64/BUILD.gn
+++ b/pw_base64/BUILD.gn
@@ -12,12 +12,12 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/target_types.gni")
 import("$dir_pw_docgen/docs.gni")
 import("$dir_pw_unit_test/test.gni")
+
 config("default_config") {
   include_dirs = [ "public" ]
 }
@@ -26,7 +26,6 @@
   public_configs = [ ":default_config" ]
   public = [ "public/pw_base64/base64.h" ]
   sources = [ "base64.cc" ]
-  sources += public
   public_deps = [ "$dir_pw_span" ]
 }
 
diff --git a/pw_base64/CMakeLists.txt b/pw_base64/CMakeLists.txt
index 95e63a9..69f99f8 100644
--- a/pw_base64/CMakeLists.txt
+++ b/pw_base64/CMakeLists.txt
@@ -12,6 +12,8 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+
 pw_auto_add_simple_module(pw_base64
   PUBLIC_DEPS
     pw_span
diff --git a/pw_base64/docs.rst b/pw_base64/docs.rst
index 3872e3e..9013341 100644
--- a/pw_base64/docs.rst
+++ b/pw_base64/docs.rst
@@ -1,8 +1,4 @@
-.. _chapter-pw-base64:
-
-.. default-domain:: cpp
-
-.. highlight:: sh
+.. _module-pw_base64:
 
 ---------
 pw_base64
diff --git a/pw_bloat/BUILD.gn b/pw_bloat/BUILD.gn
index 8011fc2..9c6d123 100644
--- a/pw_bloat/BUILD.gn
+++ b/pw_bloat/BUILD.gn
@@ -12,12 +12,12 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/target_types.gni")
 import("$dir_pw_docgen/docs.gni")
 import("bloat.gni")
+
 config("default_config") {
   include_dirs = [ "public" ]
 }
@@ -27,10 +27,10 @@
 pw_source_set("bloat_this_binary") {
   public_configs = [ ":default_config" ]
   public = [ "public/pw_bloat/bloat_this_binary.h" ]
-  sources = [ "bloat_this_binary.cc" ] + public
+  sources = [ "bloat_this_binary.cc" ]
 }
 
-source_set("base_main") {
+pw_source_set("base_main") {
   sources = [ "base_main.cc" ]
 }
 
diff --git a/pw_bloat/bloat.gni b/pw_bloat/bloat.gni
index 14e64a4..9acfd05 100644
--- a/pw_bloat/bloat.gni
+++ b/pw_bloat/bloat.gni
@@ -12,10 +12,10 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
-import("$dir_pw_build/python_script.gni")
+import("$dir_pw_build/python_action.gni")
+
 declare_args() {
   # Path to the Bloaty configuration file that defines the memory layout and
   # capacities for the target binaries.
@@ -113,9 +113,9 @@
 
       # Allow each binary to override the global bloaty config.
       if (defined(binary.bloaty_config)) {
-        _bloaty_configs += [ get_path_info(binary.bloaty_config, "abspath") ]
+        _bloaty_configs += [ rebase_path(binary.bloaty_config) ]
       } else {
-        _bloaty_configs += [ get_path_info(pw_bloat_BLOATY_CONFIG, "abspath") ]
+        _bloaty_configs += [ rebase_path(pw_bloat_BLOATY_CONFIG) ]
       }
 
       _binary_path += ";" + "<TARGET_FILE($_binary_base)>"
@@ -128,7 +128,7 @@
       "--bloaty-config",
       string_join(";", _bloaty_configs),
       "--out-dir",
-      target_gen_dir,
+      rebase_path(target_gen_dir),
       "--target",
       target_name,
       "--title",
@@ -150,19 +150,17 @@
 
     _doc_rst_output = "$target_gen_dir/${target_name}"
 
-    # TODO(frolv): Size reports are temporarily disabled pending the toolchain
-    # refactor.
-    if (true || host_os == "win") {
+    if (host_os == "win") {
       # Bloaty is not yet packaged for Windows systems; display a message
       # indicating this.
       not_needed("*")
       not_needed(invoker, "*")
 
-      pw_python_script(target_name) {
+      pw_python_action(target_name) {
         metadata = {
           pw_doc_sources = rebase_path([ _doc_rst_output ], root_build_dir)
         }
-        script = "$dir_pw_bloat/py/no_bloaty.py"
+        script = "$dir_pw_bloat/py/pw_bloat/no_bloaty.py"
         args = [ rebase_path(_doc_rst_output) ]
         outputs = [ _doc_rst_output ]
       }
@@ -173,20 +171,17 @@
     } else {
       # Create an action which runs the size report script on the provided
       # targets.
-      pw_python_script(target_name) {
+      pw_python_action(target_name) {
         metadata = {
           pw_doc_sources = rebase_path([ _doc_rst_output ], root_build_dir)
         }
-        script = "$dir_pw_bloat/py/bloat.py"
-        inputs = [
-                   "$dir_pw_bloat/py/binary_diff.py",
-                   "$dir_pw_bloat/py/bloat_output.py",
-                 ] + _bloaty_configs
+        script = "$dir_pw_bloat/py/pw_bloat/bloat.py"
+        inputs = _bloaty_configs
         outputs = [
           "$target_gen_dir/${target_name}.txt",
           _doc_rst_output,
         ]
-        deps = _all_target_dependencies
+        deps = _all_target_dependencies + [ "$dir_pw_bloat/py" ]
         args = _bloat_script_args + _binary_paths
 
         # Print size reports to stdout when they are generated.
@@ -321,11 +316,11 @@
     not_needed(invoker, "*")
 
     _doc_rst_output = "$target_gen_dir/$target_name"
-    pw_python_script(target_name) {
+    pw_python_action(target_name) {
       metadata = {
         pw_doc_sources = rebase_path([ _doc_rst_output ], root_build_dir)
       }
-      script = "$dir_pw_bloat/py/no_toolchains.py"
+      script = "$dir_pw_bloat/py/pw_bloat/no_toolchains.py"
       args = [ rebase_path(_doc_rst_output) ]
       outputs = [ _doc_rst_output ]
     }
diff --git a/pw_bloat/docs.rst b/pw_bloat/docs.rst
index bbbbb48..8c00345 100644
--- a/pw_bloat/docs.rst
+++ b/pw_bloat/docs.rst
@@ -1,8 +1,4 @@
-.. default-domain:: cpp
-
-.. highlight:: sh
-
-.. _chapter-bloat:
+.. _module-pw_bloat:
 
 --------
 pw_bloat
diff --git a/pw_bloat/examples/BUILD.gn b/pw_bloat/examples/BUILD.gn
index 3d59c1f..fc4fcc2 100644
--- a/pw_bloat/examples/BUILD.gn
+++ b/pw_bloat/examples/BUILD.gn
@@ -12,10 +12,10 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("../bloat.gni")
+
 pw_toolchain_size_report("simple_bloat_loop") {
   base_executable = {
     sources = [ "simple_base.cc" ]
diff --git a/pw_protobuf_compiler/nanopb.gni b/pw_bloat/py/BUILD.gn
similarity index 63%
copy from pw_protobuf_compiler/nanopb.gni
copy to pw_bloat/py/BUILD.gn
index 3c3be32..74f574f 100644
--- a/pw_protobuf_compiler/nanopb.gni
+++ b/pw_bloat/py/BUILD.gn
@@ -12,8 +12,18 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-declare_args() {
-  # If compiling protos for nanopb, this variable is set to the path to the
-  # nanopb installation.
-  dir_pw_third_party_nanopb = ""
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/python.gni")
+
+pw_python_package("py") {
+  setup = [ "setup.py" ]
+  sources = [
+    "pw_bloat/__init__.py",
+    "pw_bloat/binary_diff.py",
+    "pw_bloat/bloat.py",
+    "pw_bloat/bloat_output.py",
+    "pw_bloat/no_bloaty.py",
+    "pw_bloat/no_toolchains.py",
+  ]
 }
diff --git a/pw_bloat/py/pw_bloat/__init__.py b/pw_bloat/py/pw_bloat/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pw_bloat/py/pw_bloat/__init__.py
diff --git a/pw_bloat/py/binary_diff.py b/pw_bloat/py/pw_bloat/binary_diff.py
similarity index 100%
rename from pw_bloat/py/binary_diff.py
rename to pw_bloat/py/pw_bloat/binary_diff.py
diff --git a/pw_bloat/py/bloat.py b/pw_bloat/py/pw_bloat/bloat.py
similarity index 98%
rename from pw_bloat/py/bloat.py
rename to pw_bloat/py/pw_bloat/bloat.py
index 8302fcf..c3e145b 100755
--- a/pw_bloat/py/bloat.py
+++ b/pw_bloat/py/pw_bloat/bloat.py
@@ -23,8 +23,8 @@
 
 from typing import List, Iterable, Optional
 
-from binary_diff import BinaryDiff
-import bloat_output
+from pw_bloat.binary_diff import BinaryDiff
+from pw_bloat import bloat_output
 
 import pw_cli.log
 
diff --git a/pw_bloat/py/bloat_output.py b/pw_bloat/py/pw_bloat/bloat_output.py
similarity index 97%
rename from pw_bloat/py/bloat_output.py
rename to pw_bloat/py/pw_bloat/bloat_output.py
index 569cb1f..d203717 100644
--- a/pw_bloat/py/bloat_output.py
+++ b/pw_bloat/py/pw_bloat/bloat_output.py
@@ -15,10 +15,10 @@
 
 import abc
 import enum
-from typing import Callable, Collection, Dict, List, Optional, Tuple, Type
-from typing import Union
+from typing import (Callable, Collection, Dict, List, Optional, Tuple, Type,
+                    Union)
 
-from binary_diff import BinaryDiff, FormattedDiff
+from pw_bloat.binary_diff import BinaryDiff, FormattedDiff
 
 
 class Output(abc.ABC):
diff --git a/pw_bloat/py/no_bloaty.py b/pw_bloat/py/pw_bloat/no_bloaty.py
similarity index 100%
rename from pw_bloat/py/no_bloaty.py
rename to pw_bloat/py/pw_bloat/no_bloaty.py
diff --git a/pw_bloat/py/no_toolchains.py b/pw_bloat/py/pw_bloat/no_toolchains.py
similarity index 100%
rename from pw_bloat/py/no_toolchains.py
rename to pw_bloat/py/pw_bloat/no_toolchains.py
diff --git a/pw_bloat/py/pw_bloat/py.typed b/pw_bloat/py/pw_bloat/py.typed
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pw_bloat/py/pw_bloat/py.typed
diff --git a/pw_bloat/py/setup.py b/pw_bloat/py/setup.py
index 8827b83..73410da 100644
--- a/pw_bloat/py/setup.py
+++ b/pw_bloat/py/setup.py
@@ -13,7 +13,7 @@
 # the License.
 """pw_bloat"""
 
-import setuptools
+import setuptools  # type: ignore
 
 setuptools.setup(
     name='pw_bloat',
@@ -22,4 +22,6 @@
     author_email='pigweed-developers@googlegroups.com',
     description='Tools for generating binary size report cards',
     packages=setuptools.find_packages(),
+    package_data={'pw_bloat': ['py.typed']},
+    zip_safe=False,
 )
diff --git a/pw_blob_store/BUILD b/pw_blob_store/BUILD
index e45ee1c..0ff8d86 100644
--- a/pw_blob_store/BUILD
+++ b/pw_blob_store/BUILD
@@ -24,6 +24,7 @@
 
 pw_cc_library(
     name = "pw_blob_store",
+    srcs = [ "blob_store.cc" ],
     hdrs = [
         "public/pw_blob_store/blob_store.h",
     ],
@@ -32,8 +33,54 @@
         "//pw_checksum",
         "//pw_containers",
         "//pw_log",
-        "//pw_log:facade",
         "//pw_span",
         "//pw_status",
     ],
 )
+
+pw_cc_test(
+    name = "blob_store_test",
+    srcs = [
+        "blob_store_test.cc",
+    ],
+    deps = [
+        ":pw_blob_store",
+        "//pw_kvs:crc16",
+        "//pw_kvs:fake_flash",
+        "//pw_kvs:fake_flash_test_key_value_store",
+        "//pw_log",
+        "//pw_random",
+        "//pw_unit_test",
+    ],
+)
+
+pw_cc_test(
+    name = "blob_store_chunk_write_test",
+    srcs = [
+        "blob_store_chunk_write_test.cc",
+    ],
+    deps = [
+        ":pw_blob_store",
+        "//pw_kvs:crc16",
+        "//pw_kvs:fake_flash",
+        "//pw_kvs:fake_flash_test_key_value_store",
+        "//pw_log",
+        "//pw_random",
+        "//pw_unit_test",
+    ],
+)
+pw_cc_test(
+    name = "blob_store_deferred_write_test",
+    srcs = [
+        "blob_store_deferred_write_test.cc",
+    ],
+    deps = [
+        ":pw_blob_store",
+        "//pw_kvs:crc16",
+        "//pw_kvs:fake_flash",
+        "//pw_kvs:fake_flash_test_key_value_store",
+        "//pw_log",
+        "//pw_random",
+        "//pw_unit_test",
+    ],
+)
diff --git a/pw_blob_store/BUILD.gn b/pw_blob_store/BUILD.gn
index 3022b97..e6c9311 100644
--- a/pw_blob_store/BUILD.gn
+++ b/pw_blob_store/BUILD.gn
@@ -12,12 +12,12 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/target_types.gni")
 import("$dir_pw_docgen/docs.gni")
 import("$dir_pw_unit_test/test.gni")
+
 config("default_config") {
   include_dirs = [ "public" ]
 }
@@ -25,19 +25,61 @@
 pw_source_set("pw_blob_store") {
   public_configs = [ ":default_config" ]
   public = [ "public/pw_blob_store/blob_store.h" ]
+  sources = [ "blob_store.cc" ]
   public_deps = [
     dir_pw_kvs,
     dir_pw_status,
     dir_pw_stream,
   ]
+  deps = [
+    dir_pw_assert,
+    dir_pw_checksum,
+    dir_pw_log,
+  ]
 }
 
 pw_test_group("tests") {
-  tests = [ ":blob_store_test" ]
+  tests = [
+    ":blob_store_test",
+    ":blob_store_deferred_write_test",
+    ":blob_store_chunk_write_test",
+  ]
 }
 
 pw_test("blob_store_test") {
-  deps = [ ":pw_blob_store" ]
+  deps = [
+    ":pw_blob_store",
+    "$dir_pw_kvs:crc16",
+    "$dir_pw_kvs:fake_flash",
+    "$dir_pw_kvs:fake_flash_test_key_value_store",
+    dir_pw_log,
+    dir_pw_random,
+  ]
+  sources = [ "blob_store_test.cc" ]
+}
+
+pw_test("blob_store_chunk_write_test") {
+  deps = [
+    ":pw_blob_store",
+    "$dir_pw_kvs:crc16",
+    "$dir_pw_kvs:fake_flash",
+    "$dir_pw_kvs:fake_flash_test_key_value_store",
+    dir_pw_log,
+    dir_pw_random,
+  ]
+  sources = [ "blob_store_chunk_write_test.cc" ]
+}
+
+pw_test("blob_store_deferred_write_test") {
+  deps = [
+    ":pw_blob_store",
+    "$dir_pw_kvs:crc16",
+    "$dir_pw_kvs:fake_flash",
+    "$dir_pw_kvs:fake_flash_test_key_value_store",
+    dir_pw_log,
+    dir_pw_random,
+  ]
+  sources = [ "blob_store_deferred_write_test.cc" ]
 }
 
 pw_doc_group("docs") {
diff --git a/pw_blob_store/CMakeLists.txt b/pw_blob_store/CMakeLists.txt
index 181eae4..8041124 100644
--- a/pw_blob_store/CMakeLists.txt
+++ b/pw_blob_store/CMakeLists.txt
@@ -12,14 +12,19 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+
 pw_auto_add_simple_module(pw_blob_store
   PUBLIC_DEPS
     pw_containers
+    pw_kvs
     pw_span
     pw_status
+    pw_stream
   PRIVATE_DEPS
     pw_assert
     pw_checksum
     pw_log
+    pw_random
     pw_string
 )
diff --git a/pw_blob_store/blob_store.cc b/pw_blob_store/blob_store.cc
new file mode 100644
index 0000000..27398cc
--- /dev/null
+++ b/pw_blob_store/blob_store.cc
@@ -0,0 +1,532 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_blob_store/blob_store.h"
+
+#include <algorithm>
+
+#include "pw_log/log.h"
+#include "pw_status/try.h"
+
+namespace pw::blob_store {
+
+Status BlobStore::Init() {
+  if (initialized_) {
+    return Status::Ok();
+  }
+
+  PW_LOG_INFO("Init BlobStore");
+
+  const size_t write_buffer_size_alignment =
+      write_buffer_.size_bytes() % partition_.alignment_bytes();
+  PW_CHECK_UINT_EQ((write_buffer_size_alignment), 0);
+  PW_CHECK_UINT_GE(write_buffer_.size_bytes(), partition_.alignment_bytes());
+  PW_CHECK_UINT_LE(write_buffer_.size_bytes(), partition_.sector_size_bytes());
+  PW_CHECK_UINT_GE(write_buffer_.size_bytes(), flash_write_size_bytes_);
+  PW_CHECK_UINT_GE(flash_write_size_bytes_, partition_.alignment_bytes());
+
+  ResetChecksum();
+  initialized_ = true;
+
+  if (LoadMetadata().ok()) {
+    valid_data_ = true;
+    write_address_ = metadata_.data_size_bytes;
+    flash_address_ = metadata_.data_size_bytes;
+
+    PW_LOG_DEBUG("BlobStore init - Have valid blob of %u bytes",
+                 static_cast<unsigned>(write_address_));
+    return Status::Ok();
+  }
+
+  // No saved blob, check for flash being erased.
+  bool erased = false;
+  if (partition_.IsErased(&erased).ok() && erased) {
+    flash_erased_ = true;
+
+    // Blob data is considered valid as soon as the flash is erased. Even though
+    // there are 0 bytes written, they are valid.
+    valid_data_ = true;
+    PW_LOG_DEBUG("BlobStore init - is erased");
+  } else {
+    PW_LOG_DEBUG("BlobStore init - not erased");
+  }
+  return Status::Ok();
+}
+
+Status BlobStore::LoadMetadata() {
+  if (!kvs_.Get(MetadataKey(), &metadata_).ok()) {
+    // If no metadata was read, make sure the metadata is reset.
+    metadata_.reset();
+    return Status::NotFound();
+  }
+
+  if (!ValidateChecksum().ok()) {
+    PW_LOG_ERROR("BlobStore init - Invalidating blob with invalid checksum");
+    Invalidate();
+    return Status::DataLoss();
+  }
+
+  return Status::Ok();
+}
+
+size_t BlobStore::MaxDataSizeBytes() const { return partition_.size_bytes(); }
+
+Status BlobStore::OpenWrite() {
+  if (!initialized_) {
+    return Status::FailedPrecondition();
+  }
+
+  // Writer can only be opened if there are no other writer or readers already
+  // open.
+  if (writer_open_ || readers_open_ != 0) {
+    return Status::Unavailable();
+  }
+
+  PW_LOG_DEBUG("Blob writer open");
+
+  writer_open_ = true;
+
+  Invalidate();
+
+  return Status::Ok();
+}
+
+Status BlobStore::OpenRead() {
+  if (!initialized_) {
+    return Status::FailedPrecondition();
+  }
+
+  // Reader can only be opened if there is no writer open.
+  if (writer_open_) {
+    return Status::Unavailable();
+  }
+
+  if (!ValidToRead()) {
+    PW_LOG_ERROR("Blob reader unable to open without valid data");
+    return Status::FailedPrecondition();
+  }
+
+  PW_LOG_DEBUG("Blob reader open");
+
+  readers_open_++;
+  return Status::Ok();
+}
+
+Status BlobStore::CloseWrite() {
+  auto do_close_write = [&]() -> Status {
+    // If not valid to write, there was data loss and the close will result in
+    // an invalid blob. No need to flush any buffered write bytes.
+    if (!ValidToWrite()) {
+      return Status::DataLoss();
+    }
+
+    if (write_address_ == 0) {
+      return Status::Ok();
+    }
+
+    PW_LOG_DEBUG(
+        "Blob writer close of %u byte blob, with %u bytes still in write "
+        "buffer",
+        static_cast<unsigned>(write_address_),
+        static_cast<unsigned>(WriteBufferBytesUsed()));
+
+    // Do a Flush of any flash_write_size_bytes_ sized chunks so any remaining
+    // bytes in the write buffer are less than flash_write_size_bytes_.
+    PW_TRY(Flush());
+
+    // If any bytes remain in buffer it is because it is a chunk less than
+    // flash_write_size_bytes_. Pad the chunk to flash_write_size_bytes_ and
+    // write it to flash.
+    if (!WriteBufferEmpty()) {
+      PW_TRY(FlushFinalPartialChunk());
+    }
+    PW_DCHECK(WriteBufferEmpty());
+
+    // If things are still good, save the blob metadata.
+    metadata_ = {.checksum = 0, .data_size_bytes = flash_address_};
+    if (checksum_algo_ != nullptr) {
+      ConstByteSpan checksum = checksum_algo_->Finish();
+      std::memcpy(&metadata_.checksum,
+                  checksum.data(),
+                  std::min(checksum.size(), sizeof(metadata_.checksum)));
+    }
+
+    if (!ValidateChecksum().ok()) {
+      Invalidate();
+      return Status::DataLoss();
+    }
+
+    if (!kvs_.Put(MetadataKey(), metadata_).ok()) {
+      return Status::DataLoss();
+    }
+
+    return Status::Ok();
+  };
+
+  const Status status = do_close_write();
+  writer_open_ = false;
+
+  if (!status.ok()) {
+    valid_data_ = false;
+    return Status::DataLoss();
+  }
+  return Status::Ok();
+}
+
+Status BlobStore::CloseRead() {
+  PW_CHECK_UINT_GT(readers_open_, 0);
+  readers_open_--;
+  PW_LOG_DEBUG("Blob reader close");
+  return Status::Ok();
+}
+
+Status BlobStore::Write(ConstByteSpan data) {
+  if (!ValidToWrite()) {
+    return Status::DataLoss();
+  }
+  if (data.size_bytes() == 0) {
+    return Status::Ok();
+  }
+  if (WriteBytesRemaining() == 0) {
+    return Status::OutOfRange();
+  }
+  if (WriteBytesRemaining() < data.size_bytes()) {
+    return Status::ResourceExhausted();
+  }
+
+  if (!EraseIfNeeded().ok()) {
+    return Status::DataLoss();
+  }
+
+  // Write in (up to) 3 steps:
+  // 1) Finish filling write buffer and if full write it to flash.
+  // 2) Write as many whole block-sized chunks as the data has remaining
+  //    after 1.
+  // 3) Put any remaining bytes less than flash write size in the write buffer.
+
+  // Step 1) If there is any data in the write buffer, finish filling write
+  //         buffer and if full write it to flash.
+  if (!WriteBufferEmpty()) {
+    size_t bytes_in_buffer = WriteBufferBytesUsed();
+
+    // Non-deferred writes only use the first flash_write_size_bytes_ of the
+    // write buffer to buffer writes less than flash_write_size_bytes_.
+    PW_CHECK_UINT_GT(flash_write_size_bytes_, bytes_in_buffer);
+
+    // Not using WriteBufferBytesFree() because non-deferred writes (which
+    // is this method) only use the first flash_write_size_bytes_ of the write
+    // buffer.
+    size_t buffer_remaining = flash_write_size_bytes_ - bytes_in_buffer;
+
+    // Add bytes up to filling the flash write size.
+    size_t add_bytes = std::min(buffer_remaining, data.size_bytes());
+    std::memcpy(write_buffer_.data() + bytes_in_buffer, data.data(), add_bytes);
+    write_address_ += add_bytes;
+    bytes_in_buffer += add_bytes;
+    data = data.subspan(add_bytes);
+
+    if (bytes_in_buffer != flash_write_size_bytes_) {
+      // If there were not enough bytes to finish filling the write buffer, there
+      // should not be any bytes left.
+      PW_DCHECK(data.size_bytes() == 0);
+      return Status::Ok();
+    }
+
+    // The write buffer is full, flush to flash.
+    if (!CommitToFlash(write_buffer_).ok()) {
+      return Status::DataLoss();
+    }
+
+    PW_DCHECK(WriteBufferEmpty());
+  }
+
+  // At this point, if data.size_bytes() > 0, the write buffer should be empty.
+  // This invariant is checked as part of steps 2 & 3.
+
+  // Step 2) Write as many block-sized chunks as the data has remaining after
+  //         step 1.
+  while (data.size_bytes() >= flash_write_size_bytes_) {
+    PW_DCHECK(WriteBufferEmpty());
+
+    write_address_ += flash_write_size_bytes_;
+    if (!CommitToFlash(data.first(flash_write_size_bytes_)).ok()) {
+      return Status::DataLoss();
+    }
+
+    data = data.subspan(flash_write_size_bytes_);
+  }
+
+  // Step 3) Put any remaining bytes in the buffer, starting at the beginning
+  //         of the buffer. The buffer must be empty at this point: step 1
+  //         either flushed it to flash or consumed all of the remaining data,
+  //         and step 2 only writes whole chunks directly to flash.
+  if (data.size_bytes() > 0) {
+    PW_DCHECK(WriteBufferEmpty());
+    std::memcpy(write_buffer_.data(), data.data(), data.size_bytes());
+    write_address_ += data.size_bytes();
+  }
+
+  return Status::Ok();
+}
+
+Status BlobStore::AddToWriteBuffer(ConstByteSpan data) {
+  if (!ValidToWrite()) {
+    return Status::DataLoss();
+  }
+  if (WriteBytesRemaining() == 0) {
+    return Status::OutOfRange();
+  }
+  if (WriteBufferBytesFree() < data.size_bytes()) {
+    return Status::ResourceExhausted();
+  }
+
+  size_t bytes_in_buffer = WriteBufferBytesUsed();
+
+  std::memcpy(
+      write_buffer_.data() + bytes_in_buffer, data.data(), data.size_bytes());
+  write_address_ += data.size_bytes();
+
+  return Status::Ok();
+}
+
+Status BlobStore::Flush() {
+  if (!ValidToWrite()) {
+    return Status::DataLoss();
+  }
+  if (WriteBufferBytesUsed() == 0) {
+    return Status::Ok();
+  }
+  // Don't need to check available space, AddToWriteBuffer() will not enqueue
+  // more than can be written to flash.
+
+  if (!EraseIfNeeded().ok()) {
+    return Status::DataLoss();
+  }
+
+  ByteSpan data = std::span(write_buffer_.data(), WriteBufferBytesUsed());
+  while (data.size_bytes() >= flash_write_size_bytes_) {
+    if (!CommitToFlash(data.first(flash_write_size_bytes_)).ok()) {
+      return Status::DataLoss();
+    }
+
+    data = data.subspan(flash_write_size_bytes_);
+  }
+
+  // Only a multiple of flash_write_size_bytes_ are written in the flush. Any
+  // remainder is held until later for either a flush with
+  // flash_write_size_bytes buffered or the writer is closed.
+  if (!WriteBufferEmpty()) {
+    PW_DCHECK_UINT_EQ(data.size_bytes(), WriteBufferBytesUsed());
+    // For any leftover bytes less than the flash write size, move them to the
+    // start of the buffer.
+    std::memmove(write_buffer_.data(), data.data(), data.size_bytes());
+  } else {
+    PW_DCHECK_UINT_EQ(data.size_bytes(), 0);
+  }
+
+  return Status::Ok();
+}
+
+Status BlobStore::FlushFinalPartialChunk() {
+  size_t bytes_in_buffer = WriteBufferBytesUsed();
+
+  PW_DCHECK_UINT_GT(bytes_in_buffer, 0);
+  PW_DCHECK_UINT_LE(bytes_in_buffer, flash_write_size_bytes_);
+  PW_DCHECK_UINT_LE(flash_write_size_bytes_, WriteBytesRemaining());
+
+  PW_LOG_DEBUG(
+      "  Remainder %u bytes in write buffer to zero-pad to flash write "
+      "size and commit",
+      static_cast<unsigned>(bytes_in_buffer));
+
+  // Zero out the remainder of the buffer.
+  auto zero_span = write_buffer_.subspan(bytes_in_buffer);
+  std::memset(zero_span.data(),
+              static_cast<int>(partition_.erased_memory_content()),
+              zero_span.size_bytes());
+
+  ConstByteSpan remaining_bytes = write_buffer_.first(flash_write_size_bytes_);
+  return CommitToFlash(remaining_bytes, bytes_in_buffer);
+}
+
+Status BlobStore::CommitToFlash(ConstByteSpan source, size_t data_bytes) {
+  if (data_bytes == 0) {
+    data_bytes = source.size_bytes();
+  }
+  flash_erased_ = false;
+  StatusWithSize result = partition_.Write(flash_address_, source);
+  flash_address_ += data_bytes;
+  if (checksum_algo_ != nullptr) {
+    checksum_algo_->Update(source.first(data_bytes));
+  }
+
+  if (!result.status().ok()) {
+    valid_data_ = false;
+  }
+
+  return result.status();
+}
+
+// Needs to be in .cc file since PW_CHECK doesn't like being in .h files.
+size_t BlobStore::WriteBufferBytesUsed() const {
+  PW_CHECK_UINT_GE(write_address_, flash_address_);
+  return write_address_ - flash_address_;
+}
+
+// Needs to be in .cc file since PW_DCHECK doesn't like being in .h files.
+size_t BlobStore::WriteBufferBytesFree() const {
+  PW_DCHECK_UINT_GE(write_buffer_.size_bytes(), WriteBufferBytesUsed());
+  size_t buffer_remaining = write_buffer_.size_bytes() - WriteBufferBytesUsed();
+  return std::min(buffer_remaining, WriteBytesRemaining());
+}
+
+Status BlobStore::EraseIfNeeded() {
+  if (flash_address_ == 0) {
+    // Always just erase. Erase is smart enough to only erase if needed.
+    return Erase();
+  }
+  return Status::Ok();
+}
+
+StatusWithSize BlobStore::Read(size_t offset, ByteSpan dest) const {
+  if (!ValidToRead()) {
+    return StatusWithSize::FailedPrecondition();
+  }
+  if (offset >= ReadableDataBytes()) {
+    return StatusWithSize::OutOfRange();
+  }
+
+  size_t available_bytes = ReadableDataBytes() - offset;
+  size_t read_size = std::min(available_bytes, dest.size_bytes());
+
+  return partition_.Read(offset, dest.first(read_size));
+}
+
+Result<ConstByteSpan> BlobStore::GetMemoryMappedBlob() const {
+  if (!ValidToRead()) {
+    return Status::FailedPrecondition();
+  }
+
+  std::byte* mcu_address = partition_.PartitionAddressToMcuAddress(0);
+  if (mcu_address == nullptr) {
+    return Status::Unimplemented();
+  }
+  return ConstByteSpan(mcu_address, ReadableDataBytes());
+}
+
+size_t BlobStore::ReadableDataBytes() const {
+  // TODO: clean up state related to readable bytes.
+  return flash_address_;
+}
+
+Status BlobStore::Erase() {
+  // If already erased our work here is done.
+  if (flash_erased_) {
+    // The write buffer might already have bytes when this call happens, due to
+    // a deferred write.
+    PW_DCHECK_UINT_LE(write_address_, write_buffer_.size_bytes());
+    PW_DCHECK_UINT_EQ(flash_address_, 0);
+
+    // Erased blobs should be valid as soon as the flash is erased. Even though
+    // there are 0 bytes written, they are valid.
+    PW_DCHECK(valid_data_);
+    return Status::Ok();
+  }
+
+  Invalidate();
+
+  Status status = partition_.Erase();
+
+  if (status.ok()) {
+    flash_erased_ = true;
+
+    // Blob data is considered valid as soon as the flash is erased. Even though
+    // there are 0 bytes written, they are valid.
+    valid_data_ = true;
+  }
+  return status;
+}
+
+Status BlobStore::Invalidate() {
+  metadata_.reset();
+
+  // Blob data is considered valid if the flash is erased. Even though
+  // there are 0 bytes written, they are valid.
+  valid_data_ = flash_erased_;
+  ResetChecksum();
+  write_address_ = 0;
+  flash_address_ = 0;
+
+  Status status = kvs_.Delete(MetadataKey());
+
+  return (status == Status::Ok() || status == Status::NotFound())
+             ? Status::Ok()
+             : Status::Internal();
+}
+
+Status BlobStore::ValidateChecksum() {
+  if (metadata_.data_size_bytes == 0) {
+    PW_LOG_INFO("Blob unable to validate checksum of an empty blob");
+    return Status::Unavailable();
+  }
+
+  if (checksum_algo_ == nullptr) {
+    if (metadata_.checksum != 0) {
+      PW_LOG_ERROR(
+          "Blob invalid to have a checksum value with no checksum algo");
+      return Status::DataLoss();
+    }
+
+    return Status::Ok();
+  }
+
+  PW_LOG_DEBUG("Validate checksum of 0x%08x in flash for blob of %u bytes",
+               static_cast<unsigned>(metadata_.checksum),
+               static_cast<unsigned>(metadata_.data_size_bytes));
+  PW_TRY(CalculateChecksumFromFlash(metadata_.data_size_bytes));
+
+  Status status =
+      checksum_algo_->Verify(as_bytes(std::span(&metadata_.checksum, 1)));
+  PW_LOG_DEBUG("  checksum verify of %s", status.str());
+
+  return status;
+}
+
+Status BlobStore::CalculateChecksumFromFlash(size_t bytes_to_check) {
+  if (checksum_algo_ == nullptr) {
+    return Status::Ok();
+  }
+
+  checksum_algo_->Reset();
+
+  kvs::FlashPartition::Address address = 0;
+  const kvs::FlashPartition::Address end = bytes_to_check;
+
+  constexpr size_t kReadBufferSizeBytes = 32;
+  std::array<std::byte, kReadBufferSizeBytes> buffer;
+  while (address < end) {
+    const size_t read_size = std::min(size_t(end - address), buffer.size());
+    PW_TRY(partition_.Read(address, std::span(buffer).first(read_size)));
+
+    checksum_algo_->Update(buffer.data(), read_size);
+    address += read_size;
+  }
+
+  // Safe to ignore the return from Finish, checksum_algo_ keeps the state
+  // information that it needs.
+  checksum_algo_->Finish();
+  return Status::Ok();
+}
+
+}  // namespace pw::blob_store
diff --git a/pw_blob_store/blob_store_chunk_write_test.cc b/pw_blob_store/blob_store_chunk_write_test.cc
new file mode 100644
index 0000000..cf7219d
--- /dev/null
+++ b/pw_blob_store/blob_store_chunk_write_test.cc
@@ -0,0 +1,171 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include <array>
+#include <cstddef>
+#include <cstring>
+#include <span>
+
+#include "gtest/gtest.h"
+#include "pw_blob_store/blob_store.h"
+#include "pw_kvs/crc16_checksum.h"
+#include "pw_kvs/fake_flash_memory.h"
+#include "pw_kvs/flash_memory.h"
+#include "pw_kvs/test_key_value_store.h"
+#include "pw_log/log.h"
+#include "pw_random/xor_shift.h"
+
+namespace pw::blob_store {
+namespace {
+
+class BlobStoreChunkTest : public ::testing::Test {
+ protected:
+  BlobStoreChunkTest() : flash_(kFlashAlignment), partition_(&flash_) {}
+
+  void InitFlashTo(std::span<const std::byte> contents) {
+    partition_.Erase();
+    std::memcpy(flash_.buffer().data(), contents.data(), contents.size());
+  }
+
+  void InitSourceBufferToRandom(uint64_t seed) {
+    partition_.Erase();
+    random::XorShiftStarRng64 rng(seed);
+    rng.Get(source_buffer_);
+  }
+
+  void InitSourceBufferToFill(char fill) {
+    partition_.Erase();
+    std::memset(source_buffer_.data(), fill, source_buffer_.size());
+  }
+
+  // Fill the source buffer with random pattern based on given seed, written to
+  // BlobStore in specified chunk size.
+  void ChunkWriteTest(size_t chunk_size) {
+    constexpr size_t kBufferSize = 256;
+    kvs::ChecksumCrc16 checksum;
+
+    char name[16] = {};
+    snprintf(name, sizeof(name), "Blob%u", static_cast<unsigned>(chunk_size));
+
+    BlobStoreBuffer<kBufferSize> blob(
+        name, partition_, &checksum, kvs::TestKvs());
+    EXPECT_EQ(Status::Ok(), blob.Init());
+
+    BlobStore::BlobWriter writer(blob);
+    EXPECT_EQ(Status::Ok(), writer.Open());
+    EXPECT_EQ(Status::Ok(), writer.Erase());
+
+    ByteSpan source = source_buffer_;
+    while (source.size_bytes() > 0) {
+      const size_t write_size = std::min(source.size_bytes(), chunk_size);
+
+      PW_LOG_DEBUG("Do write of %u bytes, %u bytes remain",
+                   static_cast<unsigned>(write_size),
+                   static_cast<unsigned>(source.size_bytes()));
+
+      ASSERT_EQ(Status::Ok(), writer.Write(source.first(write_size)));
+
+      source = source.subspan(write_size);
+    }
+
+    EXPECT_EQ(Status::Ok(), writer.Close());
+
+    // Use reader to check for valid data.
+    BlobStore::BlobReader reader(blob);
+    ASSERT_EQ(Status::Ok(), reader.Open());
+    Result<ConstByteSpan> result = reader.GetMemoryMappedBlob();
+    ASSERT_TRUE(result.ok());
+    VerifyFlash(result.value());
+    EXPECT_EQ(Status::Ok(), reader.Close());
+  }
+
+  void VerifyFlash(ConstByteSpan verify_bytes) {
+    // Should be defined as same size.
+    EXPECT_EQ(source_buffer_.size(), flash_.buffer().size_bytes());
+
+    // Can't allow it to march off the end of source_buffer_.
+    ASSERT_LE(verify_bytes.size_bytes(), source_buffer_.size());
+
+    for (size_t i = 0; i < verify_bytes.size_bytes(); i++) {
+      EXPECT_EQ(source_buffer_[i], verify_bytes[i]);
+    }
+  }
+
+  static constexpr size_t kFlashAlignment = 16;
+  static constexpr size_t kSectorSize = 2048;
+  static constexpr size_t kSectorCount = 2;
+  static constexpr size_t kBlobDataSize = (kSectorCount * kSectorSize);
+
+  kvs::FakeFlashMemoryBuffer<kSectorSize, kSectorCount> flash_;
+  kvs::FlashPartition partition_;
+  std::array<std::byte, kBlobDataSize> source_buffer_;
+};
+
+TEST_F(BlobStoreChunkTest, ChunkWrite1) {
+  InitSourceBufferToRandom(0x8675309);
+  ChunkWriteTest(1);
+}
+
+TEST_F(BlobStoreChunkTest, ChunkWrite2) {
+  InitSourceBufferToRandom(0x8675);
+  ChunkWriteTest(2);
+}
+
+TEST_F(BlobStoreChunkTest, ChunkWrite3) {
+  InitSourceBufferToFill(0);
+  ChunkWriteTest(3);
+}
+
+TEST_F(BlobStoreChunkTest, ChunkWrite4) {
+  InitSourceBufferToFill(1);
+  ChunkWriteTest(4);
+}
+
+TEST_F(BlobStoreChunkTest, ChunkWrite5) {
+  InitSourceBufferToFill(0xff);
+  ChunkWriteTest(5);
+}
+
+TEST_F(BlobStoreChunkTest, ChunkWrite16) {
+  InitSourceBufferToRandom(0x86);
+  ChunkWriteTest(16);
+}
+
+TEST_F(BlobStoreChunkTest, ChunkWrite64) {
+  InitSourceBufferToRandom(0x9);
+  ChunkWriteTest(64);
+}
+
+TEST_F(BlobStoreChunkTest, ChunkWrite256) {
+  InitSourceBufferToRandom(0x12345678);
+  ChunkWriteTest(256);
+}
+
+TEST_F(BlobStoreChunkTest, ChunkWrite512) {
+  InitSourceBufferToRandom(0x42);
+  ChunkWriteTest(512);
+}
+
+TEST_F(BlobStoreChunkTest, ChunkWrite4096) {
+  InitSourceBufferToRandom(0x89);
+  ChunkWriteTest(4096);
+}
+
+TEST_F(BlobStoreChunkTest, ChunkWriteSingleFull) {
+  InitSourceBufferToRandom(0x98765);
+  ChunkWriteTest(kBlobDataSize);
+}
+
+}  // namespace
+}  // namespace pw::blob_store
diff --git a/pw_blob_store/blob_store_deferred_write_test.cc b/pw_blob_store/blob_store_deferred_write_test.cc
new file mode 100644
index 0000000..71996e1
--- /dev/null
+++ b/pw_blob_store/blob_store_deferred_write_test.cc
@@ -0,0 +1,178 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include <array>
+#include <cstddef>
+#include <cstring>
+#include <span>
+
+#include "gtest/gtest.h"
+#include "pw_blob_store/blob_store.h"
+#include "pw_kvs/crc16_checksum.h"
+#include "pw_kvs/fake_flash_memory.h"
+#include "pw_kvs/flash_memory.h"
+#include "pw_kvs/test_key_value_store.h"
+#include "pw_log/log.h"
+#include "pw_random/xor_shift.h"
+
+namespace pw::blob_store {
+namespace {
+
+class DeferredWriteTest : public ::testing::Test {
+ protected:
+  DeferredWriteTest() : flash_(kFlashAlignment), partition_(&flash_) {}
+
+  void InitFlashTo(std::span<const std::byte> contents) {
+    partition_.Erase();
+    std::memcpy(flash_.buffer().data(), contents.data(), contents.size());
+  }
+
+  void InitBufferToRandom(uint64_t seed) {
+    partition_.Erase();
+    random::XorShiftStarRng64 rng(seed);
+    rng.Get(buffer_);
+  }
+
+  void InitBufferToFill(char fill) {
+    partition_.Erase();
+    std::memset(buffer_.data(), fill, buffer_.size());
+  }
+
+  // Fill the source buffer with random pattern based on given seed, written to
+  // BlobStore in specified chunk size.
+  void ChunkWriteTest(size_t chunk_size, size_t flush_interval) {
+    constexpr size_t kBufferSize = 256;
+    constexpr size_t kWriteSize = 64;
+    kvs::ChecksumCrc16 checksum;
+
+    size_t bytes_since_flush = 0;
+
+    char name[16] = {};
+    snprintf(name, sizeof(name), "Blob%u", static_cast<unsigned>(chunk_size));
+
+    BlobStoreBuffer<kBufferSize> blob(
+        name, partition_, &checksum, kvs::TestKvs(), kWriteSize);
+    EXPECT_EQ(Status::Ok(), blob.Init());
+
+    BlobStore::DeferredWriter writer(blob);
+    EXPECT_EQ(Status::Ok(), writer.Open());
+
+    ByteSpan source = buffer_;
+    while (source.size_bytes() > 0) {
+      const size_t write_size = std::min(source.size_bytes(), chunk_size);
+
+      PW_LOG_DEBUG("Do write of %u bytes, %u bytes remain",
+                   static_cast<unsigned>(write_size),
+                   static_cast<unsigned>(source.size_bytes()));
+
+      ASSERT_EQ(Status::Ok(), writer.Write(source.first(write_size)));
+      // TODO: Add check that the write did not go to flash yet.
+
+      source = source.subspan(write_size);
+      bytes_since_flush += write_size;
+
+      if (bytes_since_flush >= flush_interval) {
+        bytes_since_flush = 0;
+        ASSERT_EQ(Status::Ok(), writer.Flush());
+      }
+    }
+
+    EXPECT_EQ(Status::Ok(), writer.Close());
+
+    // Use reader to check for valid data.
+    BlobStore::BlobReader reader(blob);
+    ASSERT_EQ(Status::Ok(), reader.Open());
+    Result<ConstByteSpan> result = reader.GetMemoryMappedBlob();
+    ASSERT_TRUE(result.ok());
+    VerifyFlash(result.value());
+    EXPECT_EQ(Status::Ok(), reader.Close());
+  }
+
+  void VerifyFlash(ConstByteSpan verify_bytes) {
+    // Should be defined as same size.
+    EXPECT_EQ(buffer_.size(), flash_.buffer().size_bytes());
+
+    // Can't allow it to march off the end of buffer_.
+    ASSERT_LE(verify_bytes.size_bytes(), buffer_.size());
+
+    for (size_t i = 0; i < verify_bytes.size_bytes(); i++) {
+      EXPECT_EQ(buffer_[i], verify_bytes[i]);
+    }
+  }
+
+  static constexpr size_t kFlashAlignment = 16;
+  static constexpr size_t kSectorSize = 2048;
+  static constexpr size_t kSectorCount = 2;
+
+  kvs::FakeFlashMemoryBuffer<kSectorSize, kSectorCount> flash_;
+  kvs::FlashPartition partition_;
+  std::array<std::byte, kSectorCount * kSectorSize> buffer_;
+};
+
+TEST_F(DeferredWriteTest, ChunkWrite1) {
+  InitBufferToRandom(0x8675309);
+  ChunkWriteTest(1, 16);
+}
+
+TEST_F(DeferredWriteTest, ChunkWrite2) {
+  InitBufferToRandom(0x8675);
+  ChunkWriteTest(2, 16);
+}
+
+TEST_F(DeferredWriteTest, ChunkWrite3) {
+  InitBufferToFill(0);
+  ChunkWriteTest(3, 16);
+}
+
+TEST_F(DeferredWriteTest, ChunkWrite4) {
+  InitBufferToFill(1);
+  ChunkWriteTest(4, 64);
+}
+
+TEST_F(DeferredWriteTest, ChunkWrite5) {
+  InitBufferToFill(0xff);
+  ChunkWriteTest(5, 64);
+}
+
+TEST_F(DeferredWriteTest, ChunkWrite16) {
+  InitBufferToRandom(0x86);
+  ChunkWriteTest(16, 128);
+}
+
+TEST_F(DeferredWriteTest, ChunkWrite64) {
+  InitBufferToRandom(0x9);
+  ChunkWriteTest(64, 128);
+}
+
+TEST_F(DeferredWriteTest, ChunkWrite64FullBufferFill) {
+  InitBufferToRandom(0x9);
+  ChunkWriteTest(64, 256);
+}
+
+TEST_F(DeferredWriteTest, ChunkWrite256) {
+  InitBufferToRandom(0x12345678);
+  ChunkWriteTest(256, 256);
+}
+
+// TODO: test that has dirty flash, invalidated blob, open writer, invalidate
+// (not erase) and start writing (does the auto/implicit erase).
+
+// TODO: test that has dirty flash, invalidated blob, open writer, explicit
+// erase and start writing.
+
+// TODO: test start with dirty flash/invalid blob, open writer, write, close.
+// Verifies erase logic when write buffer has contents.
+
+}  // namespace
+}  // namespace pw::blob_store
diff --git a/pw_blob_store/blob_store_test.cc b/pw_blob_store/blob_store_test.cc
new file mode 100644
index 0000000..39666b4
--- /dev/null
+++ b/pw_blob_store/blob_store_test.cc
@@ -0,0 +1,316 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_blob_store/blob_store.h"
+
+#include <array>
+#include <cstddef>
+#include <cstring>
+#include <span>
+
+#include "gtest/gtest.h"
+#include "pw_kvs/crc16_checksum.h"
+#include "pw_kvs/fake_flash_memory.h"
+#include "pw_kvs/flash_memory.h"
+#include "pw_kvs/test_key_value_store.h"
+#include "pw_log/log.h"
+#include "pw_random/xor_shift.h"
+
+namespace pw::blob_store {
+namespace {
+
+class BlobStoreTest : public ::testing::Test {
+ protected:
+  BlobStoreTest() : flash_(kFlashAlignment), partition_(&flash_) {}
+
+  void InitFlashTo(std::span<const std::byte> contents) {
+    partition_.Erase();
+    std::memcpy(flash_.buffer().data(), contents.data(), contents.size());
+  }
+
+  void InitSourceBufferToRandom(uint64_t seed,
+                                size_t init_size_bytes = kBlobDataSize) {
+    ASSERT_LE(init_size_bytes, source_buffer_.size());
+    random::XorShiftStarRng64 rng(seed);
+
+    std::memset(source_buffer_.data(),
+                static_cast<int>(flash_.erased_memory_content()),
+                source_buffer_.size());
+    rng.Get(std::span(source_buffer_).first(init_size_bytes));
+  }
+
+  void InitSourceBufferToFill(char fill,
+                              size_t fill_size_bytes = kBlobDataSize) {
+    ASSERT_LE(fill_size_bytes, source_buffer_.size());
+    std::memset(source_buffer_.data(),
+                static_cast<int>(flash_.erased_memory_content()),
+                source_buffer_.size());
+    std::memset(source_buffer_.data(), fill, fill_size_bytes);
+  }
+
+  // Fill the source buffer with random pattern based on given seed, written to
+  // BlobStore in specified chunk size.
+  void WriteTestBlock(size_t write_size_bytes = kBlobDataSize) {
+    ASSERT_LE(write_size_bytes, source_buffer_.size());
+    constexpr size_t kBufferSize = 256;
+    kvs::ChecksumCrc16 checksum;
+
+    ConstByteSpan write_data =
+        std::span(source_buffer_).first(write_size_bytes);
+
+    char name[16] = {};
+    snprintf(name, sizeof(name), "TestBlobBlock");
+
+    BlobStoreBuffer<kBufferSize> blob(
+        name, partition_, &checksum, kvs::TestKvs());
+    EXPECT_EQ(Status::Ok(), blob.Init());
+
+    BlobStore::BlobWriter writer(blob);
+    EXPECT_EQ(Status::OK, writer.Open());
+    ASSERT_EQ(Status::OK, writer.Write(write_data));
+    EXPECT_EQ(Status::OK, writer.Close());
+
+    // Use reader to check for valid data.
+    BlobStore::BlobReader reader(blob);
+    ASSERT_EQ(Status::Ok(), reader.Open());
+    Result<ConstByteSpan> result = reader.GetMemoryMappedBlob();
+    ASSERT_TRUE(result.ok());
+    EXPECT_EQ(write_size_bytes, result.value().size_bytes());
+    VerifyFlash(result.value());
+    VerifyFlash(flash_.buffer());
+    EXPECT_EQ(Status::OK, reader.Close());
+  }
+
+  // Open a new blob instance and read the blob using the given read chunk size.
+  void ChunkReadTest(size_t read_chunk_size) {
+    kvs::ChecksumCrc16 checksum;
+
+    VerifyFlash(flash_.buffer());
+
+    char name[16] = "TestBlobBlock";
+    BlobStoreBuffer<16> blob(name, partition_, &checksum, kvs::TestKvs());
+    EXPECT_EQ(Status::Ok(), blob.Init());
+
+    // Use reader to check for valid data.
+    BlobStore::BlobReader reader1(blob);
+    ASSERT_EQ(Status::Ok(), reader1.Open());
+    Result<ConstByteSpan> possible_blob = reader1.GetMemoryMappedBlob();
+    ASSERT_TRUE(possible_blob.ok());
+    VerifyFlash(possible_blob.value());
+    EXPECT_EQ(Status::Ok(), reader1.Close());
+
+    BlobStore::BlobReader reader(blob);
+    ASSERT_EQ(Status::Ok(), reader.Open());
+
+    std::array<std::byte, kBlobDataSize> read_buffer_;
+
+    ByteSpan read_span = read_buffer_;
+    while (read_span.size_bytes() > 0) {
+      size_t read_size = std::min(read_span.size_bytes(), read_chunk_size);
+
+      PW_LOG_DEBUG("Do write of %u bytes, %u bytes remain",
+                   static_cast<unsigned>(read_size),
+                   static_cast<unsigned>(read_span.size_bytes()));
+
+      ASSERT_EQ(read_span.size_bytes(), reader.ConservativeReadLimit());
+      auto result = reader.Read(read_span.first(read_size));
+      ASSERT_EQ(result.status(), Status::Ok());
+      read_span = read_span.subspan(read_size);
+    }
+    EXPECT_EQ(Status::Ok(), reader.Close());
+
+    VerifyFlash(read_buffer_);
+  }
+
+  void VerifyFlash(ConstByteSpan verify_bytes, size_t offset = 0) {
+    // Should be defined as same size.
+    EXPECT_EQ(source_buffer_.size(), flash_.buffer().size_bytes());
+
+    // Can't allow it to march off the end of source_buffer_.
+    ASSERT_LE((verify_bytes.size_bytes() + offset), source_buffer_.size());
+
+    for (size_t i = 0; i < verify_bytes.size_bytes(); i++) {
+      ASSERT_EQ(source_buffer_[i + offset], verify_bytes[i]);
+    }
+  }
+
+  static constexpr size_t kFlashAlignment = 16;
+  static constexpr size_t kSectorSize = 2048;
+  static constexpr size_t kSectorCount = 2;
+  static constexpr size_t kBlobDataSize = (kSectorCount * kSectorSize);
+
+  kvs::FakeFlashMemoryBuffer<kSectorSize, kSectorCount> flash_;
+  kvs::FlashPartition partition_;
+  std::array<std::byte, kBlobDataSize> source_buffer_;
+};
+
+TEST_F(BlobStoreTest, Init_Ok) {
+  // TODO: Do init test with flash/kvs explicitly in the different possible
+  // entry states.
+  BlobStoreBuffer<256> blob("Blob_OK", partition_, nullptr, kvs::TestKvs());
+  EXPECT_EQ(Status::Ok(), blob.Init());
+}
+
+TEST_F(BlobStoreTest, Discard) {
+  InitSourceBufferToRandom(0x8675309);
+  WriteTestBlock();
+  constexpr char blob_title[] = "TestBlobBlock";
+  std::array<std::byte, 64> tmp_buffer = {};
+
+  kvs::ChecksumCrc16 checksum;
+
+  // TODO: Do this test with flash/kvs in the different entry state
+  // combinations.
+
+  BlobStoreBuffer<256> blob(blob_title, partition_, &checksum, kvs::TestKvs());
+  EXPECT_EQ(Status::OK, blob.Init());
+
+  BlobStore::BlobWriter writer(blob);
+
+  EXPECT_EQ(Status::OK, writer.Open());
+  EXPECT_EQ(Status::OK, writer.Write(tmp_buffer));
+
+  // The write does an implicit erase so there should be no key for this blob.
+  EXPECT_EQ(Status::NOT_FOUND,
+            kvs::TestKvs().Get(blob_title, tmp_buffer).status());
+  EXPECT_EQ(Status::OK, writer.Close());
+
+  EXPECT_EQ(Status::OK, kvs::TestKvs().Get(blob_title, tmp_buffer).status());
+
+  EXPECT_EQ(Status::OK, writer.Open());
+  EXPECT_EQ(Status::OK, writer.Discard());
+  EXPECT_EQ(Status::OK, writer.Close());
+
+  EXPECT_EQ(Status::NOT_FOUND,
+            kvs::TestKvs().Get(blob_title, tmp_buffer).status());
+}
+
+TEST_F(BlobStoreTest, MultipleErase) {
+  BlobStoreBuffer<256> blob("Blob_OK", partition_, nullptr, kvs::TestKvs());
+  EXPECT_EQ(Status::Ok(), blob.Init());
+
+  BlobStore::BlobWriter writer(blob);
+  EXPECT_EQ(Status::Ok(), writer.Open());
+
+  EXPECT_EQ(Status::Ok(), writer.Erase());
+  EXPECT_EQ(Status::Ok(), writer.Erase());
+  EXPECT_EQ(Status::Ok(), writer.Erase());
+}
+
+TEST_F(BlobStoreTest, OffsetRead) {
+  InitSourceBufferToRandom(0x11309);
+  WriteTestBlock();
+
+  constexpr size_t kOffset = 10;
+  ASSERT_LT(kOffset, kBlobDataSize);
+
+  kvs::ChecksumCrc16 checksum;
+
+  char name[16] = "TestBlobBlock";
+  BlobStoreBuffer<16> blob(name, partition_, &checksum, kvs::TestKvs());
+  EXPECT_EQ(Status::Ok(), blob.Init());
+  BlobStore::BlobReader reader(blob);
+  ASSERT_EQ(Status::Ok(), reader.Open(kOffset));
+
+  std::array<std::byte, kBlobDataSize - kOffset> read_buffer_;
+  ByteSpan read_span = read_buffer_;
+  ASSERT_EQ(read_span.size_bytes(), reader.ConservativeReadLimit());
+
+  auto result = reader.Read(read_span);
+  ASSERT_EQ(result.status(), Status::Ok());
+  EXPECT_EQ(Status::Ok(), reader.Close());
+  VerifyFlash(read_buffer_, kOffset);
+}
+
+TEST_F(BlobStoreTest, InvalidReadOffset) {
+  InitSourceBufferToRandom(0x11309);
+  WriteTestBlock();
+
+  constexpr size_t kOffset = kBlobDataSize;
+
+  kvs::ChecksumCrc16 checksum;
+
+  char name[16] = "TestBlobBlock";
+  BlobStoreBuffer<16> blob(name, partition_, &checksum, kvs::TestKvs());
+  EXPECT_EQ(Status::Ok(), blob.Init());
+  BlobStore::BlobReader reader(blob);
+  ASSERT_EQ(Status::InvalidArgument(), reader.Open(kOffset));
+}
+
+TEST_F(BlobStoreTest, ChunkRead1) {
+  InitSourceBufferToRandom(0x8675309);
+  WriteTestBlock();
+  ChunkReadTest(1);
+}
+
+TEST_F(BlobStoreTest, ChunkRead3) {
+  InitSourceBufferToFill(0);
+  WriteTestBlock();
+  ChunkReadTest(3);
+}
+
+TEST_F(BlobStoreTest, ChunkRead4) {
+  InitSourceBufferToFill(1);
+  WriteTestBlock();
+  ChunkReadTest(4);
+}
+
+TEST_F(BlobStoreTest, ChunkRead5) {
+  InitSourceBufferToFill(0xff);
+  WriteTestBlock();
+  ChunkReadTest(5);
+}
+
+TEST_F(BlobStoreTest, ChunkRead16) {
+  InitSourceBufferToRandom(0x86);
+  WriteTestBlock();
+  ChunkReadTest(16);
+}
+
+TEST_F(BlobStoreTest, ChunkRead64) {
+  InitSourceBufferToRandom(0x9);
+  WriteTestBlock();
+  ChunkReadTest(64);
+}
+
+TEST_F(BlobStoreTest, ChunkReadFull) {
+  InitSourceBufferToRandom(0x9);
+  WriteTestBlock();
+  ChunkReadTest(kBlobDataSize);
+}
+
+TEST_F(BlobStoreTest, PartialBufferThenClose) {
+  // Do write of only a partial chunk, which will only have bytes in buffer
+  // (none written to flash) at close.
+  size_t data_bytes = 12;
+  InitSourceBufferToRandom(0x111, data_bytes);
+  WriteTestBlock(data_bytes);
+
+  // Do write with several full chunks and then some partial.
+  data_bytes = 158;
+  InitSourceBufferToRandom(0x3222, data_bytes);
+}
+
+// Test to do write/close, write/close multiple times.
+TEST_F(BlobStoreTest, MultipleWrites) {
+  InitSourceBufferToRandom(0x1121);
+  WriteTestBlock();
+  InitSourceBufferToRandom(0x515);
+  WriteTestBlock();
+  InitSourceBufferToRandom(0x4321);
+  WriteTestBlock();
+}
+
+}  // namespace
+}  // namespace pw::blob_store
diff --git a/pw_blob_store/docs.rst b/pw_blob_store/docs.rst
index e3673b1..a84a22e 100644
--- a/pw_blob_store/docs.rst
+++ b/pw_blob_store/docs.rst
@@ -1,8 +1,4 @@
-.. _chapter-pw-blob_store:
-
-.. default-domain:: cpp
-
-.. highlight:: cpp
+.. _module-pw_blob_store:
 
 -------------
 pw_blob_store
diff --git a/pw_blob_store/public/pw_blob_store/blob_store.h b/pw_blob_store/public/pw_blob_store/blob_store.h
index bffafa1..e00eb6f 100644
--- a/pw_blob_store/public/pw_blob_store/blob_store.h
+++ b/pw_blob_store/public/pw_blob_store/blob_store.h
@@ -15,8 +15,10 @@
 
 #include <span>
 
+#include "pw_assert/light.h"
 #include "pw_kvs/checksum.h"
 #include "pw_kvs/flash_memory.h"
+#include "pw_kvs/key_value_store.h"
 #include "pw_status/status.h"
 #include "pw_stream/stream.h"
 
@@ -29,8 +31,8 @@
 //
 // Write and read are only done using the BlobWriter and BlobReader classes.
 //
-// Once a blob write is closed, reopening followed by a Discard(), Write(), or
-// Erase() will discard the previous blob.
+// Once a blob write is closed, reopening to write will discard the previous
+// blob.
 //
 // Write blob:
 //  0) Create BlobWriter instance
@@ -48,79 +50,186 @@
  public:
   // Implement the stream::Writer and erase interface for a BlobStore. If not
   // already erased, the Write will do any needed erase.
-  class BlobWriter final : public stream::Writer {
+  //
+  // Only one writer (of either type) is allowed to be open at a time.
+  // Additionally, writers are unable to open if a reader is already open.
+  class BlobWriter : public stream::Writer {
    public:
-    constexpr BlobWriter(BlobStore& store) : store_(store) {}
+    constexpr BlobWriter(BlobStore& store) : store_(store), open_(false) {}
     BlobWriter(const BlobWriter&) = delete;
     BlobWriter& operator=(const BlobWriter&) = delete;
-    ~BlobWriter() { Close(); }
+    virtual ~BlobWriter() {
+      if (open_) {
+        Close();
+      }
+    }
 
-    // Open a blob for writing/erasing. Returns:
+    // Open a blob for writing/erasing. Open will invalidate any existing blob
+    // that may be stored. Can not open when already open. Only one writer is
+    // allowed to be open at a time. Returns:
     //
     // OK - success.
-    // UNAVAILABLE - Unable to open, already open.
-    Status Open() { return Status::UNIMPLEMENTED; }
+    // UNAVAILABLE - Unable to open, another writer or reader instance is
+    //     already open.
+    Status Open() {
+      PW_DASSERT(!open_);
+      Status status = store_.OpenWrite();
+      if (status.ok()) {
+        open_ = true;
+      }
+      return status;
+    }
 
     // Finalize a blob write. Flush all remaining buffered data to storage and
-    // store blob metadata. Returns:
+    // store blob metadata. Close fails in the closed state, do NOT retry Close
+    // on error. An error may or may not result in an invalid blob stored.
+    // Returns:
     //
     // OK - success.
-    // FAILED_PRECONDITION - blob is not open.
-    Status Close() { return Status::UNIMPLEMENTED; }
+    // DATA_LOSS - Error writing data or fail to verify written data.
+    Status Close() {
+      PW_DASSERT(open_);
+      open_ = false;
+      return store_.CloseWrite();
+    }
 
-    // Erase the partition and reset state for a new blob. Returns:
+    // Erase the blob partition and reset state for a new blob. Explicit calls
+    // to Erase are optional, beginning a write will do any needed Erase.
+    // Returns:
     //
     // OK - success.
-    // UNAVAILABLE - Unable to erase, already open.
+    // UNAVAILABLE - Unable to erase while reader is open.
     // [error status] - flash erase failed.
-    Status Erase() { return Status::UNIMPLEMENTED; }
+    Status Erase() {
+      PW_DASSERT(open_);
+      return store_.Erase();
+    }
 
-    // Discard blob (in-progress or valid stored). Any written bytes are
-    // considered invalid. Returns:
+    // Discard the current blob. Any written bytes to this point are considered
+    // invalid. Returns:
     //
     // OK - success.
     // FAILED_PRECONDITION - not open.
-    Status Discard() { return Status::UNIMPLEMENTED; }
+    Status Discard() {
+      PW_DASSERT(open_);
+      return store_.Invalidate();
+    }
 
     // Probable (not guaranteed) minimum number of bytes at this time that can
-    // be written. Returns zero if, in the current state, Write would return
+    // be written. This is not necessarily the full number of bytes remaining in
+    // the blob. Returns zero if, in the current state, Write would return
     // status other than OK. See stream.h for additional details.
     size_t ConservativeWriteLimit() const override {
-      return store_.MaxDataSizeBytes() - store_.ReadableDataBytes();
+      PW_DASSERT(open_);
+      return store_.WriteBytesRemaining();
+    }
+
+    size_t CurrentSizeBytes() {
+      PW_DASSERT(open_);
+      return store_.write_address_;
+    }
+
+   protected:
+    Status DoWrite(ConstByteSpan data) override {
+      PW_DASSERT(open_);
+      return store_.Write(data);
+    }
+
+    BlobStore& store_;
+    bool open_;
+  };
+
+  // Implement the stream::Writer and erase interface with deferred action for a
+  // BlobStore. If not already erased, the Flush will do any needed erase.
+  //
+  // Only one writer (of either type) is allowed to be open at a time.
+  // Additionally, writers are unable to open if a reader is already open.
+  class DeferredWriter final : public BlobWriter {
+   public:
+    constexpr DeferredWriter(BlobStore& store) : BlobWriter(store) {}
+    DeferredWriter(const DeferredWriter&) = delete;
+    DeferredWriter& operator=(const DeferredWriter&) = delete;
+    virtual ~DeferredWriter() {}
+
+    // Flush data in the write buffer. Only a multiple of flash_write_size_bytes
+    // are written in the flush. Any remainder is held until later for either
+    // a flush with flash_write_size_bytes buffered or the writer is closed.
+    Status Flush() {
+      PW_DASSERT(open_);
+      return store_.Flush();
+    }
+
+    // Probable (not guaranteed) minimum number of bytes at this time that can
+    // be written. This is not necessarily the full number of bytes remaining in
+    // the blob. Returns zero if, in the current state, Write would return
+    // status other than OK. See stream.h for additional details.
+    size_t ConservativeWriteLimit() const override {
+      PW_DASSERT(open_);
+      // Deferred writes need to fit in the write buffer.
+      return store_.WriteBufferBytesFree();
     }
 
    private:
-    Status DoWrite(ConstByteSpan data) override { return store_.Write(data); }
-
-    BlobStore& store_;
+    Status DoWrite(ConstByteSpan data) override {
+      PW_DASSERT(open_);
+      return store_.AddToWriteBuffer(data);
+    }
   };
 
-  // Implement stream::Reader interface for BlobStore.
+  // Implement stream::Reader interface for BlobStore. Multiple readers may be
+  // open at the same time, but readers may not be open with a writer open.
   class BlobReader final : public stream::Reader {
    public:
-    constexpr BlobReader(BlobStore& store, size_t offset)
-        : store_(store), offset_(offset) {}
+    constexpr BlobReader(BlobStore& store)
+        : store_(store), open_(false), offset_(0) {}
     BlobReader(const BlobReader&) = delete;
     BlobReader& operator=(const BlobReader&) = delete;
-    ~BlobReader() { Close(); }
+    ~BlobReader() {
+      if (open_) {
+        Close();
+      }
+    }
 
-    // Open to do a blob read. Currently only a single reader at a time is
-    // supported. Returns:
+    // Open to do a blob read at the given offset into the blob. Cannot open
+    // when already open. Multiple readers can be open at the same time.
+    // Returns:
     //
     // OK - success.
+    // FAILED_PRECONDITION - No readable blob available.
+    // INVALID_ARGUMENT - Invalid offset.
     // UNAVAILABLE - Unable to open, already open.
-    Status Open() { return Status::UNIMPLEMENTED; }
+    Status Open(size_t offset = 0) {
+      PW_DASSERT(!open_);
+      if (!store_.ValidToRead()) {
+        return Status::FailedPrecondition();
+      }
+      if (offset >= store_.ReadableDataBytes()) {
+        return Status::InvalidArgument();
+      }
 
-    // Finish reading a blob. Returns:
+      offset_ = offset;
+      Status status = store_.OpenRead();
+      if (status.ok()) {
+        open_ = true;
+      }
+      return status;
+    }
+
+    // Finish reading a blob. Close fails in the closed state, do NOT retry
+    // Close on error. Returns:
     //
     // OK - success.
-    // FAILED_PRECONDITION - blob is not open.
-    Status Close() { return Status::UNIMPLEMENTED; }
+    Status Close() {
+      PW_DASSERT(open_);
+      open_ = false;
+      return store_.CloseRead();
+    }
 
     // Probable (not guaranteed) minimum number of bytes at this time that can
     // be read. Returns zero if, in the current state, Read would return status
     // other than OK. See stream.h for additional details.
     size_t ConservativeReadLimit() const override {
+      PW_DASSERT(open_);
       return store_.ReadableDataBytes() - offset_;
     }
 
@@ -129,28 +238,60 @@
+    // OK with span - Valid span representing the blob data
     // FAILED_PRECONDITION - Reader not open.
     // UNIMPLEMENTED - Memory mapped access not supported for this blob.
-    Result<ByteSpan> GetMemoryMappedBlob() {
+    Result<ConstByteSpan> GetMemoryMappedBlob() {
+      PW_DASSERT(open_);
       return store_.GetMemoryMappedBlob();
     }
 
    private:
     StatusWithSize DoRead(ByteSpan dest) override {
-      return store_.Read(offset_, dest);
+      PW_DASSERT(open_);
+      StatusWithSize status = store_.Read(offset_, dest);
+      if (status.ok()) {
+        PW_DASSERT(status.size() == dest.size_bytes());
+        offset_ += status.size();
+      }
+      return status;
     }
 
     BlobStore& store_;
+    bool open_;
     size_t offset_;
   };
 
-  BlobStore(kvs::FlashPartition* partition,
+  // BlobStore
+  // name - Name of blob store, used for metadata KVS key
+  // partition - Flash partition to use for this blob. Blob uses the entire
+  //     partition for blob data.
+  // checksum_algo - Optional checksum for blob integrity checking. Use nullptr
+  //     for no check.
+  // kvs - KVS used for storing blob metadata.
+  // write_buffer - Used for buffering writes. Needs to be at least
+  //     flash_write_size_bytes.
+  // flash_write_size_bytes - Size in bytes to use for flash write operations.
+  //     This should be chosen to balance optimal write size and required buffer
+  //     size. Must be greater than or equal to flash write alignment, less than
+  //     or equal to flash sector size.
+  BlobStore(std::string_view name,
+            kvs::FlashPartition& partition,
             kvs::ChecksumAlgorithm* checksum_algo,
-            ByteSpan write_buffer)
-      : partition_(*partition),
+            kvs::KeyValueStore& kvs,
+            ByteSpan write_buffer,
+            size_t flash_write_size_bytes)
+      : name_(name),
+        partition_(partition),
         checksum_algo_(checksum_algo),
+        kvs_(kvs),
         write_buffer_(write_buffer),
-        state_(BlobStore::State::kInvalidData),
+        flash_write_size_bytes_(flash_write_size_bytes),
+        initialized_(false),
+        valid_data_(false),
+        flash_erased_(false),
+        writer_open_(false),
+        readers_open_(0),
+        metadata_({}),
         write_address_(0),
-        flush_address_(0) {}
+        flash_address_(0) {}
 
   BlobStore(const BlobStore&) = delete;
   BlobStore& operator=(const BlobStore&) = delete;
@@ -165,44 +306,105 @@
   size_t MaxDataSizeBytes() const;
 
  private:
-  enum class OpenType {
-    // Open for doing read operations.
-    kRead,
+  typedef uint32_t ChecksumValue;
 
-    // Open for Write operations.
-    kWrite,
-  };
+  Status LoadMetadata();
 
-  // Is the blob erased and ready to write.
-  bool erased() const { return state_ == State::kErased; }
-
-  bool IsOpen();
-
-  // Open to do a blob read or write. Returns:
+  // Open to do a blob write. Returns:
   //
   // OK - success.
-  // UNAVAILABLE - Unable to open, already open.
-  Status Open(BlobStore::OpenType type);
+  // UNAVAILABLE - Unable to open writer, another writer or reader instance is
+  //     already open.
+  Status OpenWrite();
+
+  // Open to do a blob read. Returns:
+  //
+  // OK - success.
+  // FAILED_PRECONDITION - Unable to open, no valid blob available.
+  Status OpenRead();
 
   // Finalize a blob write. Flush all remaining buffered data to storage and
   // store blob metadata. Returns:
   //
-  // OK - success.
-  // FAILED_PRECONDITION - blob is not open.
-  Status Close();
+  // OK - success, valid complete blob.
+  // DATA_LOSS - Error during write (this close or previous write/flush). Blob
+  //     is closed and marked as invalid.
+  Status CloseWrite();
+  Status CloseRead();
 
   // Write/append data to the in-progress blob write. Data is written
   // sequentially, with each append added directly after the previous. Data is
   // not guaranteed to be fully written out to storage on Write return. Returns:
   //
   // OK - successful write/enqueue of data.
-  // FAILED_PRECONDITION - blob is not in an open (in-progress) write state.
   // RESOURCE_EXHAUSTED - unable to write all of requested data at this time. No
   //     data written.
   // OUT_OF_RANGE - Writer has been exhausted, similar to EOF. No data written,
   //     no more will be written.
+  // DATA_LOSS - Error during write (this write or previous write/flush). No
+  //     more will be written by following Write calls for current blob (until
+  //     erase/new blob started).
   Status Write(ConstByteSpan data);
 
+  // Similar to Write, but instead of immediately writing out to flash, it
+  // only buffers the data. A Flush or Close is required to get bytes written
+  // out to flash.
+  //
+  // OK - successful write/enqueue of data.
+  // RESOURCE_EXHAUSTED - unable to write all of requested data at this time. No
+  //     data written.
+  // OUT_OF_RANGE - Writer has been exhausted, similar to EOF. No data written,
+  //     no more will be written.
+  // DATA_LOSS - Error during a previous write/flush. No more will be written by
+  //     following Write calls for current blob (until erase/new blob started).
+  Status AddToWriteBuffer(ConstByteSpan data);
+
+  // Flush data in the write buffer. Only a multiple of flash_write_size_bytes
+  // are written in the flush. Any remainder is held until later for either a
+  // flush with flash_write_size_bytes buffered or the writer is closed.
+  //
+  // OK - successful write/enqueue of data.
+  // DATA_LOSS - Error during write (this flush or previous write/flush). No
+  //     more will be written by following Write calls for current blob (until
+  //     erase/new blob started).
+  Status Flush();
+
+  // Flush a chunk of data in the write buffer smaller than
+  // flash_write_size_bytes. This is only for the final flush as part of the
+  // CloseWrite. The partial chunk is padded to flash_write_size_bytes and a
+  // flash_write_size_bytes chunk is written to flash.
+  //
+  // OK - successful write/enqueue of data.
+  // DATA_LOSS - Error during write (this flush or previous write/flush). No
+  //     more will be written by following Write calls for current blob (until
+  //     erase/new blob started).
+  Status FlushFinalPartialChunk();
+
+  // Commit data to flash and update flash_address_ with data bytes written. The
+  // only time data_bytes should be manually specified is for a CloseWrite with
+  // an unaligned-size chunk remaining in the buffer that has been zero padded
+  // to alignment.
+  Status CommitToFlash(ConstByteSpan source, size_t data_bytes = 0);
+
+  // Blob is valid/OK to write to. Blob is considered valid to write if no data
+  // has been written due to the auto/implicit erase on write start.
+  //
+  // true - Blob is valid and OK to write to.
+  // false - Blob has previously had an error and not valid for writing new
+  //     data.
+  bool ValidToWrite() { return (valid_data_ == true) || (write_address_ == 0); }
+
+  bool WriteBufferEmpty() const { return flash_address_ == write_address_; }
+
+  size_t WriteBufferBytesUsed() const;
+
+  size_t WriteBufferBytesFree() const;
+
+  Status EraseIfNeeded();
+
+  // Blob is valid/OK and has data to read.
+  bool ValidToRead() const { return (valid_data_ && ReadableDataBytes() > 0); }
+
   // Read valid data. Attempts to read the lesser of output.size_bytes() or
   // available bytes worth of data. Returns:
   //
@@ -214,29 +416,133 @@
   //     Try again once bytes become available.
   // OUT_OF_RANGE - Reader has been exhausted, similar to EOF. No bytes read, no
   //     more will be read.
-  StatusWithSize Read(size_t offset, ByteSpan dest);
+  StatusWithSize Read(size_t offset, ByteSpan dest) const;
 
   // Get a span with the MCU pointer and size of the data. Returns:
   //
+  // OK with span - Valid span representing the blob data
   // FAILED_PRECONDITION - Blob not in a state to read data
   // UNIMPLEMENTED - Memory mapped access not supported for this blob.
-  Result<ByteSpan> GetMemoryMappedBlob();
+  Result<ConstByteSpan> GetMemoryMappedBlob() const;
 
-  // Current size of blob/readable data, in bytes. For a completed write this is
-  // the size of the data blob. For all other cases this is zero bytes.
+  // Size of blob/readable data, in bytes.
   size_t ReadableDataBytes() const;
 
-  Status EraseInternal();
+  size_t WriteBytesRemaining() const {
+    return MaxDataSizeBytes() - write_address_;
+  }
 
-  Status InvalidateInternal();
+  Status Erase();
 
+  Status Invalidate();
+
+  void ResetChecksum() {
+    if (checksum_algo_ != nullptr) {
+      checksum_algo_->Reset();
+    }
+  }
+
+  Status ValidateChecksum();
+
+  Status CalculateChecksumFromFlash(size_t bytes_to_check);
+
+  const std::string_view MetadataKey() { return name_; }
+
+  // Changes to the metadata format should also get a different key signature to
+  // avoid new code improperly reading old format metadata.
+  struct BlobMetadata {
+    // The checksum of the blob data stored in flash.
+    ChecksumValue checksum;
+
+    // Number of blob data bytes stored in flash.
+    size_t data_size_bytes;
+
+    constexpr void reset() {
+      *this = {
+          .checksum = 0,
+          .data_size_bytes = 0,
+      };
+    }
+  };
+
+  std::string_view name_;
   kvs::FlashPartition& partition_;
+  // checksum_algo_ of nullptr indicates no checksum algorithm.
   kvs::ChecksumAlgorithm* const checksum_algo_;
+  kvs::KeyValueStore& kvs_;
   ByteSpan write_buffer_;
-  State state_;
+
+  // Size in bytes of flash write operations. This should be chosen to balance
+  // optimal write size and required buffer size. Must be GE flash write
+  // alignment, LE flash sector size.
+  const size_t flash_write_size_bytes_;
+
+  //
+  // Internal state for Blob store
+  //
+  // TODO: Consolidate blob state to a single struct
+
+  // Initialization has been done.
+  bool initialized_;
+
+  // Bytes stored are valid and good. Blob is OK to read and write to. Set as
+  // soon as blob is erased. Even when bytes written is still 0, they are valid.
+  bool valid_data_;
+
+  // Blob partition is currently erased and ready to write a new blob.
+  bool flash_erased_;
+
+  // BlobWriter instance is currently open
+  bool writer_open_;
+
+  // Count of open BlobReader instances
+  size_t readers_open_;
+
+  // Metadata for the blob.
+  BlobMetadata metadata_;
+
+  // Current index for end of overall blob data. Represents current byte size of
+  // blob data since the FlashPartition starts at address 0.
   kvs::FlashPartition::Address write_address_;
-  kvs::FlashPartition::Address flush_address_;
+
+  // Current index of end of data written to flash. Number of buffered data
+  // bytes is write_address_ - flash_address_.
+  kvs::FlashPartition::Address flash_address_;
+};
+
+// Creates a BlobStore with the buffer of kBufferSizeBytes.
+//
+// kBufferSizeBytes - Size in bytes of write buffer to create.
+// name - Name of blob store, used for metadata KVS key
+// partition - Flash partition to use for this blob. Blob uses the entire
+//     partition for blob data.
+// checksum_algo - Optional checksum for blob integrity checking. Use nullptr
+//     for no check.
+// kvs - KVS used for storing blob metadata.
+// write_buffer - Used for buffering writes. Needs to be at least
+//     flash_write_size_bytes.
+// flash_write_size_bytes - Size in bytes to use for flash write operations.
+//     This should be chosen to balance optimal write size and required buffer
+//     size. Must be greater than or equal to flash write alignment, less than
+//     or equal to flash sector size.
+
+template <size_t kBufferSizeBytes>
+class BlobStoreBuffer : public BlobStore {
+ public:
+  explicit BlobStoreBuffer(std::string_view name,
+                           kvs::FlashPartition& partition,
+                           kvs::ChecksumAlgorithm* checksum_algo,
+                           kvs::KeyValueStore& kvs,
+                           size_t flash_write_size_bytes = kBufferSizeBytes)
+      : BlobStore(name,
+                  partition,
+                  checksum_algo,
+                  kvs,
+                  buffer_,
+                  flash_write_size_bytes) {}
+
+ private:
+  std::array<std::byte, kBufferSizeBytes> buffer_;
 };
 
 }  // namespace pw::blob_store
diff --git a/pw_boot_armv7m/BUILD.gn b/pw_boot_armv7m/BUILD.gn
index 38b3a0e..8b99407 100644
--- a/pw_boot_armv7m/BUILD.gn
+++ b/pw_boot_armv7m/BUILD.gn
@@ -12,20 +12,16 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/linker_script.gni")
 import("$dir_pw_build/target_types.gni")
 import("$dir_pw_docgen/docs.gni")
+
 declare_args() {
   # TODO(frolv): Move this into pw_boot module when it is created.
   pw_boot_BACKEND = ""
 
-  # Whether or not to include code that signals to QEMU to shut down the
-  # emulator. This should only be enabled on QEMU targets.
-  pw_boot_armv7m_QEMU_SHUTDOWN = false
-
   # This list should contain the necessary defines for setting pw_boot linker
   # script memory regions.
   pw_boot_armv7m_LINK_CONFIG_DEFINES = []
@@ -45,15 +41,10 @@
 
   pw_source_set("pw_boot_armv7m") {
     public_configs = [ ":default_config" ]
-    deps = [
-      ":armv7m_linker_script",
-      "$dir_pw_preprocessor",
-    ]
-    if (pw_boot_armv7m_QEMU_SHUTDOWN) {
-      defines = [ "PW_BOOT_ARMV7M_QEMU_SHUTDOWN=1" ]
-    }
     public = [ "public/pw_boot_armv7m/boot.h" ]
-    sources = [ "core_init.c" ] + public
+    public_deps = [ "$dir_pw_preprocessor" ]
+    deps = [ ":armv7m_linker_script" ]
+    sources = [ "core_init.c" ]
   }
 }
 
diff --git a/pw_boot_armv7m/basic_armv7m.ld b/pw_boot_armv7m/basic_armv7m.ld
index b799cb1..7ca09de 100644
--- a/pw_boot_armv7m/basic_armv7m.ld
+++ b/pw_boot_armv7m/basic_armv7m.ld
@@ -60,7 +60,7 @@
  *       (Reset_Handler). However, this DOES tell the compiler how to optimize
  *       when --gc-sections is enabled.
  */
-ENTRY(pw_BootEntry)
+ENTRY(pw_boot_Entry)
 
 MEMORY
 {
@@ -88,11 +88,11 @@
    * Register) MUST point to this memory location in order to be used. This can
    * be done by ensuring this section exists at the default location of the VTOR
    * so it's used on reset, or by explicitly setting the VTOR in a bootloader
-   * manually to point to &pw_vector_table_addr before interrupts are enabled.
+   * manually to point to &pw_boot_vector_table_addr before interrupts are enabled.
    */
   .vector_table : ALIGN(512)
   {
-    pw_vector_table_addr = .;
+    pw_boot_vector_table_addr = .;
     KEEP(*(.vector_table))
   } >VECTOR_TABLE
 
@@ -149,7 +149,7 @@
   } >RAM AT> FLASH
 
   /* Zero initialized global/static data. (.bss)
-   * This section is zero initialized in pw_BootEntry(). */
+   * This section is zero initialized in pw_boot_Entry(). */
   .zero_init_ram : ALIGN(8)
   {
     *(.bss)
@@ -160,24 +160,24 @@
 
   .heap : ALIGN(8)
   {
-    pw_heap_low_addr = .;
+    pw_boot_heap_low_addr = .;
     . = . + PW_BOOT_HEAP_SIZE;
     . = ALIGN(8);
-    pw_heap_high_addr = .;
+    pw_boot_heap_high_addr = .;
   } >RAM
 
   /* Link-time check for stack overlaps. */
   .stack (NOLOAD) : ALIGN(8)
   {
     /* Set the address that the main stack pointer should be initialized to. */
-    pw_stack_low_addr = .;
+    pw_boot_stack_low_addr = .;
     HIDDEN(_stack_size = ORIGIN(RAM) + LENGTH(RAM) - .);
     /* Align the stack to a lower address to ensure it isn't out of range. */
     HIDDEN(_stack_high = (. + _stack_size) & ~0x7);
     ASSERT(_stack_high - . >= PW_BOOT_MIN_STACK_SIZE,
            "Error: Not enough RAM for desired minimum stack size.");
     . = _stack_high;
-    pw_stack_high_addr = .;
+    pw_boot_stack_high_addr = .;
   } >RAM
 }
 
diff --git a/pw_boot_armv7m/core_init.c b/pw_boot_armv7m/core_init.c
index b9a38cb..c1b7307 100644
--- a/pw_boot_armv7m/core_init.c
+++ b/pw_boot_armv7m/core_init.c
@@ -42,10 +42,14 @@
 // The simple flow is as follows:
 //   1. Power on
 //   2. PC and SP set (from vector_table by SoC, or by bootloader)
-//   3. pw_BootEntry()
-//     3.1. Static-init RAM (.data, .bss, C++ constructors)
-//     3.2. pw_PreMainInit()
-//     3.3. main()
+//   3. pw_boot_Entry()
+//     3.1. pw_boot_PreStaticMemoryInit();
+//     3.2. Static-init memory (.data, .bss)
+//     3.3. pw_boot_PreStaticConstructorInit();
+//     3.4. Static C++ constructors
+//     3.5. pw_boot_PreMainInit()
+//     3.6. main()
+//     3.7. pw_boot_PostMain()
 
 #include <stdbool.h>
 #include <stdint.h>
@@ -69,7 +73,7 @@
 // completes. The context before this function violates the C spec
 // (Section 6.7.8, paragraph 10 for example, which requires uninitialized static
 // values to be zero-initialized).
-void StaticInit(void) {
+void StaticMemoryInit(void) {
   // Static-init RAM (load static values into ram, .data section init).
   memcpy(&_pw_static_init_ram_start,
          &_pw_static_init_flash_start,
@@ -79,12 +83,6 @@
   memset(&_pw_zero_init_ram_start,
          0,
          &_pw_zero_init_ram_end - &_pw_zero_init_ram_start);
-
-  // Run any init that must be done before C++ static constructors.
-  pw_PreStaticConstructorInit();
-
-  // Call static constructors.
-  __libc_init_array();
 }
 
 // WARNING: This code is run immediately upon boot, and performs initialization
@@ -96,23 +94,34 @@
 //
 // This function runs immediately at boot because it is at index 1 of the
 // interrupt vector table.
-void pw_BootEntry() {
-  StaticInit();
+void pw_boot_Entry() {
+  // Run any init that must be done before static init of RAM which preps the
+  // .data (static values not yet loaded into ram) and .bss sections (not yet
+  // zero-initialized).
+  pw_boot_PreStaticMemoryInit();
+
+  // Note that code running before this function finishes memory
+  // initialization will violate the C spec (Section 6.7.8, paragraph 10 for
+  // example, which requires uninitialized static values to be
+  // zero-initialized). Be EXTREMELY careful when running code before this
+  // function finishes static memory initialization.
+  StaticMemoryInit();
+
+  // Run any init that must be done before C++ static constructors.
+  pw_boot_PreStaticConstructorInit();
+
+  // Call static constructors.
+  __libc_init_array();
 
   // This function is not provided by pw_boot_armv7m, a platform layer, project,
   // or application is expected to implement it.
-  pw_PreMainInit();
+  pw_boot_PreMainInit();
 
   // Run main.
   main();
 
-#if PW_BOOT_ARMV7M_QEMU_SHUTDOWN
-  // QEMU requires a special command to tell the VM to shut down.
-  volatile uint32_t* aircr = (uint32_t*)(0xE000ED0CU);
-  *aircr = 0x5fa0004;
-#endif  // PW_BOOT_ARMV7M_QEMU_SHUTDOWN
+  // In case main() returns, invoke this hook.
+  pw_boot_PostMain();
 
-  // In case main() returns, just sit here until the device is reset.
-  while (true) {
-  }
+  PW_UNREACHABLE;
 }
diff --git a/pw_boot_armv7m/docs.rst b/pw_boot_armv7m/docs.rst
index 7bebba6..d00bec4 100644
--- a/pw_boot_armv7m/docs.rst
+++ b/pw_boot_armv7m/docs.rst
@@ -1,8 +1,4 @@
-.. _chapter-pw-boot-armv7m:
-
-.. default-domain:: cpp
-
-.. highlight:: sh
+.. _module-pw_boot_armv7m:
 
 --------------
 pw_boot_armv7m
@@ -26,26 +22,81 @@
   This module is currently NOT stable! Depending on this module may cause
   breakages as this module is updated.
 
+Sequence
+========
+
+The high level pw_boot_armv7m boot sequence looks like the following pseudo-code
+invocation of the user-implemented functions:
+
+.. code:: cpp
+
+  void pw_boot_Entry() {  // Boot entry point.
+    pw_boot_PreStaticMemoryInit();  // User-implemented function.
+    // Static memory initialization.
+    pw_boot_PreStaticConstructorInit();  // User-implemented function.
+    // C++ static constructors are invoked.
+    pw_boot_PreMainInit();  // User-implemented function.
+    main();  // User-implemented function.
+    pw_boot_PostMain();  // User-implemented function.
+    PW_UNREACHABLE;
+  }
+
 Setup
 =====
 
 User-Implemented Functions
 --------------------------
-This module expects two extern "C" functions to be defined outside this module.
+This module expects all of these extern "C" functions to be defined outside this
+module:
 
  - ``int main()``: This is where applications reside.
- - ``void pw_PreStaticConstructorInit()``: This function executes just before
-   C++ static constructors are called. At this point, other static memory has
-   been zero or data initialized. This function should set up any early
-   initialization that should be done before C++ static constructors are run
-   (e.g. enabling FPU).
- - ``void pw_PreMainInit()``: This function executes just before main, and
+ - ``void pw_boot_PreStaticMemoryInit()``: This function executes just before
+   static memory is zeroed and static data is initialized. This function
+   should set up any early initialization that should be done before static
+   memory is initialized, such as:
+
+   - Enabling the FPU or other coprocessors.
+   - Opting into extra restrictions such as disabling unaligned access to ensure
+     the restrictions are active during static RAM initialization.
+   - Initial CPU clock, flash, and memory configurations including potentially
+     enabling extra memory regions with .bss and .data sections, such as SDRAM
+     or backup powered SRAM.
+   - Fault handler initialization if required before static memory
+     initialization.
+
+   .. warning::
+     Code running in this hook is violating the C spec as static values are not
+     yet initialized, meaning they have not been loaded (.data) nor
+     zero-initialized (.bss).
+
+ - ``void pw_boot_PreStaticConstructorInit()``: This function executes just
+   before C++ static constructors are called. At this point, other static memory
+   has been zero or data initialized. This function should set up any early
+   initialization that should be done before C++ static constructors are run,
+   such as:
+
+   - Run time dependencies such as Malloc, and ergo sometimes the RTOS.
+   - Persistent memory that survives warm reboots.
+   - Enabling the MPU to catch nullptr dereferences during construction.
+   - Main stack watermarking.
+   - Further fault handling configuration necessary for your platform which
+     were not safe before pw_boot_PreStaticMemoryInit().
+   - Boot count and/or boot session UUID management.
+
+ - ``void pw_boot_PreMainInit()``: This function executes just before main, and
    can be used for any device initialization that isn't application specific.
    Depending on your platform, this might be turning on a UART, setting up
    default clocks, etc.
 
-If either of these functions are unimplemented, executables will encounter a
-link error.
+ - ``PW_NO_RETURN void pw_boot_PostMain()``: This function executes after main
+   has returned. This could be used for device specific teardown such as an
+   infinite loop, soft reset, or QEMU shutdown. In addition, if relevant for
+   your application, this would be the place to invoke the global static
+   destructors. This function must not return!
+
+
+If any of these functions are unimplemented, executables will encounter a link
+error.
 
 Required Configs
 ----------------
@@ -80,11 +131,11 @@
       // This address is NOT an interrupt handler/function pointer, it is simply
       // the address that the main stack pointer should be initialized to. The
       // value is reinterpret casted because it needs to be in the vector table.
-      [0] = reinterpret_cast<InterruptHandler>(&pw_stack_high_addr),
+      [0] = reinterpret_cast<InterruptHandler>(&pw_boot_stack_high_addr),
 
       // Reset handler, dictates how to handle reset interrupt. This is the
       // address that the Program Counter (PC) is initialized to at boot.
-      [1] = pw_BootEntry,
+      [1] = pw_boot_Entry,
 
       // NMI handler.
       [2] = DefaultFaultHandler,
@@ -103,15 +154,15 @@
 These symbols are declared as ``uint8_t`` variables. The variables themselves
 do not contain the addresses, they only reside at the memory location they
 reference. To retrieve the memory locations, simply take the reference of the
-symbol (e.g. ``&pw_vector_table_addr``).
+symbol (e.g. ``&pw_boot_vector_table_addr``).
 
-``pw_heap_[low/high]_addr``: Beginning and end of the memory range of the heap.
+``pw_boot_heap_[low/high]_addr``: Beginning and end of the memory range of the heap.
 These addresses may be identical, indicating a heap with a size of zero bytes.
 
-``pw_stack_[low/high]_addr``: Beginning and end of the memory range of the main
+``pw_boot_stack_[low/high]_addr``: Beginning and end of the memory range of the main
 stack. This might not be the only stack in the system.
 
-``pw_vector_table_addr``: Beginning of the ARMv7-M interrupt vector table.
+``pw_boot_vector_table_addr``: Beginning of the ARMv7-M interrupt vector table.
 
 Configuration
 =============
diff --git a/pw_boot_armv7m/public/pw_boot_armv7m/boot.h b/pw_boot_armv7m/public/pw_boot_armv7m/boot.h
index e897bd1..9b683d5 100644
--- a/pw_boot_armv7m/public/pw_boot_armv7m/boot.h
+++ b/pw_boot_armv7m/public/pw_boot_armv7m/boot.h
@@ -32,13 +32,13 @@
 //    they contains the expected values when code begins to run. The SoC doesn't
 //    inherently have a notion of how to do this, so before ANYTHING else the
 //    memory must be initialized. This is done at the beginning of
-//    pw_BootEntry().
+//    pw_boot_Entry().
 //
 //
 // The simple flow is as follows:
-//   Power on -> PC and SP set (from vector_table by SoC) -> pw_BootEntry()
+//   Power on -> PC and SP set (from vector_table by SoC) -> pw_boot_Entry()
 //
-// In pw_BootEntry():
+// In pw_boot_Entry():
 //   Initialize memory -> pw_PreMainInit() -> main()
 
 #include <stdint.h>
@@ -52,48 +52,77 @@
 // values are accessible via the reference of the symbol.
 //
 // Example:
-//   if (stack_pointer < &pw_stack_low_addr) {
+//   if (stack_pointer < &pw_boot_stack_low_addr) {
 //     PW_LOG_ERROR("Main stack overflowed!")
 //   }
 
-// pw_stack_[low/high]_addr indicate the range of the main stack. Note that this
-// might not be the only stack in the system.
+// pw_boot_stack_[low/high]_addr indicate the range of the main stack. Note that
+// this might not be the only stack in the system.
 //
-// The main stack pointer (sp_main) should be initialized to pw_stack_high_addr.
-// This can be done by inserting the address into index 0 of the ARMv7-M vector
-// table. (See ARMv7-M Architecture Reference Manual DDI 0403E.b section B1.5.3)
-extern uint8_t pw_stack_low_addr;
-extern uint8_t pw_stack_high_addr;
+// The main stack pointer (sp_main) should be initialized to
+// pw_boot_stack_high_addr. This can be done by inserting the address into index
+// 0 of the ARMv7-M vector table. (See ARMv7-M Architecture Reference Manual DDI
+// 0403E.b section B1.5.3)
+extern uint8_t pw_boot_stack_low_addr;
+extern uint8_t pw_boot_stack_high_addr;
 
-// pw_heap_[low/high]_addr indicate the address range reserved for the heap.
-extern uint8_t pw_heap_low_addr;
-extern uint8_t pw_heap_high_addr;
+// pw_boot_heap_[low/high]_addr indicate the address range reserved for the
+// heap.
+extern uint8_t pw_boot_heap_low_addr;
+extern uint8_t pw_boot_heap_high_addr;
 
 // The address that denotes the beginning of the .vector_table section. This
 // can be used to set VTOR (vector table offset register) by the bootloader.
-extern uint8_t pw_vector_table_addr;
+extern uint8_t pw_boot_vector_table_addr;
 
 // Forward declaration of main. Pigweed applications are expected to implement
 // this function. An implementation of main() is NOT provided by this module.
 int main();
 
+// Reset handler or boot entry point.
+//
 // For this module to work as expected, index 1 of the ARMv7-M vector table
 // (which usually points to Reset_Handler) must be set to point to this
 // function. This function is implemented by pw_boot_armv7m, and does early
 // memory initialization.
-PW_NO_PROLOGUE void pw_BootEntry();
+PW_NO_PROLOGUE void pw_boot_Entry();
 
-// This function is called just after zero initialization of RAM and loading
-// values into static memory (commonly labeled as the .data section in an ELF
-// file). Per the naming, this function is called just before C++ static
-// constructors are initialized. It is safe to run C code, but NOT safe to call
-// out to any C++ code.
-void pw_PreStaticConstructorInit();
+// pw_boot hook: Before static memory is initialized (user supplied)
+//
+// This is a hook function that users of pw_boot must supply. It is called
+// immediately upon entry to pw_boot_Entry() and before zero initialization of
+// RAM (.bss) and loading values into static memory (commonly labeled as the
+// .data section in an ELF file).
+// WARNING: Be EXTREMELY careful when in the context of this function as it
+// violates the C spec in several ways as .bss has not yet been zero-initialized
+// and static values have not yet been loaded into memory. This function is NOT
+// implemented by pw_boot_armv7m.
+void pw_boot_PreStaticMemoryInit();
 
-// This function is called by pw_BootEntry() after memory initialization but
-// before main. This allows targets to have pre-main initialization of the
-// device and seamlessly swap out the main() implementation. This function is
-// NOT implemented by pw_boot_armv7m.
-void pw_PreMainInit();
+// pw_boot hook: Before C++ static constructors are invoked (user supplied).
+//
+// This is a hook function that users of pw_boot must supply. It is called just
+// after zero initialization of RAM and loading values into static memory
+// (commonly labeled as the .data section in an ELF file). Per the naming, this
+// function is called just before C++ static constructors are invoked. It is
+// safe to run C code, but NOT safe to call out to any C++ code. This function
+// is NOT implemented by pw_boot_armv7m.
+void pw_boot_PreStaticConstructorInit();
+
+// pw_boot hook: Before main is invoked (user supplied).
+//
+// This is a hook function that users of pw_boot must supply. It is called by
+// pw_boot_Entry() after memory initialization but before main. This allows
+// targets to have pre-main initialization of the device and seamlessly swap out
+// the main() implementation. This function is NOT implemented by
+// pw_boot_armv7m.
+void pw_boot_PreMainInit();
+
+// pw_boot hook: After main returned (user supplied).
+//
+// This is a hook function that users of pw_boot must supply. It is called by
+// pw_boot_Entry() after main() has returned. This function must not return!
+// This function is NOT implemented by pw_boot_armv7m.
+PW_NO_RETURN void pw_boot_PostMain();
 
 PW_EXTERN_C_END
diff --git a/pw_build/BUILD.gn b/pw_build/BUILD.gn
index cf61010..d30bbc9 100644
--- a/pw_build/BUILD.gn
+++ b/pw_build/BUILD.gn
@@ -1,4 +1,4 @@
-# Copyright 2019 The Pigweed Authors
+# Copyright 2020 The Pigweed Authors
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may not
 # use this file except in compliance with the License. You may obtain a copy of
@@ -12,10 +12,13 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_docgen/docs.gni")
+
+# IMPORTANT: The compilation flags in this file must be kept in sync with
+#            the CMake flags pw_build/CMakeLists.txt.
+
 config("colorize_output") {
   cflags = [
     # Colorize output. Ninja's Clang invocation disables color by default.
@@ -70,6 +73,10 @@
   cflags = [
     "-Wall",
     "-Wextra",
+    "-Wimplicit-fallthrough",
+    "-Wcast-qual",
+    "-Wundef",
+    "-Wpointer-arith",
 
     # Make all warnings errors, except for the exemptions below.
     "-Werror",
@@ -83,7 +90,10 @@
 # enable, but are enabled for upstream Pigweed for maximum project
 # compatibility.
 config("extra_strict_warnings") {
-  cflags = [ "-Wshadow" ]
+  cflags = [
+    "-Wshadow",
+    "-Wredundant-decls",
+  ]
 }
 
 config("cpp11") {
@@ -103,6 +113,17 @@
   ]
 }
 
+# This empty target is used as the default value for module configurations.
+# Projects may set pw_build_DEFAULT_MODULE_CONFIG to a different GN target that
+# overrides modules' configuration options via macro definitions or a header
+# forcibly included with `-include`.
+group("empty") {
+}
+
+pool("pip_pool") {
+  depth = 1
+}
+
 pw_doc_group("docs") {
   sources = [ "docs.rst" ]
 }
diff --git a/pw_build/CMakeLists.txt b/pw_build/CMakeLists.txt
new file mode 100644
index 0000000..b2eeef0
--- /dev/null
+++ b/pw_build/CMakeLists.txt
@@ -0,0 +1,86 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+# IMPORTANT: The compilation flags in this file must be kept in sync with
+#            the GN flags //pw_build/BUILD.gn.
+
+# Target that specifies the standard Pigweed build options.
+add_library(pw_build INTERFACE)
+target_compile_options(pw_build INTERFACE "-g")
+target_link_libraries(pw_build
+  INTERFACE
+    pw_build.reduced_size
+    pw_build.cpp17
+)
+target_compile_options(pw_build
+  INTERFACE
+    # Force the compiler use colorized output. This is required for Ninja.
+    $<$<CXX_COMPILER_ID:Clang>:-fcolor-diagnostics>
+    $<$<CXX_COMPILER_ID:GNU>:-fdiagnostics-color=always>
+)
+
+# Declare top-level targets for tests.
+add_custom_target(pw_tests.default)
+add_custom_target(pw_run_tests.default)
+
+add_custom_target(pw_tests DEPENDS pw_tests.default)
+add_custom_target(pw_run_tests DEPENDS pw_run_tests.default)
+
+# Define the standard Pigweed compile options.
+add_library(pw_build.reduced_size INTERFACE)
+target_compile_options(pw_build.reduced_size
+  INTERFACE
+    "-fno-common"
+    "-fno-exceptions"
+    "-ffunction-sections"
+    "-fdata-sections"
+    $<$<COMPILE_LANGUAGE:CXX>:-fno-rtti>
+)
+
+add_library(pw_build.strict_warnings INTERFACE)
+target_compile_options(pw_build.strict_warnings
+  INTERFACE
+    "-Wall"
+    "-Wextra"
+    "-Wimplicit-fallthrough"
+    "-Wcast-qual"
+    "-Wundef"
+    "-Wpointer-arith"
+
+    # Make all warnings errors, except for the exemptions below.
+    "-Werror"
+    "-Wno-error=cpp"  # preprocessor #warning statement
+    "-Wno-error=deprecated-declarations"  # [[deprecated]] attribute
+
+    $<$<COMPILE_LANGUAGE:CXX>:-Wnon-virtual-dtor>
+)
+
+add_library(pw_build.extra_strict_warnings INTERFACE)
+target_compile_options(pw_build.extra_strict_warnings
+  INTERFACE
+    "-Wshadow"
+    "-Wredundant-decls"
+)
+
+add_library(pw_build.cpp17 INTERFACE)
+target_compile_options(pw_build.cpp17
+  INTERFACE
+    $<$<COMPILE_LANGUAGE:CXX>:-std=c++17>
+    # Allow uses of the register keyword, which may appear in C headers.
+    $<$<COMPILE_LANGUAGE:CXX>:-Wno-register>
+)
+
+# Create an empty source file and library for general use.
+file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/empty_file.c" "")
+add_library(pw_build.empty OBJECT "${CMAKE_CURRENT_BINARY_DIR}/empty_file.c" "")
diff --git a/pw_build/defaults.gni b/pw_build/defaults.gni
index 5a481f4..6d341e7 100644
--- a/pw_build/defaults.gni
+++ b/pw_build/defaults.gni
@@ -12,8 +12,8 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
+
 declare_args() {
   # Default configs and dependencies targets provided by the toolchain. These
   # are applied to all of the pw_* target types. They are set from a toolchain's
@@ -40,7 +40,7 @@
     "$dir_pw_build:strict_warnings",
     "$dir_pw_build:cpp17",
   ]
-  public_deps = [ "$dir_pw_polyfill:overrides" ]
+  public_deps += [ "$dir_pw_polyfill:overrides" ]
 }
 
 # One more pass, to remove configs
diff --git a/pw_build/docs.rst b/pw_build/docs.rst
index 16e9d9f..d1c5b46 100644
--- a/pw_build/docs.rst
+++ b/pw_build/docs.rst
@@ -1,8 +1,4 @@
-.. default-domain:: cpp
-
-.. highlight:: sh
-
-.. _chapter-pw-build:
+.. _module-pw_build:
 
 --------
 pw_build
@@ -46,7 +42,7 @@
 
 Target types
 ^^^^^^^^^^^^
-.. code::
+.. code-block::
 
   import("$dir_pw_build/target_types.gni")
 
@@ -79,24 +75,56 @@
 All of the ``pw_*`` target type overrides accept any arguments, as they simply
 forward them through to the underlying target.
 
-pw_python_script
+.. _module-pw_build-facade:
+
+pw_facade
+^^^^^^^^^
+In their simplest form, a :ref:`facade<docs-module-structure-facades>` is a GN
+build arg used to change a dependency at compile time. Pigweed targets configure
+these facades as needed.
+
+The ``pw_facade`` template bundles a ``pw_source_set`` with a facade build arg.
+This allows the facade to provide header files, compilation options or anything
+else a GN ``source_set`` provides.
+
+The ``pw_facade`` template declares two targets:
+
+* ``$target_name``: the public-facing ``pw_source_set``, with a ``public_dep``
+  on the backend
+* ``$target_name.facade``: target used by the backend to avoid circular
+  dependencies
+
+.. code-block::
+
+  # Declares ":foo" and ":foo.facade" GN targets
+  pw_facade("foo") {
+    backend = pw_log_BACKEND
+    public_configs = [ ":public_include_path" ]
+    public = [ "public/pw_foo/foo.h" ]
+  }
+
+.. _module-pw_build-python-action:
+
+pw_python_action
 ^^^^^^^^^^^^^^^^
-The ``pw_python_script`` template is a convenience wrapper around ``action`` for
+The ``pw_python_action`` template is a convenience wrapper around ``action`` for
 running Python scripts. The main benefit it provides is resolution of GN target
 labels to compiled binary files. This allows Python scripts to be written
 independently of GN, taking only filesystem paths as arguments.
 
 Another convenience provided by the template is to allow running scripts without
 any outputs. Sometimes scripts run in a build do not directly produce output
-files, but GN requires that all actions have an output. ``pw_python_script``
+files, but GN requires that all actions have an output. ``pw_python_action``
 solves this by accepting a boolean ``stamp`` argument which tells it to create a
 dummy output file for the action.
 
 **Arguments**
 
-``pw_python_script`` accepts all of the arguments of a regular ``action``
+``pw_python_action`` accepts all of the arguments of a regular ``action``
 target. Additionally, it has some of its own arguments:
 
+* ``module``: Run the specified Python module instead of a script. Either
+  ``script`` or ``module`` must be specified, but not both.
 * ``capture_output``: Optional boolean. If true, script output is hidden unless
   the script fails with an error. Defaults to true.
 * ``stamp``: Optional variable indicating whether to automatically create a
@@ -104,10 +132,14 @@
   specifying ``outputs``. If ``stamp`` is true, a generic output file is
   used. If ``stamp`` is a file path, that file is used as a stamp file. Like any
   output file, ``stamp`` must be in the build directory. Defaults to false.
+* ``directory``: Optional path. Change to this directory before executing the
+  command. Paths in arguments may need to be adjusted.
+* ``environment``: Optional list of strings. Environment variables to set,
+  passed as NAME=VALUE strings.
 
 **Expressions**
 
-``pw_python_script`` evaluates expressions in ``args``, the arguments passed to
+``pw_python_action`` evaluates expressions in ``args``, the arguments passed to
 the script. These expressions function similarly to generator expressions in
 CMake. Expressions may be passed as a standalone argument or as part of another
 argument. A single argument may contain multiple expressions.
@@ -123,13 +155,13 @@
   Evaluates to the output file of the provided GN target. For example, the
   expression
 
-  .. code::
+  .. code-block::
 
     "<TARGET_FILE(//foo/bar:static_lib)>"
 
   might expand to
 
-  .. code::
+  .. code-block::
 
     "/home/User/project_root/out/obj/foo/bar/static_lib.a"
 
@@ -154,14 +186,14 @@
 
   For example, consider this expression:
 
-  .. code::
+  .. code-block::
 
     "--database=<TARGET_FILE_IF_EXISTS(//alpha/bravo)>"
 
   If the ``//alpha/bravo`` target file exists, this might expand to the
   following:
 
-  .. code::
+  .. code-block::
 
     "--database=/home/User/project/out/obj/alpha/bravo/bravo.elf"
 
@@ -177,13 +209,13 @@
 
   For example, the expression
 
-  .. code::
+  .. code-block::
 
     "<TARGET_OBJECTS(//foo/bar:a_source_set)>"
 
   might expand to multiple separate arguments:
 
-  .. code::
+  .. code-block::
 
     "/home/User/project_root/out/obj/foo/bar/a_source_set.file_a.cc.o"
     "/home/User/project_root/out/obj/foo/bar/a_source_set.file_b.cc.o"
@@ -191,11 +223,11 @@
 
 **Example**
 
-.. code::
+.. code-block::
 
-  import("$dir_pw_build/python_script.gni")
+  import("$dir_pw_build/python_action.gni")
 
-  pw_python_script("postprocess_main_image") {
+  pw_python_action("postprocess_main_image") {
     script = "py/postprocess_binary.py"
     args = [
       "--database",
@@ -231,7 +263,7 @@
 
 **Example**
 
-.. code::
+.. code-block::
 
   import("$dir_pw_build/input_group.gni")
 
@@ -249,21 +281,117 @@
 Targets depending on ``foo_metadata`` will rebuild when any of the ``.foo``
 files are modified.
 
+pw_zip
+^^^^^^
+``pw_zip`` is a target that allows users to zip up a set of input files and
+directories into a single output ``.zip`` file—a simple automation of a
+potentially repetitive task.
+
+**Arguments**
+
+* ``inputs``: List of source files as well as the desired relative zip
+  destination. See below for the input syntax.
+* ``dirs``: List of entire directories to be zipped as well as the desired
+  relative zip destination. See below for the input syntax.
+* ``output``: Filename of output ``.zip`` file.
+* ``deps``: List of dependencies for the target.
+
+**Input Syntax**
+
+Inputs all need to follow the correct syntax:
+
+#. Path to source file or directory. Directories must end with a ``/``.
+#. The delimiter (defaults to ``>``).
+#. The desired destination of the contents within the ``.zip``. Must start
+   with ``/`` to indicate the zip root. Any number of subdirectories are
+   allowed. If the source is a file it can be put into any subdirectory of the
+   root. If the source is a file, the zip copy can also be renamed by ending
+   the zip destination with a filename (no trailing ``/``).
+
+Thus, it should look like the following: ``"[source file or dir] > /"``.
+
+**Example**
+
+Let's say we have the following structure for a ``//source/`` directory:
+
+.. code-block::
+
+  source/
+  ├── file1.txt
+  ├── file2.txt
+  ├── file3.txt
+  └── some_dir/
+      ├── file4.txt
+      └── some_other_dir/
+          └── file5.txt
+
+And we create the following build target:
+
+.. code-block::
+
+  import("$dir_pw_build/zip.gni")
+
+  pw_zip("target_name") {
+    inputs = [
+      "//source/file1.txt > /",             # Copied to the zip root dir.
+      "//source/file2.txt > /renamed.txt",  # File renamed.
+      "//source/file3.txt > /bar/",         # File moved to the /bar/ dir.
+    ]
+
+    dirs = [
+      "//source/some_dir/ > /bar/some_dir/",  # All /some_dir/ contents copied
+                                              # as /bar/some_dir/.
+    ]
+
+    # Note on output: if the specific output directory isn't defined
+    # (such as output = "zoo.zip") then the .zip will output to the
+    # same directory as the BUILD.gn file that called the target.
+    output = "//$target_out_dir/foo.zip"  # Where the foo.zip will end up
+  }
+
+This will result in a ``.zip`` file called ``foo.zip`` stored in
+``//$target_out_dir`` with the following structure:
+
+.. code-block::
+
+  foo.zip
+  ├── bar/
+  │   ├── file3.txt
+  │   └── some_dir/
+  │       ├── file4.txt
+  │       └── some_other_dir/
+  │           └── file5.txt
+  ├── file1.txt
+  └── renamed.txt
+
 CMake / Ninja
 =============
+Pigweed's `CMake`_ support is provided primarily for projects that have an
+existing CMake build and wish to integrate Pigweed without switching to a new
+build system.
 
-Pigweed's CMake support is provided primarily for projects that have an existing
-CMake build and wish to integrate Pigweed without switching to a new build
-system.
+The following command generates Ninja build files for a host build in the
+``out/cmake_host`` directory:
 
-The following command generates Ninja build files in the out/cmake directory.
+.. code-block:: sh
 
-.. code:: sh
+  cmake -B out/cmake_host -S "$PW_ROOT" -G Ninja -DCMAKE_TOOLCHAIN_FILE=$PW_ROOT/pw_toolchain/host_clang/toolchain.cmake
 
-  cmake -B out/cmake -S /path/to/pigweed -G Ninja
+The ``PW_ROOT`` environment variable must point to the root of the Pigweed
+directory. This variable is set by Pigweed's environment setup.
 
-Tests can be executed with the ``pw_run_tests_GROUP`` targets. To run the basic
-Pigweed tests, run ``ninja -C out/cmake pw_run_tests_modules``.
+Tests can be executed with the ``pw_run_tests.GROUP`` targets. To run Pigweed
+module tests, execute ``pw_run_tests.modules``:
+
+.. code-block:: sh
+
+  ninja -C out/cmake_host pw_run_tests.modules
+
+:ref:`module-pw_watch` supports CMake, so you can also run
+
+.. code-block:: sh
+
+  pw watch out/cmake_host pw_run_tests.modules
 
 CMake functions
 ---------------
@@ -271,7 +399,9 @@
 
 * ``pw_auto_add_simple_module`` -- For modules with only one library,
   automatically declare the library and its tests.
+* ``pw_auto_add_module_tests`` -- Create test targets for all tests in a module.
 * ``pw_add_facade`` -- Declare a module facade.
+* ``pw_set_backend`` -- Set the backend library to use for a facade.
 * ``pw_add_module_library`` -- Add a library that is part of a module.
 * ``pw_add_test`` -- Declare a test target.
 
@@ -281,12 +411,87 @@
 Special libraries that do not fit well with these functions are created with the
 standard CMake functions, such as ``add_library`` and ``target_link_libraries``.
 
+Facades and backends
+--------------------
+The CMake build uses CMake cache variables for configuring
+:ref:`facades<docs-module-structure-facades>` and backends. Cache variables are
+similar to GN's build args set with ``gn args``. Unlike GN, CMake does not
+support multi-toolchain builds, so these variables have a single global value
+per build directory.
+
+The ``pw_add_facade`` function declares a cache variable named
+``<module_name>_BACKEND`` for each facade. Cache variables can be awkward to
+work with, since their values only change when they're assigned, but then
+persist accross CMake invocations. These variables should be set in one of the
+following ways:
+
+* Call ``pw_set_backend`` to set backends appropriate for the target in the
+  target's toolchain file. The toolchain file is provided to ``cmake`` with
+  ``-DCMAKE_TOOLCHAIN_FILE=<toolchain file>``.
+* Call ``pw_set_backend`` in the top-level ``CMakeLists.txt`` before other
+  CMake code executes.
+* Set the backend variable at the command line with the ``-D`` option.
+
+  .. code-block:: sh
+
+    cmake -B out/cmake_host -S "$PW_ROOT" -G Ninja \
+        -DCMAKE_TOOLCHAIN_FILE=$PW_ROOT/pw_toolchain/host_clang/toolchain.cmake \
+        -Dpw_log_BACKEND=pw_log_basic
+
+* Temporarily override a backend by setting it interactively with ``ccmake`` or
+  ``cmake-gui``.
+
+Toolchain setup
+---------------
+In CMake, the toolchain is configured by setting CMake variables, as described
+in the `CMake documentation <https://cmake.org/cmake/help/latest/manual/cmake-toolchains.7.html>`_.
+These variables are typically set in a toolchain CMake file passed to ``cmake``
+with the ``-D`` option (``-DCMAKE_TOOLCHAIN_FILE=path/to/file.cmake``).
+For Pigweed embedded builds, set ``CMAKE_SYSTEM_NAME`` to the empty string
+(``""``).
+
+Third party libraries
+---------------------
+The CMake build includes third-party libraries similarly to the GN build. A
+``dir_pw_third_party_<library>`` cache variable is defined for each third-party
+dependency. This variable can have one of three values:
+
+* ``""`` (empty) -- the dependency is not available
+* ``PRESENT`` -- the dependency is available and is already included in the
+  build
+* ``</path/to/the/dependency>`` -- the dependency is available and will be
+  automatically imported from this path using ``add_subdirectory``.
+
+If the variable is empty (``if("${dir_pw_third_party_<library>}" STREQUAL
+"")``), the dependency is not available. Otherwise, it is available and
+libraries declared by it can be referenced.
+
+Third party variables are set like any other global cache variable in CMake. It
+is recommended to set these in one of the following ways:
+
+* Set with the CMake ``set`` function in the toolchain file or a
+  ``CMakeLists.txt`` before other CMake code executes.
+
+  .. code-block:: cmake
+
+    set(dir_pw_third_party_nanopb PRESENT CACHE STRING "" FORCE)
+
+* Set the variable at the command line with the ``-D`` option.
+
+  .. code-block:: sh
+
+    cmake -B out/cmake_host -S "$PW_ROOT" -G Ninja \
+        -DCMAKE_TOOLCHAIN_FILE=$PW_ROOT/pw_toolchain/host_clang/toolchain.cmake \
+        -Ddir_pw_third_party_nanopb=/path/to/nanopb
+
+* Set the variable interactively with ``ccmake`` or ``cmake-gui``.
+
 Use Pigweed from an existing CMake project
 ------------------------------------------
 To use Pigweed libraries form a CMake-based project, simply include the Pigweed
 repository from a ``CMakeLists.txt``.
 
-.. code:: cmake
+.. code-block:: cmake
 
   add_subdirectory(path/to/pigweed pigweed)
 
@@ -295,16 +500,13 @@
 
 If desired, modules can be included individually.
 
-.. code:: cmake
-
-  include(path/to/pigweed/pw_build/pigweed.cmake)
+.. code-block:: cmake
 
   add_subdirectory(path/to/pigweed/pw_some_module pw_some_module)
   add_subdirectory(path/to/pigweed/pw_another_module pw_another_module)
 
 Bazel
 =====
-
 Bazel is currently very experimental, and only builds for host.
 
 The common configuration for Bazel for all modules is in the ``pigweed.bzl``
diff --git a/pw_build/error.gni b/pw_build/error.gni
new file mode 100644
index 0000000..6e7994a
--- /dev/null
+++ b/pw_build/error.gni
@@ -0,0 +1,36 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("python_action.gni")
+
+# Prints an error message and exits the build unsuccessfully.
+#
+# Args:
+#   message: The message to print.
+#
+template("pw_error") {
+  assert(defined(invoker.message) && invoker.message != "",
+         "pw_error requires an error message")
+
+  pw_python_action(target_name) {
+    script = "$dir_pw_build/py/pw_build/error.py"
+    args = [
+      "--target",
+      get_label_info(target_name, "label_no_toolchain"),
+      "--message",
+      invoker.message,
+    ]
+    stamp = true
+  }
+}
diff --git a/pw_build/exec.gni b/pw_build/exec.gni
index 787d69c..d2b4e69 100644
--- a/pw_build/exec.gni
+++ b/pw_build/exec.gni
@@ -12,7 +12,7 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-import("python_script.gni")
+import("python_action.gni")
 
 # Runs a program which isn't in Python.
 #
@@ -108,7 +108,7 @@
     _script_args += invoker.args
   }
 
-  pw_python_script(target_name) {
+  pw_python_action(target_name) {
     script = "$dir_pw_build/py/pw_build/exec.py"
     args = _script_args
 
diff --git a/pw_build/facade.gni b/pw_build/facade.gni
index e9604fc..d957646 100644
--- a/pw_build/facade.gni
+++ b/pw_build/facade.gni
@@ -11,19 +11,31 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations under
 # the License.
-# gn-format disable
+
 import("//build_overrides/pigweed.gni")
 
-import("$dir_pw_build/python_script.gni")
+import("$dir_pw_build/python_action.gni")
 import("$dir_pw_build/target_types.gni")
 
 # Declare a facade.
-# A Pigweed facade is an API layer that has a single implementation it must link
-# against. Typically this will be done by pointing `dir_pw_[module]_backend` at
-# a backend implementation for that module.
+#
+# A Pigweed facade is an API layer that has a single implementation it must
+# link against. Typically this will be done by pointing a build arg like
+# `pw_[module]_BACKEND` at a backend implementation for that module.
+#
+# To avoid circular dependencies, pw_facade creates two targets:
+#
+#   - $target_name: the public-facing pw_source_set
+#   - $target_name.facade: target used by the backend to avoid circular
+#         dependencies
+#
+# If the target name matches the directory name (e.g. //foo:foo), a ":facade"
+# alias of the facade target (e.g. //foo:facade) is also provided. This avoids
+# the need to repeat the directory name, for consistency with the main target.
 #
 # Example facade:
 #
+#   # Creates ":module_name" and ":module_name.facade" GN targets.
 #   pw_facade("module_name") {
 #     backend = dir_module_name_backend
 #     public_deps = [
@@ -31,20 +43,26 @@
 #     ]
 #   }
 #
-# Args:
-#  - backend: the dependency that implements this facade
-#  - facade_name: (optional) The name to use for the facade target on which the
-#        backend depends. Only required when a module defines multiple facades.
-#        Defaults to "facade".
+# Accepts the standard pw_source_set args with the following additions:
+#
+#  - backend: the dependency that implements this facade (a GN variable)
 #
 template("pw_facade") {
   assert(defined(invoker.backend),
          "pw_facade requires a reference to a backend variable for the facade")
 
-  if (defined(invoker.facade_name)) {
-    _facade_name = invoker.facade_name
-  } else {
-    _facade_name = "facade"
+  _facade_name = "$target_name.facade"
+
+  if (get_path_info(get_label_info(":$target_name", "dir"), "name") ==
+      get_label_info(":$target_name", "name")) {
+    group("facade") {
+      public_deps = [ ":$_facade_name" ]
+    }
+  }
+
+  # For backwards compatibility, provide a _facade version of the name.
+  group(target_name + "_facade") {
+    public_deps = [ ":$_facade_name" ]
   }
 
   # A facade's headers are split into a separate target to avoid a circular
@@ -89,7 +107,7 @@
     # error message.
     _main_target_name = target_name
 
-    pw_python_script(_main_target_name + "_NO_BACKEND_SET") {
+    pw_python_action(_main_target_name + ".NO_BACKEND_SET") {
       stamp = true
       script = "$dir_pw_build/py/pw_build/null_backend.py"
       args = [ _main_target_name ]
@@ -111,8 +129,8 @@
     if (invoker.backend != "") {
       public_deps += [ invoker.backend ]
     } else {
-      # If the backend is not set, depend on the *_NO_BACKEND_SET target.
-      public_deps += [ ":$_main_target_name" + "_NO_BACKEND_SET" ]
+      # If the backend is not set, depend on the *.NO_BACKEND_SET target.
+      public_deps += [ ":$_main_target_name" + ".NO_BACKEND_SET" ]
     }
   }
 }
diff --git a/pw_build/host_tool.gni b/pw_build/host_tool.gni
index 7c092bb..cac3f23 100644
--- a/pw_build/host_tool.gni
+++ b/pw_build/host_tool.gni
@@ -12,7 +12,7 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-import("python_script.gni")
+import("python_action.gni")
 
 declare_args() {
   # Whether to build host-side tooling.
@@ -46,7 +46,7 @@
       ]
     }
 
-    pw_python_script(target_name) {
+    pw_python_action(target_name) {
       script = "$dir_pw_build/py/pw_build/host_tool.py"
       args = _script_args
       deps = [ invoker.tool ]
diff --git a/pw_build/input_group.gni b/pw_build/input_group.gni
index 3c71453..a7b93b4 100644
--- a/pw_build/input_group.gni
+++ b/pw_build/input_group.gni
@@ -12,7 +12,7 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-import("python_script.gni")
+import("python_action.gni")
 
 # Creates an action that doesn't do anything that depends on a list of input
 # files. This allows modifications to these files to trigger targets depending
@@ -23,7 +23,7 @@
 template("pw_input_group") {
   assert(defined(invoker.inputs), "pw_input_group requires some inputs")
 
-  pw_python_script(target_name) {
+  pw_python_action(target_name) {
     ignore_vars = [
       "args",
       "script",
diff --git a/pw_build/linker_script.gni b/pw_build/linker_script.gni
index 47dfd8f..7ba64ac 100644
--- a/pw_build/linker_script.gni
+++ b/pw_build/linker_script.gni
@@ -12,7 +12,6 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/exec.gni")
@@ -84,7 +83,9 @@
 
     # Add defines.
     if (defined(invoker.defines)) {
-      args += process_file_template(invoker.defines, "-D{{source_name_part}}")
+      foreach(compiler_define, invoker.defines) {
+        args += [ "-D${compiler_define}" ]
+      }
     }
 
     # Set output file.
diff --git a/pw_build/module_config.gni b/pw_build/module_config.gni
new file mode 100644
index 0000000..8b2d7ed
--- /dev/null
+++ b/pw_build/module_config.gni
@@ -0,0 +1,29 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+declare_args() {
+  # The default implementation for all Pigweed module configurations.
+  #
+  # This variable makes it possible to configure multiple Pigweed modules from
+  # a single GN target. Module configurations can still be overridden
+  # individually by setting a module's config backend directly (e.g.
+  # pw_some_module_CONFIG = "//my_config").
+  #
+  # Modules are configured through compilation options. The configuration
+  # implementation might set individual compiler macros or forcibly include a
+  # config header with multiple options using the -include flag.
+  pw_build_DEFAULT_MODULE_CONFIG = "$dir_pw_build:empty"
+}
diff --git a/pw_build/pigweed.cmake b/pw_build/pigweed.cmake
index f5a4a7b..10e414b 100644
--- a/pw_build/pigweed.cmake
+++ b/pw_build/pigweed.cmake
@@ -13,11 +13,6 @@
 # the License.
 include_guard(GLOBAL)
 
-# Create an empty, dummy source file for use by non-INTERFACE libraries, which
-# require at least one source file.
-set(_pw_empty_source_file "${CMAKE_BINARY_DIR}/pw_empty_source_file.cc")
-file(WRITE "${_pw_empty_source_file}" "")
-
 # Automatically creates a library and test targets for the files in a module.
 # This function is only suitable for simple modules that meet the following
 # requirements:
@@ -43,25 +38,27 @@
 #   3. Declare a test for each source file that ends with _test.cc.
 #
 # Args:
-#   IMPLEMENTS_FACADE: this module implements the specified facade
-#   PUBLIC_DEPS: public target_link_libraries arguments
-#   PRIVATE_DEPS: private target_link_libraries arguments
+#
+#   IMPLEMENTS_FACADE - this module implements the specified facade
+#   PUBLIC_DEPS - public target_link_libraries arguments
+#   PRIVATE_DEPS - private target_link_libraries arguments
 #
 function(pw_auto_add_simple_module MODULE)
-  set(multi PUBLIC_DEPS PRIVATE_DEPS)
+  set(multi PUBLIC_DEPS PRIVATE_DEPS TEST_DEPS)
   cmake_parse_arguments(PARSE_ARGV 1 arg "" "IMPLEMENTS_FACADE" "${multi}")
 
   file(GLOB all_sources *.cc *.c)
 
   # Create a library with all source files not ending in _test.
   set(sources "${all_sources}")
-  list(FILTER sources EXCLUDE REGEX "_test.cc$")
+  list(FILTER sources EXCLUDE REGEX "_test(\\.cc|(_c)?\\.c)$")
+  list(FILTER sources EXCLUDE REGEX "_fuzzer\\.cc$")
 
   file(GLOB_RECURSE headers *.h)
 
   if(arg_IMPLEMENTS_FACADE)
     set(groups backends)
-    set(deps PUBLIC_DEPS "${arg_IMPLEMENTS_FACADE}.facade")
+    set(facade_dep "${arg_IMPLEMENTS_FACADE}.facade")
   else()
     set(groups modules "${MODULE}")
   endif()
@@ -69,48 +66,72 @@
   pw_add_module_library("${MODULE}"
     PUBLIC_DEPS
       ${arg_PUBLIC_DEPS}
+      ${facade_dep}
     PRIVATE_DEPS
       ${arg_PRIVATE_DEPS}
     SOURCES
       ${sources}
     HEADERS
       ${headers}
-    ${deps}
   )
 
-  # Create a test for each source file ending in _test. Tests with mutliple .cc
-  # files or different dependencies than the module will not work correctly.
-  set(tests "${all_sources}")
-  list(FILTER tests INCLUDE REGEX "_test.cc$")
+  if(arg_IMPLEMENTS_FACADE)
+    target_include_directories("${MODULE}" PUBLIC public_overrides)
+  endif()
 
-  foreach(test IN LISTS tests)
+  pw_auto_add_module_tests("${MODULE}"
+    PRIVATE_DEPS
+      ${arg_PUBLIC_DEPS}
+      ${arg_PRIVATE_DEPS}
+      ${arg_TEST_DEPS}
+    GROUPS
+      ${groups}
+  )
+endfunction(pw_auto_add_simple_module)
+
+# Creates a test for each source file ending in _test. Tests with multiple .cc
+# files or different dependencies than the module will not work correctly.
+#
+# Args:
+#
+#  PRIVATE_DEPS - dependencies to apply to all tests
+#  GROUPS - groups in addition to MODULE to which to add these tests
+#
+function(pw_auto_add_module_tests MODULE)
+  cmake_parse_arguments(PARSE_ARGV 1 arg "" "" "PRIVATE_DEPS;GROUPS")
+
+  file(GLOB cc_tests *_test.cc)
+
+  foreach(test IN LISTS cc_tests)
     get_filename_component(test_name "${test}" NAME_WE)
 
     # Find a .c test corresponding with the test .cc file, if any.
-    list(FILTER c_test INCLUDE REGEX "^${test_name}.c$")
+    file(GLOB c_test "${test_name}.c" "${test_name}_c.c")
 
     pw_add_test("${MODULE}.${test_name}"
       SOURCES
         "${test}"
         ${c_test}
       DEPS
-        "${MODULE}"
-        ${arg_PUBLIC_DEPS}
+        "$<TARGET_NAME_IF_EXISTS:${MODULE}>"
         ${arg_PRIVATE_DEPS}
       GROUPS
-        "${groups}"
+        "${MODULE}"
+        ${arg_GROUPS}
     )
   endforeach()
-endfunction(pw_auto_add_simple_module)
+endfunction(pw_auto_add_module_tests)
 
 # Creates a library in a module. The library has access to the public/ include
 # directory.
 #
 # Args:
-#   SOURCES: source files for this library
-#   HEADERS: header files for this library
-#   PUBLIC_DEPS: public target_link_libraries arguments
-#   PRIVATE_DEPS: private target_link_libraries arguments
+#
+#   SOURCES - source files for this library
+#   HEADERS - header files for this library
+#   PUBLIC_DEPS - public target_link_libraries arguments
+#   PRIVATE_DEPS - private target_link_libraries arguments
+#
 function(pw_add_module_library NAME)
   set(list_args SOURCES HEADERS PUBLIC_DEPS PRIVATE_DEPS)
   cmake_parse_arguments(PARSE_ARGV 1 arg "" "" "${list_args}")
@@ -118,7 +139,7 @@
   # Check that the library's name is prefixed by the module name.
   get_filename_component(module "${CMAKE_CURRENT_SOURCE_DIR}" NAME)
 
-  if(NOT "${NAME}" MATCHES "^${module}(\\.[^\\.]+)?(\\.facade|\\.backend)?$")
+  if(NOT "${NAME}" MATCHES "${module}(\\.[^\\.]+)?(\\.facade)?$")
     message(FATAL_ERROR
         "Module libraries must match the module name or be in the form "
         "'MODULE_NAME.LIBRARY_NAME'. The library '${NAME}' does not match."
@@ -132,12 +153,14 @@
       pw_build
       ${arg_PUBLIC_DEPS}
     PRIVATE
+      pw_build.strict_warnings
+      pw_build.extra_strict_warnings
       ${arg_PRIVATE_DEPS}
   )
 
   # Libraries require at least one source file.
   if(NOT arg_SOURCES)
-    target_sources("${NAME}" PRIVATE "${_pw_empty_source_file}")
+    target_sources("${NAME}" PRIVATE $<TARGET_PROPERTY:pw_build.empty,SOURCES>)
   endif()
 endfunction(pw_add_module_library)
 
@@ -148,31 +171,64 @@
 # module that implements the facade depends on a library named
 # MODULE_NAME.facade.
 #
-# pw_add_facade accepts the same arguments as pw_add_module_library.
+# pw_add_facade accepts the same arguments as pw_add_module_library, with the
+# following additions:
+#
+#  DEFAULT_BACKEND - which backend to use by default
+#
 function(pw_add_facade NAME)
-  pw_add_module_library("${NAME}.facade" ${ARGN})
+  cmake_parse_arguments(PARSE_ARGV 1 arg "" "DEFAULT_BACKEND" "")
 
-  # Use a library with an empty source instead of an INTERFACE library so that
-  # the library can have a private dependency on the backend.
-  add_library("${NAME}" OBJECT EXCLUDE_FROM_ALL "${_pw_empty_source_file}")
+  # If no backend is set, a script that displays an error message is used
+  # instead. If the facade is used in the build, it fails with this error.
+  add_custom_target("${NAME}._no_backend_set_message"
+    COMMAND
+      python "$ENV{PW_ROOT}/pw_build/py/pw_build/null_backend.py" "${NAME}"
+  )
+  add_library("${NAME}.NO_BACKEND_SET" INTERFACE)
+  add_dependencies("${NAME}.NO_BACKEND_SET" "${NAME}._no_backend_set_message")
+
+  # Set the default backend to the error message if no default is specified.
+  if("${arg_DEFAULT_BACKEND}" STREQUAL "")
+    set(arg_DEFAULT_BACKEND "${NAME}.NO_BACKEND_SET")
+  endif()
+
+  # Declare the backend variable for this facade.
+  set("${NAME}_BACKEND" "${arg_DEFAULT_BACKEND}" CACHE STRING
+      "Backend for ${NAME}")
+
+  # Define the facade library, which is used by the backend to avoid circular
+  # dependencies.
+  pw_add_module_library("${NAME}.facade" ${arg_UNPARSED_ARGUMENTS})
+
+  # Define the public-facing library for this facade, which depends on the
+  # header files in .facade target and exposes the dependency on the backend.
+  add_library("${NAME}" INTERFACE)
   target_link_libraries("${NAME}"
-    PUBLIC
+    INTERFACE
       "${NAME}.facade"
-      "${NAME}.backend"
+      "${${NAME}_BACKEND}"
   )
 endfunction(pw_add_facade)
 
+# Sets which backend to use for the given facade.
+function(pw_set_backend FACADE BACKEND)
+  set("${FACADE}_BACKEND" "${BACKEND}" CACHE STRING "Backend for ${FACADE}" FORCE)
+endfunction(pw_set_backend)
+
 # Declares a unit test. Creates two targets:
 #
-#  - <TEST_NAME>: the test executable
-#  - <TEST_NAME>_run: builds and runs the test
+#  * <TEST_NAME> - the test executable
+#  * <TEST_NAME>_run - builds and runs the test
 #
 # Args:
+#
 #   NAME: name to use for the target
 #   SOURCES: source files for this test
 #   DEPS: libraries on which this test depends
 #   GROUPS: groups to which to add this test; if none are specified, the test is
-#       added to the default and all groups
+#       added to the 'default' and 'all' groups
+#
 function(pw_add_test NAME)
   cmake_parse_arguments(PARSE_ARGV 1 arg "" "" "SOURCES;DEPS;GROUPS")
 
@@ -227,57 +283,3 @@
     add_dependencies("pw_run_tests.${group}" "${TEST_NAME}_run")
   endforeach()
 endfunction(pw_add_test_to_groups)
-
-# Declare top-level targets for tests.
-add_custom_target(pw_tests.default)
-add_custom_target(pw_run_tests.default)
-
-add_custom_target(pw_tests DEPENDS pw_tests.default)
-add_custom_target(pw_run_tests DEPENDS pw_run_tests.default)
-
-# Define the standard Pigweed compile options.
-add_library(_pw_reduced_size_copts INTERFACE)
-target_compile_options(_pw_reduced_size_copts
-  INTERFACE
-    "-fno-common"
-    "-fno-exceptions"
-    "-ffunction-sections"
-    "-fdata-sections"
-    $<$<COMPILE_LANGUAGE:CXX>:-fno-rtti>
-)
-
-add_library(_pw_strict_warnings_copts INTERFACE)
-target_compile_options(_pw_strict_warnings_copts
-  INTERFACE
-    "-Wall"
-    "-Wextra"
-    # Make all warnings errors, except for the exemptions below.
-    "-Werror"
-    "-Wno-error=cpp"  # preprocessor #warning statement
-    "-Wno-error=deprecated-declarations"  # [[deprecated]] attribute
-    $<$<COMPILE_LANGUAGE:CXX>:-Wnon-virtual-dtor>
-)
-
-add_library(_pw_cpp17_copts INTERFACE)
-target_compile_options(_pw_cpp17_copts
-  INTERFACE
-    $<$<COMPILE_LANGUAGE:CXX>:-std=c++17>
-    # Allow uses of the register keyword, which may appear in C headers.
-    $<$<COMPILE_LANGUAGE:CXX>:-Wno-register>
-)
-
-# Target that specifies the standard Pigweed build options.
-add_library(pw_build INTERFACE)
-target_compile_options(pw_build INTERFACE "-g")
-target_link_libraries(pw_build
-  INTERFACE
-    _pw_reduced_size_copts
-    _pw_strict_warnings_copts
-    _pw_cpp17_copts
-)
-target_compile_options(pw_build
-  INTERFACE
-    # Force the compiler use colorized output. This is required for Ninja.
-    $<$<CXX_COMPILER_ID:Clang>:-fcolor-diagnostics>
-    $<$<CXX_COMPILER_ID:GNU>:-fdiagnostics-color=always>
-)
diff --git a/pw_build/py/BUILD.gn b/pw_build/py/BUILD.gn
new file mode 100644
index 0000000..04a30e2
--- /dev/null
+++ b/pw_build/py/BUILD.gn
@@ -0,0 +1,36 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/python.gni")
+
+pw_python_package("py") {
+  setup = [ "setup.py" ]
+  sources = [
+    "pw_build/__init__.py",
+    "pw_build/error.py",
+    "pw_build/exec.py",
+    "pw_build/generate_python_package_gn.py",
+    "pw_build/generated_tests.py",
+    "pw_build/host_tool.py",
+    "pw_build/nop.py",
+    "pw_build/null_backend.py",
+    "pw_build/python_runner.py",
+    "pw_build/python_wheels.py",
+    "pw_build/zip.py",
+    "pw_build/zip_test.py",
+  ]
+  tests = [ "python_runner_test.py" ]
+}
diff --git a/pw_build/py/pw_build/__init__.py b/pw_build/py/pw_build/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pw_build/py/pw_build/__init__.py
diff --git a/pw_build/py/pw_build/error.py b/pw_build/py/pw_build/error.py
new file mode 100644
index 0000000..d98d55d
--- /dev/null
+++ b/pw_build/py/pw_build/error.py
@@ -0,0 +1,46 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Prints an error message and exits unsuccessfully."""
+
+import argparse
+import logging
+import sys
+
+import pw_cli.log
+
+_LOG = logging.getLogger(__name__)
+
+
+def _parse_args():
+    parser = argparse.ArgumentParser(description=__doc__)
+    parser.add_argument('--message', help='Error message to print')
+    parser.add_argument('--target', help='GN target in which error occurred')
+    return parser.parse_args()
+
+
+def main(message: str, target: str) -> int:
+    _LOG.error('')
+    _LOG.error('Build error:')
+    _LOG.error('')
+    for line in message.split('\\n'):
+        _LOG.error('  %s', line)
+    _LOG.error('')
+    _LOG.error('(in %s)', target)
+    _LOG.error('')
+    return 1
+
+
+if __name__ == '__main__':
+    pw_cli.log.install()
+    sys.exit(main(**vars(_parse_args())))
diff --git a/pw_build/py/pw_build/exec.py b/pw_build/py/pw_build/exec.py
index 0b74458..b956c13 100644
--- a/pw_build/py/pw_build/exec.py
+++ b/pw_build/py/pw_build/exec.py
@@ -22,7 +22,11 @@
 import sys
 from typing import Dict, Optional
 
-import pw_cli.log
+# Need to be able to run without pw_cli installed in the virtualenv.
+try:
+    import pw_cli.log
+except ImportError:
+    pass
 
 _LOG = logging.getLogger(__name__)
 
@@ -157,5 +161,7 @@
 
 
 if __name__ == '__main__':
-    pw_cli.log.install()
+    # If pw_cli is not yet installed in the virtualenv just skip it.
+    if 'pw_cli' in globals():
+        pw_cli.log.install()
     sys.exit(main())
diff --git a/pw_build/py/pw_build/generate_python_package_gn.py b/pw_build/py/pw_build/generate_python_package_gn.py
new file mode 100755
index 0000000..b3681b6
--- /dev/null
+++ b/pw_build/py/pw_build/generate_python_package_gn.py
@@ -0,0 +1,115 @@
+#!/usr/bin/env python
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Generates a BUILD.gn for a Python package.
+
+Pass the script a list of paths to the root directory of a Python package (where
+setup.py is).
+
+Don't forget to add the pw_python_package to a top-level group, or it will not
+be included in the build.
+"""
+
+from datetime import datetime
+from pathlib import Path
+import sys
+from typing import Iterable, List, NamedTuple
+
+from pw_presubmit import git_repo
+
+_HEADER = f"""\
+# Copyright {datetime.now().year} The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/python.gni")
+"""
+
+
+class PackageFiles(NamedTuple):
+    setup: List[Path]
+    sources: List[Path]
+    tests: List[Path]
+    other: List[Path]
+
+
+def _find_package_files(root_dir: Path) -> PackageFiles:
+    files = git_repo.list_files(pathspecs=('*.py', '*.toml', '*.cfg'),
+                                repo_path=root_dir)
+
+    package_files = PackageFiles([], [], [], [])
+
+    for file in files:
+        if file.parent == root_dir:
+            if file.name == 'setup.py' or file.suffix != '.py':
+                package_files.setup.append(file)
+            elif file.stem.startswith('test_') or file.stem.endswith('_test'):
+                package_files.tests.append(file)
+            else:
+                package_files.other.append(file)
+        else:
+            package_files.sources.append(file)
+
+    return package_files
+
+
+def _gn_list(name: str, files: Iterable[Path], base: Path) -> Iterable[str]:
+    if files:
+        yield f'  {name} = ['
+        for file in files:
+            yield f'    "{file.relative_to(base).as_posix()}",'
+        yield '  ]'
+
+
+def generate_build_gn(root_dir: Path):
+    files = _find_package_files(root_dir)
+
+    yield _HEADER
+
+    yield 'pw_python_package("py") {'
+
+    yield from _gn_list('setup', files.setup, root_dir)
+    yield from _gn_list('sources', files.sources, root_dir)
+    yield from _gn_list('tests', files.tests, root_dir)
+
+    # Don't include the "other" files for now.
+    # yield from _gn_list('other', files.other, root_dir)
+
+    yield '}'
+
+
+def main(paths: Iterable[Path]):
+    for path in paths:
+        path.joinpath('BUILD.gn').write_text(
+            '\n'.join(generate_build_gn(path)) + '\n')
+
+
+if __name__ == '__main__':
+    if len(sys.argv) > 1:
+        main(Path(p).resolve() for p in sys.argv[1:])
+    else:
+        print(__file__, '', __doc__.strip(), sep='\n', file=sys.stderr)
+        sys.exit(1)
diff --git a/pw_build/py/pw_build/generated_tests.py b/pw_build/py/pw_build/generated_tests.py
new file mode 100644
index 0000000..b3c280a
--- /dev/null
+++ b/pw_build/py/pw_build/generated_tests.py
@@ -0,0 +1,187 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Tools for generating Pigweed tests that execute in C++ and Python."""
+
+import argparse
+from dataclasses import dataclass
+from datetime import datetime
+from collections import defaultdict
+import unittest
+
+from typing import (Any, Callable, Dict, Generic, Iterable, Iterator, List,
+                    Sequence, TextIO, TypeVar, Union)
+
+_CPP_HEADER = f"""\
+// Copyright {datetime.now().year} The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// AUTOGENERATED - DO NOT EDIT
+//
+// Generated at {datetime.now().isoformat()}
+
+// clang-format off
+"""
+
+
+class Error(Exception):
+    """Something went wrong when generating tests."""
+
+
+T = TypeVar('T')
+
+
+@dataclass
+class Context(Generic[T]):
+    """Info passed into test generator functions for each test case."""
+    group: str
+    count: int
+    total: int
+    test_case: T
+
+    def cc_name(self) -> str:
+        name = ''.join(w.capitalize()
+                       for w in self.group.replace('-', ' ').split(' '))
+        name = ''.join(c if c.isalnum() else '_' for c in name)
+        return f'{name}_{self.count}' if self.total > 1 else name
+
+    def py_name(self) -> str:
+        name = 'test_' + ''.join(c if c.isalnum() else '_'
+                                 for c in self.group.lower())
+        return f'{name}_{self.count}' if self.total > 1 else name
+
+
+# Test cases are specified as a sequence of strings or test case instances. The
+# strings are used to separate the tests into named groups. For example:
+#
+#   STR_SPLIT_TEST_CASES = (
+#     'Empty input',
+#     MyTestCase('', '', []),
+#     MyTestCase('', 'foo', []),
+#     'Split on single character',
+#     MyTestCase('abcde', 'c', ['ab', 'de']),
+#     ...
+#   )
+#
+GroupOrTest = Union[str, T]
+
+# Python tests are generated by a function that returns a function usable as a
+# unittest.TestCase method.
+PyTest = Callable[[unittest.TestCase], None]
+PyTestGenerator = Callable[[Context[T]], PyTest]
+
+# C++ tests are generated with a function that returns or yields lines of C++
+# code for the given test case.
+CcTestGenerator = Callable[[Context[T]], Iterable[str]]
+
+
+class TestGenerator(Generic[T]):
+    """Generates tests for multiple languages from a series of test cases."""
+    def __init__(self, test_cases: Sequence[GroupOrTest[T]]):
+        self._cases: Dict[str, List[T]] = defaultdict(list)
+        message = ''
+
+        if len(test_cases) < 2:
+            raise Error('At least one test group and test case must be provided')
+
+        if not isinstance(test_cases[0], str):
+            raise Error(
+                'The first item in the test cases must be a group name string')
+
+        for case in test_cases:
+            if isinstance(case, str):
+                message = case
+            else:
+                self._cases[message].append(case)
+
+        if '' in self._cases:
+            raise Error('Empty test group names are not permitted')
+
+    def _test_contexts(self) -> Iterator[Context[T]]:
+        for group, test_list in self._cases.items():
+            for i, test_case in enumerate(test_list, 1):
+                yield Context(group, i, len(test_list), test_case)
+
+    def _generate_python_tests(self, define_py_test: PyTestGenerator):
+        tests: Dict[str, Callable[[Any], None]] = {}
+
+        for ctx in self._test_contexts():
+            test = define_py_test(ctx)
+            test.__name__ = ctx.py_name()
+
+            if test.__name__ in tests:
+                raise Error(
+                    f'Multiple Python tests are named {test.__name__}!')
+
+            tests[test.__name__] = test
+
+        return tests
+
+    def python_tests(self, name: str, define_py_test: PyTestGenerator) -> type:
+        """Returns a Python unittest.TestCase class with tests for each case."""
+        return type(name, (unittest.TestCase, ),
+                    self._generate_python_tests(define_py_test))
+
+    def _generate_cc_tests(self, define_cpp_test: CcTestGenerator, header: str,
+                           footer: str) -> Iterator[str]:
+        yield _CPP_HEADER
+        yield header
+
+        for ctx in self._test_contexts():
+            yield from define_cpp_test(ctx)
+            yield ''
+
+        yield footer
+
+    def cc_tests(self, output: TextIO, define_cpp_test: CcTestGenerator,
+                 header: str, footer: str):
+        """Writes C++ unit tests for each test case to the given file."""
+        for line in self._generate_cc_tests(define_cpp_test, header, footer):
+            output.write(line)
+            output.write('\n')
+
+
+def _to_chars(data: bytes) -> Iterator[str]:
+    for i, byte in enumerate(data):
+        try:
+            char = data[i:i + 1].decode()
+            yield char if char.isprintable() else fr'\x{byte:02x}' '""'
+        except UnicodeDecodeError:
+            yield fr'\x{byte:02x}' '""'
+
+
+def cc_string(data: Union[str, bytes]) -> str:
+    """Returns a C++ string literal version of a byte string or UTF-8 string."""
+    if isinstance(data, str):
+        data = data.encode()
+
+    return '"' + ''.join(_to_chars(data)) + '"'
+
+
+def parse_test_generation_args() -> argparse.Namespace:
+    parser = argparse.ArgumentParser(description='Generate unit test files')
+    parser.add_argument('--generate-cc-test',
+                        type=argparse.FileType('w'),
+                        help='Generate the C++ test file')
+    return parser.parse_known_args()[0]
diff --git a/pw_build/py/pw_build/nop.py b/pw_build/py/pw_build/nop.py
index ffe4a59..9fbc5f0 100644
--- a/pw_build/py/pw_build/nop.py
+++ b/pw_build/py/pw_build/nop.py
@@ -16,7 +16,7 @@
 The purpose of this script is to allow for source file dependencies within GN
 to be attached to targets that do not typically support them, such as groups.
 
-For example, instead of creating a group target, a pw_python_script target to
+For example, instead of creating a group target, a pw_python_action target to
 run this script can be created. The script can be given a list of input files,
 causing GN to rebuild the target and everything that depends on it whenever any
 input file is modified.
diff --git a/pw_build/py/pw_build/py.typed b/pw_build/py/pw_build/py.typed
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pw_build/py/pw_build/py.typed
diff --git a/pw_build/py/pw_build/python_runner.py b/pw_build/py/pw_build/python_runner.py
index e627bb0..0bd5f96 100755
--- a/pw_build/py/pw_build/python_runner.py
+++ b/pw_build/py/pw_build/python_runner.py
@@ -21,6 +21,7 @@
 from dataclasses import dataclass
 import enum
 import logging
+import os
 from pathlib import Path
 import re
 import shlex
@@ -51,6 +52,13 @@
     parser.add_argument('--current-toolchain',
                         required=True,
                         help='Value of current_toolchain')
+    parser.add_argument('--directory',
+                        type=Path,
+                        help='Execute the command from this directory')
+    parser.add_argument('--module', help='Run this module instead of a script')
+    parser.add_argument('--env',
+                        action='append',
+                        help='Environment variables to set as NAME=VALUE')
     parser.add_argument(
         '--touch',
         type=Path,
@@ -124,7 +132,7 @@
 
         # Resolve the directory to an absolute path
         set_attr('dir', paths.resolve(directory))
-        set_attr('relative_dir', self.dir.relative_to(paths.root))
+        set_attr('relative_dir', self.dir.relative_to(paths.root.resolve()))
 
         set_attr(
             'out_dir',
@@ -149,7 +157,8 @@
     variables: Dict[str, str]
 
 
-_GN_NINJA_BUILD_STATEMENT = re.compile(r'^build (.+):[ \n]')
+# Matches a non-phony build statement.
+_GN_NINJA_BUILD_STATEMENT = re.compile(r'^build (.+):[ \n](?!phony\b)')
 
 
 def _parse_build_artifacts(build_dir: Path, fd) -> Iterator[_Artifact]:
@@ -198,7 +207,7 @@
 
     with ninja_file.open() as fd:
         for path, variables in _parse_build_artifacts(paths.build, fd):
-            # GN uses .stamp files when there is no build artifact.
+            # Older GN used .stamp files when there is no build artifact.
             if path.suffix == '.stamp':
                 continue
 
@@ -213,7 +222,7 @@
 
 def _search_toolchain_ninja(ninja_file: Path, paths: GnPaths,
                             target: Label) -> Optional[Path]:
-    """Searches the toolchain.ninja file for <target>.stamp.
+    """Searches the toolchain.ninja file for outputs from the provided target.
 
     Files created by an action appear in toolchain.ninja instead of in their own
     <target>.ninja. If the specified target has a single output file in
@@ -222,18 +231,25 @@
 
     _LOG.debug('Searching toolchain Ninja file %s for %s', ninja_file, target)
 
+    # Older versions of GN used a .stamp file to signal completion of a target.
     stamp_dir = target.out_dir.relative_to(paths.build).as_posix()
     stamp_tool = f'{target.toolchain_name()}_stamp'
     stamp_statement = f'build {stamp_dir}/{target.name}.stamp: {stamp_tool} '
 
+    # Newer GN uses a phony Ninja target to signal completion of a target.
+    phony_dir = Path(target.toolchain_name(), 'phony',
+                     target.relative_dir).as_posix()
+    phony_statement = f'build {phony_dir}/{target.name}: phony '
+
     with ninja_file.open() as fd:
         for line in fd:
-            if line.startswith(stamp_statement):
-                output_files = line[len(stamp_statement):].strip().split()
-                if len(output_files) == 1:
-                    return paths.build / output_files[0]
+            for statement in (phony_statement, stamp_statement):
+                if line.startswith(statement):
+                    output_files = line[len(statement):].strip().split()
+                    if len(output_files) == 1:
+                        return paths.build / output_files[0]
 
-                break
+                    break
 
     return None
 
@@ -400,9 +416,12 @@
 def main(
     gn_root: Path,
     current_path: Path,
+    directory: Optional[Path],
     original_cmd: List[str],
     default_toolchain: str,
     current_toolchain: str,
+    module: Optional[str],
+    env: Optional[List[str]],
     capture_output: bool,
     touch: Optional[Path],
 ) -> int:
@@ -422,6 +441,23 @@
                     toolchain=tool)
 
     command = [sys.executable]
+
+    if module is not None:
+        command += ['-m', module]
+
+    run_args: dict = dict(cwd=directory)
+
+    if env is not None:
+        environment = os.environ.copy()
+        environment.update((k, v) for k, v in (a.split('=', 1) for a in env))
+        run_args['env'] = environment
+
+    if capture_output:
+        # Combine stdout and stderr so that error messages are correctly
+        # interleaved with the rest of the output.
+        run_args['stdout'] = subprocess.PIPE
+        run_args['stderr'] = subprocess.STDOUT
+
     try:
         for arg in original_cmd[1:]:
             command += expand_expressions(paths, arg)
@@ -431,22 +467,11 @@
 
     _LOG.debug('RUN %s', ' '.join(shlex.quote(arg) for arg in command))
 
-    if capture_output:
-        completed_process = subprocess.run(
-            command,
-            # Combine stdout and stderr so that error messages are correctly
-            # interleaved with the rest of the output.
-            stdout=subprocess.PIPE,
-            stderr=subprocess.STDOUT,
-        )
-    else:
-        completed_process = subprocess.run(command)
+    completed_process = subprocess.run(command, **run_args)
 
     if completed_process.returncode != 0:
         _LOG.debug('Command failed; exit code: %d',
                    completed_process.returncode)
-        # TODO(pwbug/34): Print a cross-platform pastable-in-shell command, to
-        # help users track down what is happening when a command is broken.
         if capture_output:
             sys.stdout.buffer.write(completed_process.stdout)
     elif touch:
diff --git a/pw_build/py/pw_build/python_wheels.py b/pw_build/py/pw_build/python_wheels.py
new file mode 100644
index 0000000..22ad3d5
--- /dev/null
+++ b/pw_build/py/pw_build/python_wheels.py
@@ -0,0 +1,65 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Wrapper for the CLI commands for Python .whl building."""
+
+import argparse
+import logging
+import os
+import subprocess
+import sys
+
+_LOG = logging.getLogger(__name__)
+
+
+def _parse_args():
+    parser = argparse.ArgumentParser(description=__doc__)
+    parser.add_argument(
+        'setup_files',
+        nargs='+',
+        help='Path to a setup.py file to invoke to build wheels.')
+    parser.add_argument('--out_dir',
+                        help='Path where the build artifacts should be put.')
+
+    return parser.parse_args()
+
+
+def build_wheels(setup_files, out_dir):
+    """Build Python wheels by calling 'python setup.py bdist_wheel'."""
+    dist_dir = os.path.abspath(out_dir)
+
+    for filename in setup_files:
+        if not (filename.endswith('setup.py') and os.path.isfile(filename)):
+            raise RuntimeError(f'Unable to find setup.py file at {filename}.')
+
+        working_dir = os.path.dirname(filename)
+
+        cmd = [
+            sys.executable,
+            'setup.py',
+            'bdist_wheel',
+            '--dist-dir',
+            dist_dir,
+        ]
+        _LOG.debug('Running command:\n  %s', ' '.join(cmd))
+        subprocess.check_call(cmd, cwd=working_dir)
+
+
+def main():
+    build_wheels(**vars(_parse_args()))
+
+
+if __name__ == '__main__':
+    logging.basicConfig()
+    main()
+    sys.exit(0)
diff --git a/pw_build/py/pw_build/zip.py b/pw_build/py/pw_build/zip.py
new file mode 100644
index 0000000..4942d67
--- /dev/null
+++ b/pw_build/py/pw_build/zip.py
@@ -0,0 +1,136 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Takes a set of input files and zips them up."""
+
+import argparse
+import pathlib
+import sys
+import zipfile
+
+from collections.abc import Iterable
+
+DEFAULT_DELIMITER = '>'
+
+
+class ZipError(Exception):
+    """Raised when a pw_zip archive can't be built as specified."""
+
+
+def _parse_args():
+    parser = argparse.ArgumentParser(description=__doc__)
+    parser.add_argument(
+        '--delimiter',
+        nargs='?',
+        default=DEFAULT_DELIMITER,
+        help='Symbol that separates the path and the zip path destination.')
+    parser.add_argument(
+        '--input_list',
+        nargs='+',
+        help='Paths to files and dirs to zip and their desired zip location.')
+    parser.add_argument('--out_filename', help='Zip file destination.')
+
+    return parser.parse_args()
+
+
+def zip_up(input_list: Iterable,
+           out_filename: str,
+           delimiter=DEFAULT_DELIMITER):
+    """Zips up all input files/dirs.
+
+    Args:
+        input_list: List of strings consisting of file or directory,
+            the delimiter, and a path to the desired .zip destination.
+        out_filename: Path and name of the .zip file.
+        delimiter: string that separates the input source and the zip
+            destination. Defaults to '>'. Examples:
+            '/foo.txt > /'         # /foo.txt zipped as /foo.txt
+            '/foo.txt > /bar.txt'  # /foo.txt zipped as /bar.txt
+            'foo.txt > /'  # foo.txt from invokers dir zipped as /foo.txt
+            '/bar/ > /'            # Whole bar dir zipped into /
+    """
+    with zipfile.ZipFile(out_filename, 'w', zipfile.ZIP_DEFLATED) as zip_file:
+        for _input in input_list:
+            try:
+                source, destination = _input.split(delimiter)
+                source = source.strip()
+                destination = destination.strip()
+            except ValueError as value_error:
+                msg = (
+                    f'Input in the form of "[filename or dir] {delimiter} '
+                    f'/zip_destination/" expected. Instead got:\n  {_input}')
+                raise ZipError(msg) from value_error
+            if not source:
+                raise ZipError(
+                    f'Bad input:\n  {_input}\nInput source '
+                    f'cannot be empty. Please specify the input in the form '
+                    f'of "[filename or dir] {delimiter} /zip_destination/".')
+            if not destination.startswith('/'):
+                raise ZipError(
+                    f'Bad input:\n  {_input}\nZip destination '
+                    f'"{destination}" must start with "/" to indicate the '
+                    f'zip file\'s root directory.')
+            source_path = pathlib.Path(source)
+            destination_path = pathlib.PurePath(destination)
+
+            # Case: the input source path points to a file.
+            if source_path.is_file():
+                # Case: "foo.txt > /mydir/"; destination is dir. Put foo.txt
+                # into mydir as /mydir/foo.txt
+                if destination.endswith('/'):
+                    zip_file.write(source_path,
+                                   destination_path / source_path.name)
+                # Case: "foo.txt > /bar.txt"; destination is a file--rename the
+                # source file: put foo.txt into the zip as /bar.txt
+                else:
+                    zip_file.write(source_path, destination_path)
+                continue
+            # Case: the input source path points to a directory.
+            if source_path.is_dir():
+                zip_up_dir(source, source_path, destination, destination_path,
+                           zip_file)
+                continue
+            raise ZipError(f'Unknown source path\n  {source_path}')
+
+
+def zip_up_dir(source: str, source_path: pathlib.Path, destination: str,
+               destination_path: pathlib.PurePath, zip_file: zipfile.ZipFile):
+    if not source.endswith('/'):
+        raise ZipError(
+            f'Source path:\n  {source}\nis a directory, but is '
+            f'missing a trailing "/". The / requirement helps prevent bugs. '
+            f'To fix, add a trailing /:\n  {source}/')
+    if not destination.endswith('/'):
+        raise ZipError(
+            f'Destination path:\n  {destination}\nis a directory, '
+            f'but is missing a trailing "/". The / requirement helps prevent '
+            f'bugs. To fix, add a trailing /:\n  {destination}/')
+
+    # Walk the directory and add zip all of the files with the
+    # same structure as the source.
+    for file_path in source_path.glob('**/*'):
+        if file_path.is_file():
+            rel_path = file_path.relative_to(source_path)
+            zip_file.write(file_path, destination_path / rel_path)
+
+
+def main():
+    zip_up(**vars(_parse_args()))
+
+
+if __name__ == '__main__':
+    try:
+        main()
+    except ZipError as err:
+        print('ERROR:', str(err), file=sys.stderr)
+        sys.exit(1)
diff --git a/pw_build/py/pw_build/zip_test.py b/pw_build/py/pw_build/zip_test.py
new file mode 100644
index 0000000..57a3e5d
--- /dev/null
+++ b/pw_build/py/pw_build/zip_test.py
@@ -0,0 +1,254 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Tests for the pw_build.zip module."""
+
+import unittest
+import os
+import tempfile
+import pathlib
+import zipfile
+
+from pw_build.zip import zip_up, ZipError
+
+DELIMITER = '>'
+IN_FILENAMES = [
+    'file1.txt',
+    'file2.txt',
+    'dir1/file3.txt',
+    'dir1/file4.txt',
+    'dir1/dir2/file5.txt',
+    'dir1/dir2/file6.txt',
+]
+
+
+def make_directory(parent_path: pathlib.Path, dir_name: str, filenames: list):
+    """Creates a directory and returns a pathlib.Path() of its root dir.
+
+        Args:
+            parent_path: Path to directory where the new directory will be made.
+            dir_name: Name of the new directory.
+            filenames: list of file contents of the new directory. Also allows
+                the creation of subdirectories. Example:
+                [
+                    'file1.txt',
+                    'subdir/file2.txt'
+                ]
+
+        Returns: pathlib.Path() to the newly created directory.
+    """
+    root_path = pathlib.Path(parent_path / dir_name)
+    os.mkdir(root_path)
+    for filename in filenames:
+        # Make the sub directories if they don't already exist.
+        directories = filename.split('/')[:-1]
+        for i in range(len(directories)):
+            directory = pathlib.PurePath('/'.join(directories[:i + 1]))
+            if not (root_path / directory).is_dir():
+                os.mkdir(root_path / directory)
+
+        # Create a file at the destination.
+        touch(root_path, filename)
+    return root_path
+
+
+def touch(parent_dir: pathlib.Path, filename: str):
+    """Creates an empty file at parent_dir/filename."""
+    with open(parent_dir / filename, 'a') as touch_file:
+        touch_file.write(filename)
+
+
+def get_directory_contents(path: pathlib.Path):
+    """Iterates through a directory and returns a set of its contents."""
+    contents = set()
+    for filename in path.glob('**/*'):
+        # Remove the original parent directories to get just the relative path.
+        contents.add(filename.relative_to(path))
+    return contents
+
+
+class TestZipping(unittest.TestCase):
+    """Tests for the pw_build.zip module."""
+    def test_zip_up_file(self):
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            # Arrange.
+            tmp_path = pathlib.Path(tmp_dir)
+            in_path = make_directory(tmp_path, 'in', IN_FILENAMES)
+            input_list = [f'{in_path}/file1.txt {DELIMITER} /']
+            out_filename = f'{tmp_path}/out.zip'
+
+            # Act.
+            zip_up(input_list, out_filename)
+            out_path = pathlib.Path(f'{tmp_path}/out/')
+            with zipfile.ZipFile(out_filename, 'r') as zip_file:
+                zip_file.extractall(out_path)
+            expected_path = make_directory(tmp_path, 'expected', ['file1.txt'])
+
+            # Assert.
+            self.assertSetEqual(get_directory_contents(out_path),
+                                get_directory_contents(expected_path))
+
+    def test_zip_up_dir(self):
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            # Arrange.
+            tmp_path = pathlib.Path(tmp_dir)
+            in_path = make_directory(tmp_path, 'in', IN_FILENAMES)
+            input_list = [f'{in_path}/dir1/ {DELIMITER} /']
+            out_filename = f'{tmp_path}/out.zip'
+
+            # Act.
+            zip_up(input_list, out_filename)
+            out_path = pathlib.Path(f'{tmp_path}/out/')
+            with zipfile.ZipFile(out_filename, 'r') as zip_file:
+                zip_file.extractall(out_path)
+            expected_path = make_directory(tmp_path, 'expected', [
+                'file3.txt',
+                'file4.txt',
+                'dir2/file5.txt',
+                'dir2/file6.txt',
+            ])
+
+            # Assert.
+            self.assertSetEqual(get_directory_contents(out_path),
+                                get_directory_contents(expected_path))
+
+    def test_file_rename(self):
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            # Arrange.
+            tmp_path = pathlib.Path(tmp_dir)
+            in_path = make_directory(tmp_path, 'in', IN_FILENAMES)
+            input_list = [f'{in_path}/file1.txt {DELIMITER} /renamed.txt']
+            out_filename = f'{tmp_path}/out.zip'
+
+            # Act.
+            zip_up(input_list, out_filename)
+            out_path = pathlib.Path(f'{tmp_path}/out/')
+            with zipfile.ZipFile(out_filename, 'r') as zip_file:
+                zip_file.extractall(out_path)
+            expected_path = make_directory(tmp_path, 'expected',
+                                           ['renamed.txt'])
+
+            # Assert.
+            self.assertSetEqual(get_directory_contents(out_path),
+                                get_directory_contents(expected_path))
+
+    def test_file_move(self):
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            # Arrange.
+            tmp_path = pathlib.Path(tmp_dir)
+            in_path = make_directory(tmp_path, 'in', IN_FILENAMES)
+            input_list = [f'{in_path}/file1.txt {DELIMITER} /foo/']
+            out_filename = f'{tmp_path}/out.zip'
+
+            # Act.
+            zip_up(input_list, out_filename)
+            out_path = pathlib.Path(f'{tmp_path}/out/')
+            with zipfile.ZipFile(out_filename, 'r') as zip_file:
+                zip_file.extractall(out_path)
+            expected_path = make_directory(tmp_path, 'expected',
+                                           ['foo/file1.txt'])
+
+            # Assert.
+            self.assertSetEqual(get_directory_contents(out_path),
+                                get_directory_contents(expected_path))
+
+    def test_dir_move(self):
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            # Arrange.
+            tmp_path = pathlib.Path(tmp_dir)
+            in_path = make_directory(tmp_path, 'in', IN_FILENAMES)
+            input_list = [f'{in_path}/dir1/ {DELIMITER} /foo/']
+            out_filename = f'{tmp_path}/out.zip'
+
+            # Act.
+            zip_up(input_list, out_filename)
+            out_path = pathlib.Path(f'{tmp_path}/out/')
+            with zipfile.ZipFile(out_filename, 'r') as zip_file:
+                zip_file.extractall(out_path)
+            expected_path = make_directory(tmp_path, 'expected', [
+                'foo/file3.txt',
+                'foo/file4.txt',
+                'foo/dir2/file5.txt',
+                'foo/dir2/file6.txt',
+            ])
+
+            # Assert.
+            self.assertSetEqual(get_directory_contents(out_path),
+                                get_directory_contents(expected_path))
+
+    def test_change_delimiter(self):
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            # Arrange.
+            tmp_path = pathlib.Path(tmp_dir)
+            in_path = make_directory(tmp_path, 'in', IN_FILENAMES)
+            delimiter = '==>'
+            input_list = [f'{in_path}/file1.txt {delimiter} /']
+            out_filename = f'{tmp_path}/out.zip'
+
+            # Act.
+            zip_up(input_list, out_filename, delimiter=delimiter)
+            out_path = pathlib.Path(f'{tmp_path}/out/')
+            with zipfile.ZipFile(out_filename, 'r') as zip_file:
+                zip_file.extractall(out_path)
+            expected_path = make_directory(tmp_path, 'expected', ['file1.txt'])
+
+            # Assert.
+            self.assertSetEqual(get_directory_contents(out_path),
+                                get_directory_contents(expected_path))
+
+    def test_wrong_input_syntax_raises_error(self):
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            # Arrange.
+            bad_inputs = [
+                '',  # Empty input
+                f'{tmp_dir}/ /',  # No delimiter
+                f'{tmp_dir}/ {DELIMITER} ',  # No zip destination
+                f'{tmp_dir} /',  # No source
+                f'{tmp_dir}/',  # No delimiter or zip destination
+                f'{DELIMITER}',  # No source or zip destination
+                f'{tmp_dir} {DELIMITER} /',  # No trailing source '/'
+                f'{tmp_dir}/ {DELIMITER} foo/',  # No leading zip root '/'
+                f'{tmp_dir}/ {DELIMITER} /foo',  # No trailing zip dest '/'
+                f'{tmp_dir}/ {DELIMITER} /{tmp_dir}/ '
+                f'{DELIMITER} /{tmp_dir}/',  # Too many paths on split
+            ]
+            out_filename = f'{tmp_dir}/out.zip'
+
+            # Act & Assert.
+            for bad_input in bad_inputs:
+                with self.assertRaises(ZipError):
+                    zip_up([bad_input], out_filename)
+
+    def test_nonexistent_file_raises_error(self):
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            # Arrange.
+            input_list = [f'{tmp_dir}/nonexistent-file.txt > /']
+            out_filename = f'{tmp_dir}/out.zip'
+
+            # Act & Assert.
+            with self.assertRaises(ZipError):
+                zip_up(input_list, out_filename)
+
+    def test_nonexistent_dir_raises_error(self):
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            # Arrange.
+            input_list = [f'{tmp_dir}/nonexistent-dir/ > /']
+            out_filename = f'{tmp_dir}/out.zip'
+
+            # Act & Assert.
+            with self.assertRaises(ZipError):
+                zip_up(input_list, out_filename)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/pw_build/py/python_runner_test.py b/pw_build/py/python_runner_test.py
index 8ba6edb..86497f4 100755
--- a/pw_build/py/python_runner_test.py
+++ b/pw_build/py/python_runner_test.py
@@ -16,14 +16,21 @@
 
 import os
 from pathlib import Path
+import platform
 import tempfile
 import unittest
 
 from pw_build.python_runner import ExpressionError, GnPaths, Label, TargetInfo
 from pw_build.python_runner import expand_expressions
 
-TEST_PATHS = GnPaths(Path('/gn_root'), Path('/gn_root/out'),
-                     Path('/gn_root/some/cwd'), '//toolchains/cool:ToolChain')
+ROOT = Path(r'C:\gn_root' if platform.system() == 'Windows' else '/gn_root')
+
+TEST_PATHS = GnPaths(
+    ROOT,
+    ROOT / 'out',
+    ROOT / 'some' / 'cwd',
+    '//toolchains/cool:ToolChain',
+)
 
 
 class LabelTest(unittest.TestCase):
@@ -38,63 +45,63 @@
         for paths, toolchain in self._paths_and_toolchain_name:
             label = Label(paths, '//')
             self.assertEqual(label.name, '')
-            self.assertEqual(label.dir, Path('/gn_root'))
+            self.assertEqual(label.dir, ROOT)
             self.assertEqual(label.out_dir,
-                             Path('/gn_root/out', toolchain, 'obj'))
+                             ROOT.joinpath('out', toolchain, 'obj'))
             self.assertEqual(label.gen_dir,
-                             Path('/gn_root/out', toolchain, 'gen'))
+                             ROOT.joinpath('out', toolchain, 'gen'))
 
     def test_absolute(self):
         for paths, toolchain in self._paths_and_toolchain_name:
             label = Label(paths, '//foo/bar:baz')
             self.assertEqual(label.name, 'baz')
-            self.assertEqual(label.dir, Path('/gn_root/foo/bar'))
+            self.assertEqual(label.dir, ROOT.joinpath('foo/bar'))
             self.assertEqual(label.out_dir,
-                             Path('/gn_root/out', toolchain, 'obj/foo/bar'))
+                             ROOT.joinpath('out', toolchain, 'obj/foo/bar'))
             self.assertEqual(label.gen_dir,
-                             Path('/gn_root/out', toolchain, 'gen/foo/bar'))
+                             ROOT.joinpath('out', toolchain, 'gen/foo/bar'))
 
     def test_absolute_implicit_target(self):
         for paths, toolchain in self._paths_and_toolchain_name:
             label = Label(paths, '//foo/bar')
             self.assertEqual(label.name, 'bar')
-            self.assertEqual(label.dir, Path('/gn_root/foo/bar'))
+            self.assertEqual(label.dir, ROOT.joinpath('foo/bar'))
             self.assertEqual(label.out_dir,
-                             Path('/gn_root/out', toolchain, 'obj/foo/bar'))
+                             ROOT.joinpath('out', toolchain, 'obj/foo/bar'))
             self.assertEqual(label.gen_dir,
-                             Path('/gn_root/out', toolchain, 'gen/foo/bar'))
+                             ROOT.joinpath('out', toolchain, 'gen/foo/bar'))
 
     def test_relative(self):
         for paths, toolchain in self._paths_and_toolchain_name:
             label = Label(paths, ':tgt')
             self.assertEqual(label.name, 'tgt')
-            self.assertEqual(label.dir, Path('/gn_root/some/cwd'))
+            self.assertEqual(label.dir, ROOT.joinpath('some/cwd'))
             self.assertEqual(label.out_dir,
-                             Path('/gn_root/out', toolchain, 'obj/some/cwd'))
+                             ROOT.joinpath('out', toolchain, 'obj/some/cwd'))
             self.assertEqual(label.gen_dir,
-                             Path('/gn_root/out', toolchain, 'gen/some/cwd'))
+                             ROOT.joinpath('out', toolchain, 'gen/some/cwd'))
 
     def test_relative_subdir(self):
         for paths, toolchain in self._paths_and_toolchain_name:
             label = Label(paths, 'tgt')
             self.assertEqual(label.name, 'tgt')
-            self.assertEqual(label.dir, Path('/gn_root/some/cwd/tgt'))
+            self.assertEqual(label.dir, ROOT.joinpath('some/cwd/tgt'))
             self.assertEqual(
                 label.out_dir,
-                Path('/gn_root/out', toolchain, 'obj/some/cwd/tgt'))
+                ROOT.joinpath('out', toolchain, 'obj/some/cwd/tgt'))
             self.assertEqual(
                 label.gen_dir,
-                Path('/gn_root/out', toolchain, 'gen/some/cwd/tgt'))
+                ROOT.joinpath('out', toolchain, 'gen/some/cwd/tgt'))
 
     def test_relative_parent_dir(self):
         for paths, toolchain in self._paths_and_toolchain_name:
             label = Label(paths, '..:tgt')
             self.assertEqual(label.name, 'tgt')
-            self.assertEqual(label.dir, Path('/gn_root/some'))
+            self.assertEqual(label.dir, ROOT.joinpath('some'))
             self.assertEqual(label.out_dir,
-                             Path('/gn_root/out', toolchain, 'obj/some'))
+                             ROOT.joinpath('out', toolchain, 'obj/some'))
             self.assertEqual(label.gen_dir,
-                             Path('/gn_root/out', toolchain, 'gen/some'))
+                             ROOT.joinpath('out', toolchain, 'gen/some'))
 
 
 class ResolvePathTest(unittest.TestCase):
@@ -124,7 +131,7 @@
 build fake_toolchain/obj/fake_module/fake_test.fake_test.cc.o: fake_toolchain_cxx ../fake_module/fake_test.cc
 build fake_toolchain/obj/fake_module/fake_test.fake_test_c.c.o: fake_toolchain_cc ../fake_module/fake_test_c.c
 
-build fake_toolchain/obj/fake_module/test/fake_test.elf: fake_tolchain_link fake_tolchain/obj/fake_module/fake_test.fake_test.cc.o fake_tolchain/obj/fake_module/fake_test.fake_test_c.c.o
+build fake_toolchain/obj/fake_module/test/fake_test.elf: fake_toolchain_link fake_toolchain/obj/fake_module/fake_test.fake_test.cc.o fake_toolchain/obj/fake_module/fake_test.fake_test_c.c.o
   ldflags = -Og -fdiagnostics-color
   libs =
   frameworks =
@@ -132,7 +139,7 @@
   output_dir = host_clang_debug/obj/fake_module/test
 '''
 
-NINJA_SOURCE_SET = '''\
+_SOURCE_SET_TEMPLATE = '''\
 defines =
 framework_dirs =
 include_dirs = -I../fake_module/public
@@ -144,7 +151,7 @@
 build fake_toolchain/obj/fake_module/fake_source_set.file_a.cc.o: fake_toolchain_cxx ../fake_module/file_a.cc
 build fake_toolchain/obj/fake_module/fake_source_set.file_b.c.o: fake_toolchain_cc ../fake_module/file_b.c
 
-build fake_toolchain/obj/fake_module/fake_source_set.stamp: fake_tolchain_link fake_tolchain/obj/fake_module/fake_source_set.file_a.cc.o fake_tolchain/obj/fake_module/fake_source_set.file_b.c.o
+build {path} fake_toolchain/obj/fake_module/fake_source_set.file_a.cc.o fake_toolchain/obj/fake_module/fake_source_set.file_b.c.o
   ldflags = -Og -fdiagnostics-color -Wno-error=deprecated
   libs =
   frameworks =
@@ -152,14 +159,23 @@
   output_dir = host_clang_debug/obj/fake_module
 '''
 
+# GN originally used empty .stamp files to mark the completion of a group of
+# dependencies. GN switched to using 'phony' Ninja targets instead, which don't
+# require creating a new file.
+_PHONY_BUILD_PATH = 'fake_toolchain/phony/fake_module/fake_source_set: phony'
+_STAMP_BUILD_PATH = 'fake_toolchain/obj/fake_module/fake_source_set.stamp:'
 
-def _create_ninja_files():
+NINJA_SOURCE_SET = _SOURCE_SET_TEMPLATE.format(path=_PHONY_BUILD_PATH)
+NINJA_SOURCE_SET_STAMP = _SOURCE_SET_TEMPLATE.format(path=_STAMP_BUILD_PATH)
+
+
+def _create_ninja_files(source_set: str) -> tuple:
     tempdir = tempfile.TemporaryDirectory(prefix='pw_build_test_')
 
     module = Path(tempdir.name, 'out', 'fake_toolchain', 'obj', 'fake_module')
     os.makedirs(module)
     module.joinpath('fake_test.ninja').write_text(NINJA_EXECUTABLE)
-    module.joinpath('fake_source_set.ninja').write_text(NINJA_SOURCE_SET)
+    module.joinpath('fake_source_set.ninja').write_text(source_set)
     module.joinpath('fake_no_objects.ninja').write_text('\n')
 
     outdir = Path(tempdir.name, 'out', 'fake_toolchain', 'obj', 'fake_module')
@@ -175,7 +191,8 @@
 class TargetTest(unittest.TestCase):
     """Tests querying GN target information."""
     def setUp(self):
-        self._tempdir, self._outdir, self._paths = _create_ninja_files()
+        self._tempdir, self._outdir, self._paths = _create_ninja_files(
+            NINJA_SOURCE_SET)
 
     def tearDown(self):
         self._tempdir.cleanup()
@@ -220,10 +237,18 @@
         self.assertIsNone(target.artifact)
 
 
+class StampTargetTest(TargetTest):
+    """Test with old-style .stamp files instead of phony Ninja targets."""
+    def setUp(self):
+        self._tempdir, self._outdir, self._paths = _create_ninja_files(
+            NINJA_SOURCE_SET_STAMP)
+
+
 class ExpandExpressionsTest(unittest.TestCase):
     """Tests expansion of expressions like <TARGET_FILE(//foo)>."""
     def setUp(self):
-        self._tempdir, self._outdir, self._paths = _create_ninja_files()
+        self._tempdir, self._outdir, self._paths = _create_ninja_files(
+            NINJA_SOURCE_SET)
 
     def tearDown(self):
         self._tempdir.cleanup()
@@ -358,5 +383,12 @@
             expand_expressions(self._paths, '<TARGET_OBJECTS(//not_real)>')
 
 
+class StampExpandExpressionsTest(ExpandExpressionsTest):
+    """Test with old-style .stamp files instead of phony Ninja targets."""
+    def setUp(self):
+        self._tempdir, self._outdir, self._paths = _create_ninja_files(
+            NINJA_SOURCE_SET_STAMP)
+
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/pw_build/py/setup.py b/pw_build/py/setup.py
index 784b8e0..c8fc23e 100644
--- a/pw_build/py/setup.py
+++ b/pw_build/py/setup.py
@@ -13,7 +13,7 @@
 # the License.
 """pw_build"""
 
-import setuptools
+import setuptools  # type: ignore
 
 setuptools.setup(
     name='pw_build',
@@ -22,4 +22,9 @@
     author_email='pigweed-developers@googlegroups.com',
     description='Python scripts that support the GN build',
     packages=setuptools.find_packages(),
+    package_data={'pw_build': ['py.typed']},
+    zip_safe=False,
+    install_requires=[
+        'wheel',
+    ],
 )
diff --git a/pw_build/python.gni b/pw_build/python.gni
new file mode 100644
index 0000000..9ca3a43
--- /dev/null
+++ b/pw_build/python.gni
@@ -0,0 +1,308 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/input_group.gni")
+import("$dir_pw_build/python_action.gni")
+
+# Defines a Python package. GN Python packages contain several GN targets:
+#
+#   - $name - Provides the Python files in the build, but does not take any
+#         actions. All subtargets depend on this target.
+#   - $name.lint - Runs static analysis tools on the Python code. This is a group
+#     of two subtargets:
+#     - $name.lint.mypy - Runs mypy.
+#     - $name.lint.pylint - Runs pylint.
+#   - $name.tests - Runs all tests for this package.
+#   - $name.install - Installs the package in a venv.
+#   - $name.wheel - Builds a Python wheel for the package. (Not implemented.)
+#
+# TODO(pwbug/239): Implement installation and wheel building.
+#
+# Args:
+#   setup: List of setup file paths (setup.py or pyproject.toml & setup.cfg),
+#       which must all be in the same directory.
+#   sources: Python sources files in the package.
+#   tests: Test files for this Python package.
+#   python_deps: Dependencies on other pw_python_packages in the GN build.
+#   other_deps: Dependencies on GN targets that are not pw_python_packages.
+#   inputs: Other files to track, such as package_data.
+#
+template("pw_python_package") {
+  if (defined(invoker.sources)) {
+    _all_py_files = invoker.sources
+  } else {
+    _all_py_files = []
+  }
+
+  if (defined(invoker.tests)) {
+    _test_sources = invoker.tests
+  } else {
+    _test_sources = []
+  }
+
+  _all_py_files += _test_sources
+
+  assert(_all_py_files != [], "At least one source or test must be provided")
+
+  # pw_python_script uses pw_python_package, but with a limited set of features.
+  # _pw_standalone signals that this target is actually a pw_python_script.
+  _is_package = !(defined(invoker._pw_standalone) && invoker._pw_standalone)
+
+  if (_is_package) {
+    assert(defined(invoker.setup) && invoker.setup != [],
+           "pw_python_package requires 'setup' to point to a setup.py file " +
+               "or pyproject.toml and setup.cfg files")
+
+    _all_py_files += invoker.setup
+
+    # Get the directories of the setup files. All files must be in the same dir.
+    _setup_dirs = get_path_info(invoker.setup, "dir")
+    _setup_dir = _setup_dirs[0]
+
+    foreach(dir, _setup_dirs) {
+      assert(dir == _setup_dir,
+             "All files in 'setup' must be in the same directory")
+    }
+
+    # If sources are provided, make sure there is an __init__.py file.
+    if (defined(invoker.sources)) {
+      assert(filter_include(invoker.sources, [ "*\b__init__.py" ]) != [],
+             "Python packages must have at least one __init__.py file")
+    }
+  }
+
+  _python_deps = []
+  if (defined(invoker.python_deps)) {
+    foreach(dep, invoker.python_deps) {
+      # Use the fully qualified name so the subtarget can be appended as needed.
+      _python_deps += [ get_label_info(dep, "label_no_toolchain") ]
+    }
+  }
+
+  # Declare the main Python package group. This represents the Python files, but
+  # does not take any actions. GN targets can depend on the package name to run
+  # when any files in the package change.
+  pw_input_group(target_name) {
+    inputs = _all_py_files
+    if (defined(invoker.inputs)) {
+      inputs += invoker.inputs
+    }
+
+    deps = _python_deps
+
+    if (defined(invoker.other_deps)) {
+      deps += invoker.other_deps
+    }
+  }
+
+  _package_target = ":$target_name"
+
+  if (_is_package) {
+    # Install this Python package and its dependencies in the current Python
+    # environment.
+    pw_python_action("$target_name.install") {
+      module = "pip"
+      args = [
+        "install",
+        "--editable",
+        rebase_path(_setup_dir),
+      ]
+
+      stamp = true
+
+      # Parallel pip installations don't work, so serialize pip invocations.
+      pool = "$dir_pw_build:pip_pool"
+
+      deps = [ _package_target ]
+      foreach(dep, _python_deps) {
+        deps += [ "$dep.install" ]
+      }
+    }
+
+    # TODO(pwbug/239): Add support for building groups of wheels. The code below
+    #     is incomplete and untested.
+    pw_python_action("$target_name.wheel") {
+      script = "$dir_pw_build/py/pw_build/python_wheels.py"
+
+      args = [
+        "--out_dir",
+        rebase_path(target_out_dir),
+      ]
+      args += rebase_path(_all_py_files)
+
+      deps = [ _package_target ]
+      stamp = true
+    }
+  } else {
+    # If this is not a package, install or build wheels for its deps only.
+    group("$target_name.install") {
+      deps = []
+      foreach(dep, _python_deps) {
+        deps += [ "$dep.install" ]
+      }
+    }
+    group("$target_name.wheel") {
+      deps = []
+      foreach(dep, _python_deps) {
+        deps += [ "$dep.wheel" ]
+      }
+    }
+  }
+
+  # Define the static analysis targets for this package.
+  group("$target_name.lint") {
+    deps = [
+      "$_package_target.lint.mypy",
+      "$_package_target.lint.pylint",
+    ]
+  }
+
+  pw_python_action("$target_name.lint.mypy") {
+    module = "mypy"
+    args = [
+      "--pretty",
+      "--show-error-codes",
+    ]
+    if (_is_package) {
+      args += [ rebase_path(_setup_dir) ]
+    } else {
+      args += rebase_path(_all_py_files)
+    }
+
+    # Use this environment variable to force mypy to colorize output.
+    # See https://github.com/python/mypy/issues/7771
+    environment = [ "MYPY_FORCE_COLOR=1" ]
+
+    stamp = true
+
+    deps = [ _package_target ]
+    foreach(dep, _python_deps) {
+      deps += [ "$dep.lint.mypy" ]
+    }
+  }
+
+  pw_python_action_foreach("$target_name.lint.pylint") {
+    module = "pylint"
+    args = [
+      "{{source_root_relative_dir}}/{{source_file_part}}",
+      "--jobs=1",
+      "--output-format=colorized",
+    ]
+
+    if (host_os == "win") {
+      # Allow CRLF on Windows, in case Git is set to switch line endings.
+      args += [ "--disable=unexpected-line-ending-format" ]
+    }
+
+    sources = _all_py_files
+
+    stamp = "$target_gen_dir/{{source_target_relative}}.pylint.pw_pystamp"
+
+    # Run pylint from the source root so that pylint detects rcfiles (.pylintrc)
+    # in the source tree.
+    directory = rebase_path("//")
+
+    deps = [ _package_target ]
+    foreach(dep, _python_deps) {
+      deps += [ "$dep.lint.pylint" ]
+    }
+  }
+
+  # Create a target for each test file.
+  _test_targets = []
+
+  foreach(test, _test_sources) {
+    _test_name = string_replace(test, "/", "_")
+    _test_target = "$target_name.tests.$_test_name"
+
+    pw_python_action(_test_target) {
+      script = test
+      stamp = true
+
+      deps = [ _package_target ]
+      foreach(dep, _python_deps) {
+        deps += [ "$dep.tests" ]
+      }
+    }
+
+    _test_targets += [ ":$_test_target" ]
+  }
+
+  group("$target_name.tests") {
+    deps = _test_targets
+  }
+}
+
+# Declares a group of Python packages or other Python groups. pw_python_groups
+# expose the same set of subtargets as pw_python_package (e.g.
+# "$group_name.lint" and "$group_name.tests"), but these apply to all packages
+# in deps and their dependencies.
+template("pw_python_group") {
+  if (defined(invoker.python_deps)) {
+    _python_deps = invoker.python_deps
+  } else {
+    _python_deps = []
+  }
+
+  group(target_name) {
+    deps = _python_deps
+  }
+
+  _subtargets = [
+    "tests",
+    "lint",
+    "lint.mypy",
+    "lint.pylint",
+    "install",
+    "wheel",
+  ]
+
+  foreach(subtarget, _subtargets) {
+    group("$target_name.$subtarget") {
+      deps = []
+      foreach(dep, _python_deps) {
+        # Split out the toolchain to support deps with a toolchain specified.
+        _target = get_label_info(dep, "label_no_toolchain")
+        _toolchain = get_label_info(dep, "toolchain")
+        deps += [ "$_target.$subtarget($_toolchain)" ]
+      }
+    }
+  }
+}
+
+# Declares Python scripts or tests that are not part of a Python package.
+# Similar to pw_python_package, but only supports a subset of its features.
+#
+# pw_python_script accepts the same arguments as pw_python_package, except
+# `setup` cannot be provided.
+#
+# pw_python_script provides the same subtargets as pw_python_package, but
+# $target_name.install and $target_name.wheel only affect the python_deps of
+# this GN target, not the target itself.
+template("pw_python_script") {
+  _supported_variables = [
+    "sources",
+    "tests",
+    "python_deps",
+    "other_deps",
+    "inputs",
+  ]
+
+  pw_python_package(target_name) {
+    _pw_standalone = true
+    forward_variables_from(invoker, _supported_variables)
+  }
+}
diff --git a/pw_build/python_action.gni b/pw_build/python_action.gni
new file mode 100644
index 0000000..a420fce
--- /dev/null
+++ b/pw_build/python_action.gni
@@ -0,0 +1,189 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+# Defines an action that runs a Python script.
+#
+# This wraps a regular Python script GN action with an invocation of a script-
+# runner script that adds useful features. pw_python_action() uses the same
+# actions as GN's action(), with the following additions or changes:
+#
+#   module          May be used in place of the script argument to run the
+#                   provided Python module with `python -m` instead of a script.
+#                   Either script or module must be provided.
+#
+#   capture_output  If true, script output is hidden unless the script fails
+#                   with an error. Defaults to true.
+#
+#   stamp           File to touch if the script is successful. Actions that
+#                   don't create output files can use this stamp file instead of
+#                   creating their own dummy file. If true, a generic file is
+#                   used. If false or not set, no file is touched.
+#
+#   directory       The directory from which to execute the Python script. Paths
+#                   in args may need to be adjusted to be relative to this
+#                   directory.
+#
+#   environment     Environment variables to set, passed as a list of NAME=VALUE
+#                   strings.
+#
+#   args            Same as the standard action args, except special expressions
+#                   may be used to extract information not normally accessible
+#                   in GN. These include the following:
+#
+#                     <TARGET_FILE(//some/label:here)> - expands to the
+#                         output file (such as a .a or .elf) from a GN target
+#                     <TARGET_FILE_IF_EXISTS(//some/label:here)> - expands to
+#                         the output file if the target exists, or nothing
+#                     <TARGET_OBJECTS(//some/label:here)> - expands to the
+#                         object files produced by the provided GN target
+#
+template("pw_python_action") {
+  _script_args = [
+    # GN root directory relative to the build directory (in which the runner
+    # script is invoked).
+    "--gn-root",
+    rebase_path("//"),
+
+    # Current directory, used to resolve relative paths.
+    "--current-path",
+    rebase_path("."),
+
+    "--default-toolchain=$default_toolchain",
+    "--current-toolchain=$current_toolchain",
+  ]
+
+  if (defined(invoker.directory)) {
+    _script_args += [
+      "--directory",
+      rebase_path(invoker.directory),
+    ]
+  }
+
+  if (defined(invoker.environment)) {
+    foreach(variable, invoker.environment) {
+      _script_args += [ "--env=$variable" ]
+    }
+  }
+
+  if (defined(invoker.inputs)) {
+    _inputs = invoker.inputs
+  } else {
+    _inputs = []
+  }
+
+  # List the script to run as an input so that the action is re-run when it is
+  # modified.
+  if (defined(invoker.script)) {
+    _inputs += [ invoker.script ]
+  }
+
+  if (defined(invoker.outputs)) {
+    _outputs = invoker.outputs
+  } else {
+    _outputs = []
+  }
+
+  # If a stamp file is requested, add it as an output of the runner script.
+  if (defined(invoker.stamp) && invoker.stamp != false) {
+    if (invoker.stamp == true) {
+      _stamp_file = "$target_gen_dir/$target_name.pw_pystamp"
+    } else {
+      _stamp_file = invoker.stamp
+    }
+
+    _outputs += [ _stamp_file ]
+    _script_args += [
+      "--touch",
+      rebase_path(_stamp_file),
+    ]
+  }
+
+  # Capture output or not (defaults to true).
+  if (!defined(invoker.capture_output) || invoker.capture_output) {
+    _script_args += [ "--capture-output" ]
+  }
+
+  if (defined(invoker.module)) {
+    _script_args += [
+      "--module",
+      invoker.module,
+    ]
+  }
+
+  # "--" indicates the end of arguments to the runner script.
+  # Everything beyond this point is interpreted as the command and arguments
+  # of the Python script to run.
+  _script_args += [ "--" ]
+
+  if (defined(invoker.script)) {
+    _script_args += [ rebase_path(invoker.script) ]
+  }
+
+  if (defined(invoker.args)) {
+    _script_args += invoker.args
+  }
+
+  if (defined(invoker._pw_action_type)) {
+    _action_type = invoker._pw_action_type
+  } else {
+    _action_type = "action"
+  }
+
+  target(_action_type, target_name) {
+    _ignore_vars = [
+      "script",
+      "args",
+      "inputs",
+      "outputs",
+    ]
+    forward_variables_from(invoker, "*", _ignore_vars)
+
+    script = "$dir_pw_build/py/pw_build/python_runner.py"
+    args = _script_args
+    inputs = _inputs
+    outputs = _outputs
+  }
+}
+
+# Runs pw_python_action once per file over a set of sources.
+#
+# This template brings pw_python_action's features to action_foreach. Usage is
+# the same as pw_python_action, except that sources must be provided and source
+# expansion (e.g. "{{source}}") may be used in args and outputs.
+#
+# See the pw_python_action and action_foreach documentation for full details.
+template("pw_python_action_foreach") {
+  assert(defined(invoker.sources) && invoker.sources != [],
+         "pw_python_action_foreach requires a list of one or more sources")
+
+  pw_python_action(target_name) {
+    if (defined(invoker.stamp) && invoker.stamp != false) {
+      if (invoker.stamp == true) {
+        # Use source file names in the generated stamp file path so they are
+        # unique for each source.
+        stamp = "$target_gen_dir/{{source_file_part}}.pw_pystamp"
+      } else {
+        stamp = invoker.stamp
+      }
+    } else {
+      stamp = false
+    }
+
+    forward_variables_from(invoker, "*", [ "stamp" ])
+
+    _pw_action_type = "action_foreach"
+  }
+}
diff --git a/pw_build/python_script.gni b/pw_build/python_script.gni
deleted file mode 100644
index d9ec728..0000000
--- a/pw_build/python_script.gni
+++ /dev/null
@@ -1,156 +0,0 @@
-# Copyright 2019 The Pigweed Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-#     https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-# gn-format disable
-import("//build_overrides/pigweed.gni")
-
-# Defines an action to run a Python script.
-#
-# This wraps a regular Python script action with an invocation of a script-
-# runner script which resolves GN paths to filesystem paths and locates output
-# files for binary targets.
-#
-# The interface to this template is identical to that of a regular "action"
-# which runs a Python script, except for two key differences:
-#
-#   1. Regular GN actions typically require a call to rebase_path to resolve
-#      GN paths to filesystem paths. This template requires that all paths
-#      remain GN paths, but are made absolute.
-#
-#      This means that an "action" argument of the form:
-#
-#        rebase_path("my/relative/path:optional_target", root_build_dir)
-#
-#      Becomes:
-#
-#        get_path_info("my/relative/path:optional_target", "abspath")
-#
-#   2. The behavior of the runner script depends on whether a provided path is a
-#      regular build path or an output path (starting with "$root_out_dir").
-#      If an output path is provided and the path has a target, the script
-#      assumes that the target refers to a file built by Ninja and tries to
-#      locate it within the output directory.
-#
-# Additionally, this template can accept a boolean "stamp" argument. If set to
-# true, the script runner will touch a file to indicate the success of the run.
-# This is provided so that individual Python scripts are not required to define
-# an output file if they do not have one.
-#
-# Path resolution examples (assuming the build directory is //out):
-#
-#           BEFORE                     AFTER
-#
-#   //my_module              ../my_module
-#   //my_module:foo          ../my_module:foo
-#   //my_module/file.txt     ../my_module/file.txt
-#   $root_out_dir/my_module  ../out/obj/my_module
-#   $target_out_dir          ../out/obj/my_module      (in //my_module/BUILD.gn)
-#   $target_out_dir/out.json ../out/obj/my_module/out.json
-#   $target_out_dir:foo      ../out/obj/my_module/foo.elf  (toolchain-dependent)
-#   $target_out_dir:foo      ../out/obj/my_module/foo.exe  (toolchain-dependent)
-#
-# Arguments beyond normal action() target arguments:
-#
-#   capture_output (=true)  If true, script output is hidden unless the script
-#                           fails with an error. Defaults to true.
-#
-#   stamp                   File to touch if the script is successful. If set to
-#                           true, a generic file is used. If false or not set,
-#                           no file is touched.
-#
-template("pw_python_script") {
-  assert(defined(invoker.script), "pw_python_script requires a script to run")
-
-  _script_args = [
-    # GN root directory relative to the build directory (in which the runner
-    # script is invoked).
-    "--gn-root",
-    rebase_path("//"),
-
-    # Current directory, used to resolve relative paths.
-    "--current-path",
-    rebase_path("."),
-
-    "--default-toolchain=$default_toolchain",
-    "--current-toolchain=$current_toolchain",
-  ]
-
-  if (defined(invoker.inputs)) {
-    _inputs = invoker.inputs
-  } else {
-    _inputs = []
-  }
-
-  # List the script to run as an input so that the action is re-run when it is
-  # modified.
-  _inputs += [ invoker.script ]
-
-  if (defined(invoker.outputs)) {
-    _outputs = invoker.outputs
-  } else {
-    _outputs = []
-  }
-
-  # If a stamp file is requested, add it as an output of the runner script.
-  if (defined(invoker.stamp) && invoker.stamp != false) {
-    if (invoker.stamp == true) {
-      _stamp_file = "$target_gen_dir/$target_name.pw_pystamp"
-    } else {
-      _stamp_file = invoker.stamp
-    }
-
-    _outputs += [ _stamp_file ]
-    _script_args += [
-      "--touch",
-      rebase_path(_stamp_file),
-    ]
-  }
-
-  # Capture output or not.
-  # Note: capture defaults to true.
-  if (defined(invoker.capture_output)) {
-    forward_variables_from(invoker, [ "capture_output" ])
-  } else {
-    capture_output = true
-  }
-  if (capture_output) {
-    _script_args += [ "--capture-output" ]
-  }
-
-  # "--" indicates the end of arguments to the runner script.
-  # Everything beyond this point is interpreted as the command and arguments
-  # of the Python script to run.
-  _script_args += [ "--" ]
-
-  _script_args += [ rebase_path(invoker.script) ]
-
-  if (defined(invoker.args)) {
-    _script_args += invoker.args
-  }
-
-  action(target_name) {
-    _ignore_vars = [
-      "script",
-      "args",
-      "inputs",
-      "outputs",
-    ]
-    forward_variables_from(invoker, "*", _ignore_vars)
-
-    script = "$dir_pw_build/py/pw_build/python_runner.py"
-    args = _script_args
-    inputs = _inputs
-    outputs = _outputs
-  }
-}
diff --git a/pw_rpc/test_impl/BUILD.gn b/pw_build/python_wheels.gni
similarity index 60%
copy from pw_rpc/test_impl/BUILD.gn
copy to pw_build/python_wheels.gni
index 68f88c1..22210da 100644
--- a/pw_rpc/test_impl/BUILD.gn
+++ b/pw_build/python_wheels.gni
@@ -12,19 +12,23 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
-import("$dir_pw_build/target_types.gni")
-import("$dir_pw_unit_test/test.gni")
-config("config") {
-  include_dirs = [ "public_overrides" ]
-  visibility = [ ":*" ]
-}
+import("$dir_pw_build/python_action.gni")
 
-pw_source_set("test_impl") {
-  public_configs = [ ":config" ]
-  public = [ "public_overrides/pw_rpc/internal/method.h" ]
-  public_deps = [ "../:server_library_deps" ]
-  visibility = [ "..:*" ]
+# Builds a .whl from a Python package.
+template("pw_python_wheels") {
+  pw_python_action(target_name) {
+    forward_variables_from(invoker, [ "deps" ])
+
+    script = "$dir_pw_build/py/pw_build/python_wheels.py"
+
+    args = [
+      "--out_dir",
+      rebase_path("$target_out_dir/python_wheels"),
+    ]
+    args += rebase_path(invoker.inputs)
+
+    stamp = true
+  }
 }
diff --git a/pw_build/target_types.gni b/pw_build/target_types.gni
index 80ffd2a..7590f2d 100644
--- a/pw_build/target_types.gni
+++ b/pw_build/target_types.gni
@@ -12,8 +12,8 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
+
 declare_args() {
   # The name of the GN target type used to build Pigweed executables.
   #
@@ -54,10 +54,11 @@
       configs += pw_build_defaults.configs
     }
     if (defined(remove_configs)) {
-      if (remove_configs[0] == "*") {
+      if (remove_configs != [] && remove_configs[0] == "*") {
         configs = []
       } else {
-        configs -= remove_configs
+        configs += remove_configs  # Add configs in case they aren't already
+        configs -= remove_configs  # present, then remove them.
       }
     }
     if (defined(invoker.configs)) {
@@ -70,9 +71,10 @@
       public_deps = []
     }
     if (defined(remove_public_deps)) {
-      if (remove_public_deps[0] == "*") {
+      if (remove_public_deps != [] && remove_public_deps[0] == "*") {
         public_deps = []
       } else {
+        public_deps += remove_public_deps
         public_deps -= remove_public_deps
       }
     }
@@ -99,10 +101,11 @@
       configs += pw_build_defaults.configs
     }
     if (defined(remove_configs)) {
-      if (remove_configs[0] == "*") {
+      if (remove_configs != [] && remove_configs[0] == "*") {
         configs = []
       } else {
-        configs -= remove_configs
+        configs += remove_configs  # Add configs in case they aren't already
+        configs -= remove_configs  # present, then remove them.
       }
     }
     if (defined(invoker.configs)) {
@@ -115,9 +118,10 @@
       public_deps = []
     }
     if (defined(remove_public_deps)) {
-      if (remove_public_deps[0] == "*") {
+      if (remove_public_deps != [] && remove_public_deps[0] == "*") {
         public_deps = []
       } else {
+        public_deps += remove_public_deps
         public_deps -= remove_public_deps
       }
     }
@@ -146,10 +150,11 @@
       configs += pw_build_defaults.configs
     }
     if (defined(remove_configs)) {
-      if (remove_configs[0] == "*") {
+      if (remove_configs != [] && remove_configs[0] == "*") {
         configs = []
       } else {
-        configs -= remove_configs
+        configs += remove_configs  # Add configs in case they aren't already
+        configs -= remove_configs  # present, then remove them.
       }
     }
     if (defined(invoker.configs)) {
@@ -162,9 +167,10 @@
       public_deps = []
     }
     if (defined(remove_public_deps)) {
-      if (remove_public_deps[0] == "*") {
+      if (remove_public_deps != [] && remove_public_deps[0] == "*") {
         public_deps = []
       } else {
+        public_deps += remove_public_deps
         public_deps -= remove_public_deps
       }
     }
@@ -186,6 +192,7 @@
 
   target(pw_build_EXECUTABLE_TARGET_TYPE, target_name) {
     import("$dir_pw_build/defaults.gni")
+
     forward_variables_from(invoker, "*", _supported_toolchain_defaults)
 
     if (!defined(configs)) {
@@ -195,10 +202,11 @@
       configs += pw_build_defaults.configs
     }
     if (defined(remove_configs)) {
-      if (remove_configs[0] == "*") {
+      if (remove_configs != [] && remove_configs[0] == "*") {
         configs = []
       } else {
-        configs -= remove_configs
+        configs += remove_configs  # Add configs in case they aren't already
+        configs -= remove_configs  # present, then remove them.
       }
     }
     if (defined(invoker.configs)) {
@@ -211,9 +219,10 @@
       public_deps = []
     }
     if (defined(remove_public_deps)) {
-      if (remove_public_deps[0] == "*") {
+      if (remove_public_deps != [] && remove_public_deps[0] == "*") {
         public_deps = []
       } else {
+        public_deps += remove_public_deps
         public_deps -= remove_public_deps
       }
     }
diff --git a/pw_build/zip.gni b/pw_build/zip.gni
new file mode 100644
index 0000000..653b0cd
--- /dev/null
+++ b/pw_build/zip.gni
@@ -0,0 +1,131 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/python_action.gni")
+
+# Takes a set of input sources and zips them up to a .zip output.
+#
+# Users can either pass in specific input files or entire directories.
+# This target type also supports renaming files as well as specifing
+# desired zip destination directories for each input source.
+#
+# Args:
+#   deps: Dependencies for this target.
+#
+#   inputs: List of input files following the custom input formatting
+#      convention. See below for syntax.
+#
+#   dirs: List of directories to be completely zipped up following the same
+#     input formatting convention. See below for syntax.
+#
+#   output: Filename of artifact .zip file produced by script's execution.
+#
+# Each input follows the following convention:
+#   /source_path > /zip_destination/
+#
+# All directories are expected to be end with a '/'. Inputs must always specify
+# both a source and a destination. Destinations are expected to have a leading
+# '/' which stands for the root of the archive.
+#
+# Example:
+#   Let's say we have the following structure for a //source/ directory:
+#
+#     source/
+#     ├── file1.txt
+#     ├── file2.txt
+#     ├── file3.txt
+#     └── some_dir/
+#         ├── file4.txt
+#         └── some_other_dir/
+#             └── file5.txt
+#
+#   And we create the following build target:
+#
+#     import("$dir_pw_build/zip.gni")
+#
+#     pw_zip("target_name") {
+#       inputs = [
+#         "//source/file1.txt > /",             # Copied to the zip root dir.
+#         "//source/file2.txt > /renamed.txt",  # File renamed.
+#         "//source/file3.txt > /bar/",         # File moved to the /bar/ dir.
+#       ]
+#
+#       dirs = [
+#         "//source/some_dir/ > /bar/some_dir/",  # Whole /some_dir/ contents
+#                                                 # copied as /bar/some_dir/.
+#       ]
+#
+#       # Note on output: if the specific output directory isn't defined
+#       # (such as output = "zoo.zip") then the .zip will output to the
+#       # same directory as the BUILD.gn file that called the target.
+#       output = "//$target_out_dir/foo.zip",  # Where the foo.zip will end up
+#     }
+#
+#   This will result in a .zip file called foo.zip stored in //$target_out_dir
+#   with the following structure:
+#
+#     foo.zip
+#     ├── bar/
+#     │   ├── file3.txt
+#     │   └── some_dir/
+#     │       ├── file4.txt
+#     │       └── some_other_dir/
+#     │           └── file5.txt
+#     ├── file1.txt
+#     └── renamed.txt
+#
+template("pw_zip") {
+  _delimiter = ">"
+  pw_python_action(target_name) {
+    forward_variables_from(invoker, [ "deps" ])
+    script = "$dir_pw_build/py/pw_build/zip.py"
+
+    args = [ "--out_filename" ]
+    args += [ rebase_path(invoker.output) ]
+
+    inputs = []
+    args += [ "--input_list" ]
+    if (defined(invoker.inputs)) {
+      foreach(input, invoker.inputs) {
+        # Adding spaces around our delimiter is great for readability,
+        # but not great for the string split: remove the spacing.
+        input = string_replace(input, " $_delimiter", _delimiter)
+        input = string_replace(input, "$_delimiter ", _delimiter)
+
+        input_list = []
+        input_list = string_split(input, _delimiter)
+        input_list[0] = rebase_path(input_list[0])
+        inputs += [ input_list[0] ]
+
+        # Pass rebased and delimited path to script.
+        args += [ string_join(_delimiter, input_list) ]
+      }
+    }
+
+    if (defined(invoker.dirs)) {
+      foreach(dir, invoker.dirs) {
+        # Adding spaces around our delimiter is great for readability,
+        # but not great for the string split: remove the spacing.
+        dir = string_replace(dir, " $_delimiter", _delimiter)
+        dir = string_replace(dir, "$_delimiter ", _delimiter)
+
+        args += [ rebase_path(dir) ]
+      }
+    }
+
+    outputs = [ invoker.output ]
+  }
+}
diff --git a/pw_bytes/BUILD b/pw_bytes/BUILD
index baa3389..293ae83 100644
--- a/pw_bytes/BUILD
+++ b/pw_bytes/BUILD
@@ -28,7 +28,9 @@
         "byte_builder.cc",
     ],
     hdrs = [
+        "public/pw_bytes/array.h",
         "public/pw_bytes/byte_builder.h",
+        "public/pw_bytes/endian.h",
         "public/pw_bytes/span.h",
     ],
     includes = ["public"],
@@ -40,6 +42,15 @@
 )
 
 pw_cc_test(
+    name = "array_test",
+    srcs = ["array_test.cc"],
+    deps = [
+        ":pw_bytes",
+        "//pw_unit_test",
+    ],
+)
+
+pw_cc_test(
     name = "byte_builder_test",
     srcs = ["byte_builder_test.cc"],
     deps = [
@@ -47,3 +58,12 @@
         "//pw_unit_test",
     ],
 )
+
+pw_cc_test(
+    name = "endian_test",
+    srcs = ["endian_test.cc"],
+    deps = [
+        ":pw_bytes",
+        "//pw_unit_test",
+    ],
+)
diff --git a/pw_bytes/BUILD.gn b/pw_bytes/BUILD.gn
index ab0ca28..d088fe4 100644
--- a/pw_bytes/BUILD.gn
+++ b/pw_bytes/BUILD.gn
@@ -12,13 +12,13 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_bloat/bloat.gni")
 import("$dir_pw_build/target_types.gni")
 import("$dir_pw_docgen/docs.gni")
 import("$dir_pw_unit_test/test.gni")
+
 config("default_config") {
   include_dirs = [ "public" ]
   visibility = [ ":*" ]
@@ -27,7 +27,9 @@
 pw_source_set("pw_bytes") {
   public_configs = [ ":default_config" ]
   public = [
+    "public/pw_bytes/array.h",
     "public/pw_bytes/byte_builder.h",
+    "public/pw_bytes/endian.h",
     "public/pw_bytes/span.h",
   ]
   sources = [ "byte_builder.cc" ]
@@ -39,7 +41,11 @@
 }
 
 pw_test_group("tests") {
-  tests = [ ":byte_builder_test" ]
+  tests = [
+    ":array_test",
+    ":byte_builder_test",
+    ":endian_test",
+  ]
   group_deps = [
     "$dir_pw_preprocessor:tests",
     "$dir_pw_span:tests",
@@ -47,11 +53,34 @@
   ]
 }
 
+pw_test("array_test") {
+  deps = [ ":pw_bytes" ]
+  sources = [ "array_test.cc" ]
+}
+
 pw_test("byte_builder_test") {
   deps = [ ":pw_bytes" ]
   sources = [ "byte_builder_test.cc" ]
 }
 
+pw_test("endian_test") {
+  deps = [ ":pw_bytes" ]
+  sources = [ "endian_test.cc" ]
+}
+
 pw_doc_group("docs") {
   sources = [ "docs.rst" ]
+  report_deps = [ ":byte_builder_size_report" ]
+}
+
+pw_size_report("byte_builder_size_report") {
+  title = "Using pw::ByteBuilder"
+
+  binaries = [
+    {
+      target = "size_report:with_byte_builder"
+      base = "size_report:without_byte_builder"
+      label = "Using ByteBuilder vs not using it"
+    },
+  ]
 }
diff --git a/pw_bytes/CMakeLists.txt b/pw_bytes/CMakeLists.txt
index f6706f8..33f48b7 100644
--- a/pw_bytes/CMakeLists.txt
+++ b/pw_bytes/CMakeLists.txt
@@ -12,8 +12,11 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+
 pw_auto_add_simple_module(pw_bytes
   PUBLIC_DEPS
+    pw_polyfill.overrides
     pw_preprocessor
     pw_span
     pw_status
diff --git a/pw_bytes/array_test.cc b/pw_bytes/array_test.cc
new file mode 100644
index 0000000..dfccad3
--- /dev/null
+++ b/pw_bytes/array_test.cc
@@ -0,0 +1,70 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_bytes/array.h"
+
+#include <array>
+#include <cstddef>
+
+#include "gtest/gtest.h"
+
+namespace pw::bytes {
+namespace {
+
+template <typename T, typename U>
+constexpr bool Equal(const T& lhs, const U& rhs) {
+  if (sizeof(lhs) != sizeof(rhs) || std::size(lhs) != std::size(rhs)) {
+    return false;
+  }
+
+  for (size_t i = 0; i < std::size(lhs); ++i) {
+    if (lhs[i] != rhs[i]) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+using std::byte;
+
+constexpr std::array<byte, 5> kHello{
+    byte{'H'}, byte{'e'}, byte{'l'}, byte{'l'}, byte{'o'}};
+
+constexpr uint32_t kEllo =
+    static_cast<uint32_t>('e') << 0 | static_cast<uint32_t>('l') << 8 |
+    static_cast<uint32_t>('l') << 16 | static_cast<uint32_t>('o') << 24;
+
+static_assert(Equal(String("Hello"), kHello));
+static_assert(Equal(String(""), std::array<std::byte, 0>{}));
+static_assert(Equal(MakeArray('H', 'e', 'l', 'l', 'o'), kHello));
+static_assert(Equal(Concat('H', kEllo), kHello));
+
+constexpr std::array<byte, 3> kInit{byte{'?'}, byte{'?'}, byte{'?'}};
+static_assert(Equal(Initialized<3>('?'), kInit));
+
+constexpr std::array<byte, 3> kCounting = MakeArray(0, 1, 2);
+static_assert(Equal(Initialized<3>([](size_t i) { return i; }), kCounting));
+
+constexpr std::array<byte, 3> kCounting2 = MakeArray(256, 1, 2);
+static_assert(Equal(Initialized<3>([](size_t i) { return i; }), kCounting2));
+
+constexpr auto kArray = Array<1, 2, 3, 255>();
+static_assert(Equal(MakeArray(1, 2, 3, 255), kArray));
+
+constexpr std::array<uint8_t, 4> kUintArray = Array<uint8_t, 1, 2, 3, 255>();
+static_assert(Equal(MakeArray<uint8_t>(1, 2, 3, 255), kUintArray));
+
+}  // namespace
+}  // namespace pw::bytes
diff --git a/pw_bytes/byte_builder.cc b/pw_bytes/byte_builder.cc
index bcfc9de..ba01c0f 100644
--- a/pw_bytes/byte_builder.cc
+++ b/pw_bytes/byte_builder.cc
@@ -34,21 +34,21 @@
   }
 
   if (bytes_to_append > max_size() - size()) {
-    status_ = Status::RESOURCE_EXHAUSTED;
+    status_ = Status::ResourceExhausted();
     return 0;
   }
 
   size_ += bytes_to_append;
-  status_ = Status::OK;
+  status_ = Status::Ok();
   return bytes_to_append;
 }
 
 void ByteBuilder::resize(size_t new_size) {
   if (new_size <= size_) {
     size_ = new_size;
-    status_ = Status::OK;
+    status_ = Status::Ok();
   } else {
-    status_ = Status::OUT_OF_RANGE;
+    status_ = Status::OutOfRange();
   }
 }
 
diff --git a/pw_bytes/byte_builder_test.cc b/pw_bytes/byte_builder_test.cc
index 1fa6a32..d1d1e7b 100644
--- a/pw_bytes/byte_builder_test.cc
+++ b/pw_bytes/byte_builder_test.cc
@@ -99,7 +99,7 @@
   constexpr auto kBytesTestLiteral = MakeBytes(0x04, 0x05, 0x06, 0x07);
 
   EXPECT_FALSE(bb.append(kBytesTestLiteral.data(), 4).ok());
-  EXPECT_EQ(Status::RESOURCE_EXHAUSTED, bb.status());
+  EXPECT_EQ(Status::ResourceExhausted(), bb.status());
   EXPECT_EQ(0u, bb.size());
 }
 
@@ -129,7 +129,7 @@
 TEST(ByteBuilder, Append_Bytes_Exhausted) {
   ByteBuffer<8> bb;
 
-  EXPECT_EQ(Status::RESOURCE_EXHAUSTED, bb.append(9, byte{0x04}).status());
+  EXPECT_EQ(Status::ResourceExhausted(), bb.append(9, byte{0x04}).status());
   EXPECT_EQ(0u, bb.size());
 }
 
@@ -156,7 +156,7 @@
   ByteBuilder bb(buffer);
 
   bb.resize(1);
-  EXPECT_EQ(Status::OUT_OF_RANGE, bb.append(9, byte{0x04}).status());
+  EXPECT_EQ(Status::OutOfRange(), bb.append(9, byte{0x04}).status());
 }
 
 TEST(ByteBuilder, Resize_Smaller) {
@@ -192,12 +192,12 @@
   EXPECT_EQ(3u, bb.size());
   bb.resize(5);
   EXPECT_EQ(3u, bb.size());
-  EXPECT_EQ(bb.status(), Status::OUT_OF_RANGE);
+  EXPECT_EQ(bb.status(), Status::OutOfRange());
 }
 
 TEST(ByteBuilder, Status_StartsOk) {
   ByteBuffer<16> bb;
-  EXPECT_EQ(Status::OK, bb.status());
+  EXPECT_EQ(Status::Ok(), bb.status());
 }
 
 TEST(ByteBuilder, Status_StatusUpdate) {
@@ -205,13 +205,13 @@
   ByteBuffer<2> bb;
 
   EXPECT_FALSE(bb.append(buffer).ok());
-  EXPECT_EQ(Status::RESOURCE_EXHAUSTED, bb.status());
+  EXPECT_EQ(Status::ResourceExhausted(), bb.status());
 
   bb.resize(4);
-  EXPECT_EQ(Status::OUT_OF_RANGE, bb.status());
+  EXPECT_EQ(Status::OutOfRange(), bb.status());
 
   EXPECT_FALSE(bb.append(buffer.data(), 0).ok());
-  EXPECT_EQ(Status::OUT_OF_RANGE, bb.status());
+  EXPECT_EQ(Status::OutOfRange(), bb.status());
 }
 
 TEST(ByteBuilder, Status_ClearStatus_SetsStatusToOk) {
@@ -219,16 +219,16 @@
   ByteBuffer<2> bb;
 
   EXPECT_FALSE(bb.append(buffer).ok());
-  EXPECT_EQ(Status::RESOURCE_EXHAUSTED, bb.status());
+  EXPECT_EQ(Status::ResourceExhausted(), bb.status());
 
   bb.clear_status();
-  EXPECT_EQ(Status::OK, bb.status());
+  EXPECT_EQ(Status::Ok(), bb.status());
 }
 
 TEST(ByteBuilder, PushBack) {
   ByteBuffer<12> bb;
   bb.push_back(byte{0x01});
-  EXPECT_EQ(Status::OK, bb.status());
+  EXPECT_EQ(Status::Ok(), bb.status());
   EXPECT_EQ(1u, bb.size());
   EXPECT_EQ(byte{0x01}, bb.data()[0]);
 }
@@ -236,7 +236,7 @@
 TEST(ByteBuilder, PushBack_Full) {
   ByteBuffer<1> bb;
   bb.push_back(byte{0x01});
-  EXPECT_EQ(Status::OK, bb.status());
+  EXPECT_EQ(Status::Ok(), bb.status());
   EXPECT_EQ(1u, bb.size());
 }
 
@@ -245,7 +245,7 @@
   bb.push_back(byte{0x01});
   bb.push_back(byte{0x01});
 
-  EXPECT_EQ(Status::RESOURCE_EXHAUSTED, bb.status());
+  EXPECT_EQ(Status::ResourceExhausted(), bb.status());
   EXPECT_EQ(1u, bb.size());
 }
 
@@ -256,7 +256,7 @@
   bb.append(buffer.data(), 3);
 
   bb.pop_back();
-  EXPECT_EQ(Status::OK, bb.status());
+  EXPECT_EQ(Status::Ok(), bb.status());
   EXPECT_EQ(2u, bb.size());
   EXPECT_EQ(byte{0x01}, bb.data()[0]);
   EXPECT_EQ(byte{0x02}, bb.data()[1]);
@@ -270,7 +270,7 @@
   bb.pop_back();
   bb.pop_back();
   bb.pop_back();
-  EXPECT_EQ(Status::OK, bb.status());
+  EXPECT_EQ(Status::Ok(), bb.status());
   EXPECT_EQ(0u, bb.size());
   EXPECT_TRUE(bb.empty());
 }
@@ -306,13 +306,13 @@
   two.push_back(byte{0x01});
   two.push_back(byte{0x01});
   two.push_back(byte{0x01});
-  ASSERT_EQ(Status::RESOURCE_EXHAUSTED, two.status());
+  ASSERT_EQ(Status::ResourceExhausted(), two.status());
 
   one = two;
   EXPECT_EQ(byte{0x01}, two.data()[7]);
   EXPECT_EQ(byte{0x01}, two.data()[8]);
   EXPECT_EQ(byte{0x01}, two.data()[9]);
-  EXPECT_EQ(Status::RESOURCE_EXHAUSTED, one.status());
+  EXPECT_EQ(Status::ResourceExhausted(), one.status());
 }
 
 TEST(ByteBuffer, CopyConstructFromSameSize) {
@@ -338,7 +338,7 @@
 
   EXPECT_EQ(byte{0x01}, two.data()[0]);
   EXPECT_EQ(byte{0x02}, two.data()[1]);
-  EXPECT_EQ(Status::OK, two.status());
+  EXPECT_EQ(Status::Ok(), two.status());
 }
 
 TEST(ByteBuilder, ResizeError_NoDataAddedAfter) {
@@ -350,11 +350,11 @@
   EXPECT_EQ(3u, bb.size());
   bb.resize(5);
   EXPECT_EQ(3u, bb.size());
-  EXPECT_EQ(bb.status(), Status::OUT_OF_RANGE);
+  EXPECT_EQ(bb.status(), Status::OutOfRange());
 
   bb.PutInt8(0xFE);
   EXPECT_EQ(3u, bb.size());
-  EXPECT_EQ(bb.status(), Status::OUT_OF_RANGE);
+  EXPECT_EQ(bb.status(), Status::OutOfRange());
 }
 
 TEST(ByteBuilder, AddingNoBytesToZeroSizedByteBuffer) {
@@ -372,7 +372,7 @@
 
   EXPECT_EQ(byte{0xFE}, bb.data()[0]);
   EXPECT_EQ(byte{0x02}, bb.data()[1]);
-  EXPECT_EQ(Status::OK, bb.status());
+  EXPECT_EQ(Status::Ok(), bb.status());
 }
 
 TEST(ByteBuffer, Putting8ByteInts_Exhausted) {
@@ -383,7 +383,7 @@
 
   EXPECT_EQ(byte{0xFE}, bb.data()[0]);
   EXPECT_EQ(byte{0x02}, bb.data()[1]);
-  EXPECT_EQ(Status::RESOURCE_EXHAUSTED, bb.status());
+  EXPECT_EQ(Status::ResourceExhausted(), bb.status());
 }
 
 TEST(ByteBuffer, Putting16ByteInts_Full_kLittleEndian) {
@@ -396,22 +396,22 @@
   EXPECT_EQ(byte{0x08}, bb.data()[2]);
   EXPECT_EQ(byte{0x00}, bb.data()[3]);
 
-  EXPECT_EQ(Status::OK, bb.status());
+  EXPECT_EQ(Status::Ok(), bb.status());
 }
 
 TEST(ByteBuffer, Putting16ByteInts_Exhausted_kBigEndian) {
   ByteBuffer<5> bb;
-  bb.PutInt16(0xFFF7, ByteOrder::kBigEndian);
-  bb.PutUint16(0x0008, ByteOrder::kBigEndian);
+  bb.PutInt16(0xFFF7, std::endian::big);
+  bb.PutUint16(0x0008, std::endian::big);
 
   EXPECT_EQ(byte{0xFF}, bb.data()[0]);
   EXPECT_EQ(byte{0xF7}, bb.data()[1]);
   EXPECT_EQ(byte{0x00}, bb.data()[2]);
   EXPECT_EQ(byte{0x08}, bb.data()[3]);
 
-  bb.PutInt16(0xFAFA, ByteOrder::kBigEndian);
+  bb.PutInt16(0xFAFA, std::endian::big);
   EXPECT_EQ(4u, bb.size());
-  EXPECT_EQ(Status::RESOURCE_EXHAUSTED, bb.status());
+  EXPECT_EQ(Status::ResourceExhausted(), bb.status());
 }
 
 TEST(ByteBuffer, Putting32ByteInts_Full_kLittleEndian) {
@@ -428,13 +428,13 @@
   EXPECT_EQ(byte{0x00}, bb.data()[6]);
   EXPECT_EQ(byte{0x00}, bb.data()[7]);
 
-  EXPECT_EQ(Status::OK, bb.status());
+  EXPECT_EQ(Status::Ok(), bb.status());
 }
 
 TEST(ByteBuffer, Putting32ByteInts_Exhausted_kBigEndian) {
   ByteBuffer<10> bb;
-  bb.PutInt32(0xF92927B2, ByteOrder::kBigEndian);
-  bb.PutUint32(0x0C90739E, ByteOrder::kBigEndian);
+  bb.PutInt32(0xF92927B2, std::endian::big);
+  bb.PutUint32(0x0C90739E, std::endian::big);
 
   EXPECT_EQ(byte{0xF9}, bb.data()[0]);
   EXPECT_EQ(byte{0x29}, bb.data()[1]);
@@ -445,9 +445,9 @@
   EXPECT_EQ(byte{0x73}, bb.data()[6]);
   EXPECT_EQ(byte{0x9E}, bb.data()[7]);
 
-  bb.PutInt32(-114743374, ByteOrder::kBigEndian);
+  bb.PutInt32(-114743374, std::endian::big);
   EXPECT_EQ(8u, bb.size());
-  EXPECT_EQ(Status::RESOURCE_EXHAUSTED, bb.status());
+  EXPECT_EQ(Status::ResourceExhausted(), bb.status());
 }
 
 TEST(ByteBuffer, Putting64ByteInts_Full_kLittleEndian) {
@@ -472,13 +472,13 @@
   EXPECT_EQ(byte{0xFF}, bb.data()[14]);
   EXPECT_EQ(byte{0xFF}, bb.data()[15]);
 
-  EXPECT_EQ(Status::OK, bb.status());
+  EXPECT_EQ(Status::Ok(), bb.status());
 }
 
 TEST(ByteBuffer, Putting64ByteInts_Exhausted_kBigEndian) {
   ByteBuffer<20> bb;
-  bb.PutUint64(0x000001E8A7A0D569, ByteOrder::kBigEndian);
-  bb.PutInt64(0xFFFFFE17585F2A97, ByteOrder::kBigEndian);
+  bb.PutUint64(0x000001E8A7A0D569, std::endian::big);
+  bb.PutInt64(0xFFFFFE17585F2A97, std::endian::big);
 
   EXPECT_EQ(byte{0x00}, bb.data()[0]);
   EXPECT_EQ(byte{0x00}, bb.data()[1]);
@@ -497,17 +497,17 @@
   EXPECT_EQ(byte{0x2A}, bb.data()[14]);
   EXPECT_EQ(byte{0x97}, bb.data()[15]);
 
-  bb.PutInt64(-6099875637501324530, ByteOrder::kBigEndian);
+  bb.PutInt64(-6099875637501324530, std::endian::big);
   EXPECT_EQ(16u, bb.size());
-  EXPECT_EQ(Status::RESOURCE_EXHAUSTED, bb.status());
+  EXPECT_EQ(Status::ResourceExhausted(), bb.status());
 }
 
 TEST(ByteBuffer, PuttingInts_MixedTypes_MixedEndian) {
   ByteBuffer<16> bb;
   bb.PutUint8(0x03);
-  bb.PutInt16(0xFD6D, ByteOrder::kBigEndian);
+  bb.PutInt16(0xFD6D, std::endian::big);
   bb.PutUint32(0x482B3D9E);
-  bb.PutInt64(0x9A1C3641843DF317, ByteOrder::kBigEndian);
+  bb.PutInt64(0x9A1C3641843DF317, std::endian::big);
   bb.PutInt8(0xFB);
 
   EXPECT_EQ(byte{0x03}, bb.data()[0]);
@@ -527,7 +527,7 @@
   EXPECT_EQ(byte{0x17}, bb.data()[14]);
   EXPECT_EQ(byte{0xFB}, bb.data()[15]);
 
-  EXPECT_EQ(Status::OK, bb.status());
+  EXPECT_EQ(Status::Ok(), bb.status());
 }
 
 TEST(ByteBuffer, Iterator) {
@@ -791,34 +791,34 @@
 TEST(ByteBuffer, Iterator_PeekValues_2Bytes) {
   ByteBuffer<4> bb;
   bb.PutInt16(0xA7F1);
-  bb.PutUint16(0xF929, ByteOrder::kBigEndian);
+  bb.PutUint16(0xF929, std::endian::big);
 
   auto it = bb.begin();
   EXPECT_EQ(it.PeekInt16(), int16_t(0xA7F1));
   it = it + 2;
-  EXPECT_EQ(it.PeekUint16(ByteOrder::kBigEndian), uint16_t(0xF929));
+  EXPECT_EQ(it.PeekUint16(std::endian::big), uint16_t(0xF929));
 }
 
 TEST(ByteBuffer, Iterator_PeekValues_4Bytes) {
   ByteBuffer<8> bb;
   bb.PutInt32(0xFFFFFFF1);
-  bb.PutUint32(0xF92927B2, ByteOrder::kBigEndian);
+  bb.PutUint32(0xF92927B2, std::endian::big);
 
   auto it = bb.begin();
   EXPECT_EQ(it.PeekInt32(), int32_t(0xFFFFFFF1));
   it = it + 4;
-  EXPECT_EQ(it.PeekUint32(ByteOrder::kBigEndian), uint32_t(0xF92927B2));
+  EXPECT_EQ(it.PeekUint32(std::endian::big), uint32_t(0xF92927B2));
 }
 
 TEST(ByteBuffer, Iterator_PeekValues_8Bytes) {
   ByteBuffer<16> bb;
   bb.PutUint64(0x000001E8A7A0D569);
-  bb.PutInt64(0xFFFFFE17585F2A97, ByteOrder::kBigEndian);
+  bb.PutInt64(0xFFFFFE17585F2A97, std::endian::big);
 
   auto it = bb.begin();
   EXPECT_EQ(it.PeekUint64(), uint64_t(0x000001E8A7A0D569));
   it = it + 8;
-  EXPECT_EQ(it.PeekInt64(ByteOrder::kBigEndian), int64_t(0xFFFFFE17585F2A97));
+  EXPECT_EQ(it.PeekInt64(std::endian::big), int64_t(0xFFFFFE17585F2A97));
 }
 
 TEST(ByteBuffer, Iterator_ReadValues_1Byte) {
@@ -836,31 +836,31 @@
 TEST(ByteBuffer, Iterator_ReadValues_2Bytes) {
   ByteBuffer<4> bb;
   bb.PutInt16(0xA7F1);
-  bb.PutUint16(0xF929, ByteOrder::kBigEndian);
+  bb.PutUint16(0xF929, std::endian::big);
 
   auto it = bb.begin();
   EXPECT_EQ(it.ReadInt16(), int16_t(0xA7F1));
-  EXPECT_EQ(it.ReadUint16(ByteOrder::kBigEndian), uint16_t(0xF929));
+  EXPECT_EQ(it.ReadUint16(std::endian::big), uint16_t(0xF929));
 }
 
 TEST(ByteBuffer, Iterator_ReadValues_4Bytes) {
   ByteBuffer<8> bb;
   bb.PutInt32(0xFFFFFFF1);
-  bb.PutUint32(0xF92927B2, ByteOrder::kBigEndian);
+  bb.PutUint32(0xF92927B2, std::endian::big);
 
   auto it = bb.begin();
   EXPECT_EQ(it.ReadInt32(), int32_t(0xFFFFFFF1));
-  EXPECT_EQ(it.ReadUint32(ByteOrder::kBigEndian), uint32_t(0xF92927B2));
+  EXPECT_EQ(it.ReadUint32(std::endian::big), uint32_t(0xF92927B2));
 }
 
 TEST(ByteBuffer, Iterator_ReadValues_8Bytes) {
   ByteBuffer<16> bb;
   bb.PutUint64(0x000001E8A7A0D569);
-  bb.PutInt64(0xFFFFFE17585F2A97, ByteOrder::kBigEndian);
+  bb.PutInt64(0xFFFFFE17585F2A97, std::endian::big);
 
   auto it = bb.begin();
   EXPECT_EQ(it.ReadUint64(), uint64_t(0x000001E8A7A0D569));
-  EXPECT_EQ(it.ReadInt64(ByteOrder::kBigEndian), int64_t(0xFFFFFE17585F2A97));
+  EXPECT_EQ(it.ReadInt64(std::endian::big), int64_t(0xFFFFFE17585F2A97));
 }
 }  // namespace
 }  // namespace pw
diff --git a/pw_bytes/docs.rst b/pw_bytes/docs.rst
index d67a8df..01b5e0f 100644
--- a/pw_bytes/docs.rst
+++ b/pw_bytes/docs.rst
@@ -1,8 +1,4 @@
-.. _chapter-pw-bytes:
-
-.. default-domain:: cpp
-
-.. highlight:: sh
+.. _module-pw_bytes:
 
 ---------
 pw_bytes
@@ -22,18 +18,27 @@
 Features
 ========
 
-pw::ByteBuilder
+pw_bytes/array.h
+----------------
+Functions for working with byte arrays, primarily for building fixed-size byte
+arrays at compile time.
+
+pw_bytes/byte_builder.h
+-----------------------
+.. cpp:class:: ByteBuilder
+
+  ``ByteBuilder`` is a class that facilitates building or reading arrays of
+  bytes in a fixed-size buffer. ByteBuilder handles reading and writing integers
+  with varying endianness.
+
+.. cpp:class:: template <size_t max_size> ByteBuffer
+
+  ``ByteBuilder`` with an internally allocated buffer.
+
+Size report: using ByteBuffer
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+.. include:: byte_builder_size_report
+
+pw_bytes/endian.h
 -----------------
-ByteBuilder is a utility class which facilitates the creation and
-building of formatted bytes in a fixed-size buffer.
-
-Utilities for building byte arrays at run time
-------------------------------------------------
--``PutInt8``, ``PutUInt8``: Inserts 8-bit integers.
--``PutInt16``, ``PutInt16``: Inserts 16-bit integers in little/big endian.
--``PutInt32``, ``PutUInt32``: Inserts 32-bit integers in little/big endian.
--``PutInt64``, ``PutInt64``: Inserts 64-bit integers in little/big endian.
-
-Future work
-^^^^^^^^^^^
-- Utilities for building byte arrays at compile time.
+Functions for converting the endianness of integral values.
diff --git a/pw_bytes/endian_test.cc b/pw_bytes/endian_test.cc
new file mode 100644
index 0000000..08ae244
--- /dev/null
+++ b/pw_bytes/endian_test.cc
@@ -0,0 +1,280 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_bytes/endian.h"
+
+#include <array>
+#include <cstddef>
+
+#include "gtest/gtest.h"
+
+namespace pw::bytes {
+namespace {
+
+constexpr std::endian kNonNative = (std::endian::native == std::endian::little)
+                                       ? std::endian::big
+                                       : std::endian::little;
+
+// ConvertOrderTo/From
+//
+// ConvertOrderTo and ConvertOrderFrom are implemented identically, but are
+// provided as separate functions to improve readability where they are used.
+//
+// clang-format off
+
+// Native endianness conversions (should do nothing)
+
+// Convert unsigned to native endianness
+static_assert(ConvertOrderTo(std::endian::native, uint8_t{0x12}) == uint8_t{0x12});
+static_assert(ConvertOrderTo(std::endian::native, uint16_t{0x0011}) == uint16_t{0x0011});
+static_assert(ConvertOrderTo(std::endian::native, uint32_t{0x33221100}) == uint32_t{0x33221100});
+static_assert(ConvertOrderTo(std::endian::native, uint64_t{0x0011223344556677}) == uint64_t{0x0011223344556677});
+
+// Convert signed to native endianness
+static_assert(ConvertOrderTo(std::endian::native, int8_t{0x12}) == int8_t{0x12});
+static_assert(ConvertOrderTo(std::endian::native, int16_t{0x0011}) == int16_t{0x0011});
+static_assert(ConvertOrderTo(std::endian::native, int32_t{0x33221100}) == int32_t{0x33221100});
+static_assert(ConvertOrderTo(std::endian::native, int64_t{0x0011223344556677}) == int64_t{0x0011223344556677});
+
+// Convert unsigned from native endianness
+static_assert(ConvertOrderFrom(std::endian::native, uint8_t{0x12}) == uint8_t{0x12});
+static_assert(ConvertOrderFrom(std::endian::native, uint16_t{0x0011}) == uint16_t{0x0011});
+static_assert(ConvertOrderFrom(std::endian::native, uint32_t{0x33221100}) == uint32_t{0x33221100});
+static_assert(ConvertOrderFrom(std::endian::native, uint64_t{0x0011223344556677}) == uint64_t{0x0011223344556677});
+
+// Convert signed from native endianness
+static_assert(ConvertOrderFrom(std::endian::native, int8_t{0x12}) == int8_t{0x12});
+static_assert(ConvertOrderFrom(std::endian::native, int16_t{0x0011}) == int16_t{0x0011});
+static_assert(ConvertOrderFrom(std::endian::native, int32_t{0x33221100}) == int32_t{0x33221100});
+static_assert(ConvertOrderFrom(std::endian::native, int64_t{0x0011223344556677}) == int64_t{0x0011223344556677});
+
+// Non-native endianness conversions (should reverse byte order)
+
+// Convert unsigned to non-native endianness
+static_assert(ConvertOrderTo(kNonNative, uint8_t{0x12}) == uint8_t{0x12});
+static_assert(ConvertOrderTo(kNonNative, uint16_t{0x0011}) == uint16_t{0x1100});
+static_assert(ConvertOrderTo(kNonNative, uint32_t{0x33221100}) == uint32_t{0x00112233});
+static_assert(ConvertOrderTo(kNonNative, uint64_t{0x0011223344556677}) == uint64_t{0x7766554433221100});
+
+// Convert signed to non-native endianness
+static_assert(ConvertOrderTo(kNonNative, int8_t{0x12}) == int8_t{0x12});
+static_assert(ConvertOrderTo(kNonNative, int16_t{0x0011}) == int16_t{0x1100});
+static_assert(ConvertOrderTo(kNonNative, int32_t{0x33221100}) == int32_t{0x00112233});
+static_assert(ConvertOrderTo(kNonNative, int64_t{0x0011223344556677}) == int64_t{0x7766554433221100});
+
+// Convert unsigned from non-native endianness
+static_assert(ConvertOrderFrom(kNonNative, uint8_t{0x12}) == uint8_t{0x12});
+static_assert(ConvertOrderFrom(kNonNative, uint16_t{0x0011}) == uint16_t{0x1100});
+static_assert(ConvertOrderFrom(kNonNative, uint32_t{0x33221100}) == uint32_t{0x00112233});
+static_assert(ConvertOrderFrom(kNonNative, uint64_t{0x0011223344556677}) == uint64_t{0x7766554433221100});
+
+// Convert signed from non-native endianness
+static_assert(ConvertOrderFrom(kNonNative, int8_t{0x12}) == int8_t{0x12});
+static_assert(ConvertOrderFrom(kNonNative, int16_t{0x0011}) == int16_t{0x1100});
+static_assert(ConvertOrderFrom(kNonNative, int32_t{0x33221100}) == int32_t{0x00112233});
+static_assert(ConvertOrderFrom(kNonNative, int64_t{0x0011223344556677}) == int64_t{0x7766554433221100});
+
+// clang-format on
+
+template <typename T, typename U>
+constexpr bool Equal(const T& lhs, const U& rhs) {
+  if (sizeof(lhs) != sizeof(rhs) || std::size(lhs) != std::size(rhs)) {
+    return false;
+  }
+
+  for (size_t i = 0; i < std::size(lhs); ++i) {
+    if (lhs[i] != rhs[i]) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+// CopyInOrder copies a value to a std::array with the specified endianness.
+//
+// clang-format off
+
+// 8-bit little
+static_assert(Equal(CopyInOrder(std::endian::little, '?'),
+                    Array<'?'>()));
+static_assert(Equal(CopyInOrder(std::endian::little, uint8_t{0x10}),
+                    Array<0x10>()));
+static_assert(Equal(CopyInOrder(std::endian::little, static_cast<int8_t>(0x10)),
+                    Array<0x10>()));
+
+// 8-bit big
+static_assert(Equal(CopyInOrder(std::endian::big, '?'),
+                    Array<'?'>()));
+static_assert(Equal(CopyInOrder(std::endian::big, static_cast<uint8_t>(0x10)),
+                    Array<0x10>()));
+static_assert(Equal(CopyInOrder(std::endian::big, static_cast<int8_t>(0x10)),
+                    Array<0x10>()));
+
+// 16-bit little
+static_assert(Equal(CopyInOrder(std::endian::little, uint16_t{0xAB12}),
+                    Array<0x12, 0xAB>()));
+static_assert(Equal(CopyInOrder(std::endian::little, static_cast<int16_t>(0xAB12)),
+                    Array<0x12, 0xAB>()));
+
+// 16-bit big
+static_assert(Equal(CopyInOrder(std::endian::big, uint16_t{0xAB12}),
+                    Array<0xAB, 0x12>()));
+static_assert(Equal(CopyInOrder(std::endian::big, static_cast<int16_t>(0xAB12)),
+                    Array<0xAB, 0x12>()));
+
+// 32-bit little
+static_assert(Equal(CopyInOrder(std::endian::little, uint32_t{0xAABBCCDD}),
+                    Array<0xDD, 0xCC, 0xBB, 0xAA>()));
+static_assert(Equal(CopyInOrder(std::endian::little, static_cast<int32_t>(0xAABBCCDD)),
+                    Array<0xDD, 0xCC, 0xBB, 0xAA>()));
+
+// 32-bit big
+static_assert(Equal(CopyInOrder(std::endian::big, uint32_t{0xAABBCCDD}),
+                    Array<0xAA, 0xBB, 0xCC, 0xDD>()));
+static_assert(Equal(CopyInOrder(std::endian::big, static_cast<int32_t>(0xAABBCCDD)),
+                    Array<0xAA, 0xBB, 0xCC, 0xDD>()));
+
+// 64-bit little
+static_assert(Equal(CopyInOrder(std::endian::little, uint64_t{0xAABBCCDD11223344}),
+                    Array<0x44, 0x33, 0x22, 0x11, 0xDD, 0xCC, 0xBB, 0xAA>()));
+static_assert(Equal(CopyInOrder(std::endian::little, static_cast<int64_t>(0xAABBCCDD11223344ull)),
+                    Array<0x44, 0x33, 0x22, 0x11, 0xDD, 0xCC, 0xBB, 0xAA>()));
+
+// 64-bit big
+static_assert(Equal(CopyInOrder(std::endian::big, uint64_t{0xAABBCCDD11223344}),
+                    Array<0xAA, 0xBB, 0xCC, 0xDD, 0x11, 0x22, 0x33, 0x44>()));
+static_assert(Equal(CopyInOrder(std::endian::big, static_cast<int64_t>(0xAABBCCDD11223344ull)),
+                    Array<0xAA, 0xBB, 0xCC, 0xDD, 0x11, 0x22, 0x33, 0x44>()));
+
+// clang-format on
+
+constexpr const char* kNumber = "\x11\x22\x33\x44\xaa\xbb\xcc\xdd";
+
+TEST(ReadInOrder, 8Bit_Big) {
+  EXPECT_EQ(ReadInOrder<uint8_t>(std::endian::big, "\0"), 0u);
+  EXPECT_EQ(ReadInOrder<uint8_t>(std::endian::big, "\x80"), 0x80u);
+  EXPECT_EQ(ReadInOrder<uint8_t>(std::endian::big, kNumber), 0x11u);
+
+  EXPECT_EQ(ReadInOrder<int8_t>(std::endian::big, "\0"), 0);
+  EXPECT_EQ(ReadInOrder<int8_t>(std::endian::big, "\x80"), -128);
+  EXPECT_EQ(ReadInOrder<int8_t>(std::endian::big, kNumber), 0x11);
+}
+
+TEST(ReadInOrder, 8Bit_Little) {
+  EXPECT_EQ(ReadInOrder<uint8_t>(std::endian::little, "\0"), 0u);
+  EXPECT_EQ(ReadInOrder<uint8_t>(std::endian::little, "\x80"), 0x80u);
+  EXPECT_EQ(ReadInOrder<uint8_t>(std::endian::little, kNumber), 0x11u);
+
+  EXPECT_EQ(ReadInOrder<int8_t>(std::endian::little, "\0"), 0);
+  EXPECT_EQ(ReadInOrder<int8_t>(std::endian::little, "\x80"), -128);
+  EXPECT_EQ(ReadInOrder<int8_t>(std::endian::little, kNumber), 0x11);
+}
+
+TEST(ReadInOrder, 16Bit_Big) {
+  EXPECT_EQ(ReadInOrder<uint16_t>(std::endian::big, "\0\0"), 0u);
+  EXPECT_EQ(ReadInOrder<uint16_t>(std::endian::big, "\x80\0"), 0x8000u);
+  EXPECT_EQ(ReadInOrder<uint16_t>(std::endian::big, kNumber), 0x1122u);
+
+  EXPECT_EQ(ReadInOrder<int16_t>(std::endian::big, "\0\0"), 0);
+  EXPECT_EQ(ReadInOrder<int16_t>(std::endian::big, "\x80\0"), -32768);
+  EXPECT_EQ(ReadInOrder<int16_t>(std::endian::big, kNumber), 0x1122);
+}
+
+TEST(ReadInOrder, 16Bit_Little) {
+  EXPECT_EQ(ReadInOrder<uint16_t>(std::endian::little, "\0\0"), 0u);
+  EXPECT_EQ(ReadInOrder<uint16_t>(std::endian::little, "\x80\0"), 0x80u);
+  EXPECT_EQ(ReadInOrder<uint16_t>(std::endian::little, kNumber), 0x2211u);
+
+  EXPECT_EQ(ReadInOrder<int16_t>(std::endian::little, "\0\0"), 0);
+  EXPECT_EQ(ReadInOrder<int16_t>(std::endian::little, "\x80\0"), 0x80);
+  EXPECT_EQ(ReadInOrder<int16_t>(std::endian::little, kNumber), 0x2211);
+}
+
+TEST(ReadInOrder, 32Bit_Big) {
+  EXPECT_EQ(ReadInOrder<uint32_t>(std::endian::big, "\0\0\0\0"), 0u);
+  EXPECT_EQ(ReadInOrder<uint32_t>(std::endian::big, "\x80\0\0\0"), 0x80000000u);
+  EXPECT_EQ(ReadInOrder<uint32_t>(std::endian::big, kNumber), 0x11223344u);
+
+  EXPECT_EQ(ReadInOrder<int32_t>(std::endian::big, "\0\0\0\0"), 0);
+  EXPECT_EQ(ReadInOrder<int32_t>(std::endian::big, "\x80\0\0\0"), -2147483648);
+  EXPECT_EQ(ReadInOrder<int32_t>(std::endian::big, kNumber), 0x11223344);
+}
+
+TEST(ReadInOrder, 32Bit_Little) {
+  EXPECT_EQ(ReadInOrder<uint32_t>(std::endian::little, "\0\0\0\0"), 0u);
+  EXPECT_EQ(ReadInOrder<uint32_t>(std::endian::little, "\x80\0\0\0"), 0x80u);
+  EXPECT_EQ(ReadInOrder<uint32_t>(std::endian::little, kNumber), 0x44332211u);
+
+  EXPECT_EQ(ReadInOrder<int32_t>(std::endian::little, "\0\0\0\0"), 0);
+  EXPECT_EQ(ReadInOrder<int32_t>(std::endian::little, "\x80\0\0\0"), 0x80);
+  EXPECT_EQ(ReadInOrder<int32_t>(std::endian::little, kNumber), 0x44332211);
+}
+
+TEST(ReadInOrder, 64Bit_Big) {
+  EXPECT_EQ(ReadInOrder<uint64_t>(std::endian::big, "\0\0\0\0\0\0\0\0"), 0u);
+  EXPECT_EQ(ReadInOrder<uint64_t>(std::endian::big, "\x80\0\0\0\0\0\0\0"),
+            0x80000000'00000000llu);
+  EXPECT_EQ(ReadInOrder<uint64_t>(std::endian::big, kNumber),
+            0x11223344AABBCCDDu);
+
+  EXPECT_EQ(ReadInOrder<int64_t>(std::endian::big, "\0\0\0\0\0\0\0\0"), 0);
+  EXPECT_EQ(ReadInOrder<int64_t>(std::endian::big, "\x80\0\0\0\0\0\0\0"),
+            static_cast<int64_t>(1llu << 63));
+  EXPECT_EQ(ReadInOrder<int64_t>(std::endian::big, kNumber),
+            0x11223344AABBCCDD);
+}
+
+TEST(ReadInOrder, 64Bit_Little) {
+  EXPECT_EQ(ReadInOrder<uint64_t>(std::endian::little, "\0\0\0\0\0\0\0\0"), 0u);
+  EXPECT_EQ(ReadInOrder<uint64_t>(std::endian::little, "\x80\0\0\0\0\0\0\0"),
+            0x80u);
+  EXPECT_EQ(ReadInOrder<uint64_t>(std::endian::little, kNumber),
+            0xDDCCBBAA44332211u);
+
+  EXPECT_EQ(ReadInOrder<int64_t>(std::endian::little, "\0\0\0\0\0\0\0\0"), 0);
+  EXPECT_EQ(ReadInOrder<int64_t>(std::endian::little, "\x80\0\0\0\0\0\0\0"),
+            0x80);
+  EXPECT_EQ(ReadInOrder<int64_t>(std::endian::little, kNumber),
+            static_cast<int64_t>(0xDDCCBBAA44332211));
+}
+
+TEST(ReadInOrder, StdArray) {
+  std::array<std::byte, 4> buffer = Array<1, 2, 3, 4>();
+  EXPECT_EQ(0x04030201, ReadInOrder<int32_t>(std::endian::little, buffer));
+  EXPECT_EQ(0x01020304, ReadInOrder<int32_t>(std::endian::big, buffer));
+}
+
+TEST(ReadInOrder, CArray) {
+  char buffer[5] = {1, 2, 3, 4, 99};
+  EXPECT_EQ(0x04030201, ReadInOrder<int32_t>(std::endian::little, buffer));
+  EXPECT_EQ(0x01020304, ReadInOrder<int32_t>(std::endian::big, buffer));
+}
+
+TEST(ReadInOrder, BoundsChecking_Ok) {
+  constexpr auto buffer = Array<1, 2, 3, 4>();
+  uint16_t value;
+  EXPECT_TRUE(ReadInOrder(std::endian::little, buffer, value));
+  EXPECT_EQ(0x0201, value);
+}
+
+TEST(ReadInOrder, BoundsChecking_TooSmall) {
+  constexpr auto buffer = Array<1, 2, 3>();
+  int32_t value = 0;
+  EXPECT_FALSE(ReadInOrder(std::endian::little, buffer, value));
+  EXPECT_EQ(0, value);
+}
+
+}  // namespace
+}  // namespace pw::bytes
diff --git a/pw_bytes/public/pw_bytes/array.h b/pw_bytes/public/pw_bytes/array.h
new file mode 100644
index 0000000..8f93ddd
--- /dev/null
+++ b/pw_bytes/public/pw_bytes/array.h
@@ -0,0 +1,147 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Utilities for building std::byte arrays from strings or integer values at
+// compile time.
+#pragma once
+
+#include <array>
+#include <cstddef>
+#include <iterator>
+
+namespace pw::bytes {
+namespace internal {
+
+template <typename T>
+constexpr bool UseBytesDirectly = std::is_integral_v<T> || std::is_enum_v<T>;
+
+// Internal implementation functions. CopyBytes copies bytes from an array of
+// byte-sized elements or the underlying bytes of an integer (as little-endian).
+// std::memcpy cannot be used since it is not constexpr.
+template <typename B, typename T, typename... Args>
+consteval void CopyBytes(B* array, T value, Args... args) {
+  static_assert(sizeof(B) == sizeof(std::byte));
+
+  if constexpr (UseBytesDirectly<T>) {
+    if constexpr (sizeof(T) == 1u) {
+      *array++ = static_cast<B>(value);
+    } else {
+      for (size_t i = 0; i < sizeof(T); ++i) {
+        *array++ = static_cast<B>(value & 0xFF);
+        value >>= 8;
+      }
+    }
+  } else {
+    static_assert(sizeof(value[0]) == sizeof(B));
+    for (auto b : value) {
+      *array++ = static_cast<B>(b);
+    }
+  }
+
+  if constexpr (sizeof...(args) > 0u) {
+    CopyBytes(array, args...);
+  }
+}
+
+// Evaluates to the size in bytes of an integer or byte array.
+template <typename T>
+consteval size_t SizeOfBytes(const T& arg) {
+  if constexpr (UseBytesDirectly<T>) {
+    return sizeof(arg);
+  } else {
+    static_assert(sizeof(arg[0]) == sizeof(std::byte));
+    return std::size(arg);
+  }
+}
+
+template <typename B, typename T, size_t... kIndex>
+consteval auto String(const T& array, std::index_sequence<kIndex...>) {
+  return std::array{static_cast<B>(array[kIndex])...};
+}
+
+template <typename T, typename U>
+consteval bool CanBeRepresentedAsByteType(const U& value) {
+  return static_cast<U>(static_cast<T>(value)) == value;
+}
+
+}  // namespace internal
+
+// Concatenates arrays or integers as a byte array at compile time. Integer
+// values are copied little-endian. Spans are copied byte-for-byte.
+template <typename B = std::byte, typename... Args>
+consteval auto Concat(Args... args) {
+  std::array<B, (internal::SizeOfBytes(args) + ...)> bytes{};
+  internal::CopyBytes(bytes.begin(), args...);
+  return bytes;
+}
+
+// Converts a string literal to an array of bytes, without the trailing '\0'.
+template <typename B = std::byte,
+          size_t size,
+          typename Indices = std::make_index_sequence<size - 1>>
+consteval auto String(const char (&str)[size]) {
+  return internal::String<B>(str, Indices{});
+}
+
+// String overload for the empty string "".
+template <typename B = std::byte>
+consteval auto String(const char (&)[1]) {
+  return std::array<B, 0>{};
+}
+
+// Creates an array of bytes from values passed as template parameters. The
+// values are guaranteed to be representable in the destination byte type.
+template <typename B, auto... values>
+consteval auto Array() {
+  static_assert((internal::CanBeRepresentedAsByteType<B>(values) && ...));
+  return std::array<B, sizeof...(values)>{static_cast<B>(values)...};
+}
+
+// Array() defaults to using std::byte.
+template <auto... values>
+consteval auto Array() {
+  return Array<std::byte, values...>();
+}
+
+// Creates an initialized array of bytes. Initializes the array to a value or
+// the return values from a function that accepts the index as a parameter.
+template <typename B, size_t size, typename T>
+constexpr auto Initialized(const T& value_or_function) {
+  std::array<B, size> array{};
+
+  for (size_t i = 0; i < size; ++i) {
+    if constexpr (std::is_integral_v<T>) {
+      array[i] = static_cast<B>(value_or_function);
+    } else {
+      array[i] = static_cast<B>(value_or_function(i));
+    }
+  }
+  return array;
+}
+
+// Initialized(value_or_function) defaults to using std::byte.
+template <size_t size, typename T>
+constexpr auto Initialized(const T& value_or_function) {
+  return Initialized<std::byte, size>(value_or_function);
+}
+
+// Creates an array of bytes from a series of function arguments. Unlike
+// Array(), MakeArray() cannot check if the values fit in the destination type.
+// MakeArray() should only be used when Array() is not suitable.
+template <typename B = std::byte, typename... Args>
+constexpr auto MakeArray(const Args&... args) {
+  return std::array<B, sizeof...(args)>{static_cast<B>(args)...};
+}
+
+}  // namespace pw::bytes
diff --git a/pw_bytes/public/pw_bytes/byte_builder.h b/pw_bytes/public/pw_bytes/byte_builder.h
index 79b609e..f046cb0 100644
--- a/pw_bytes/public/pw_bytes/byte_builder.h
+++ b/pw_bytes/public/pw_bytes/byte_builder.h
@@ -15,9 +15,11 @@
 
 #include <algorithm>
 #include <array>
+#include <bit>
 #include <cstddef>
 #include <cstring>
 
+#include "pw_bytes/endian.h"
 #include "pw_bytes/span.h"
 #include "pw_preprocessor/compiler.h"
 #include "pw_status/status.h"
@@ -25,14 +27,6 @@
 
 namespace pw {
 
-// ByteOrder enum class enables users of ByteBuffer to specify the
-// desired Endianness for ordering the values to be inserted.
-enum class ByteOrder { kLittleEndian, kBigEndian };
-
-inline constexpr ByteOrder kSystemEndianness =
-    (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ ? ByteOrder::kLittleEndian
-                                               : ByteOrder::kBigEndian);
-
 // ByteBuilder facilitates building bytes in a fixed-size buffer.
 // BytesBuilders never overflow. Status is tracked for each operation and
 // an overall status is maintained, which reflects the most recent error.
@@ -126,30 +120,32 @@
     // located at the iterator position without moving the iterator forward.
     int8_t PeekInt8() const { return static_cast<int8_t>(PeekUint8()); }
 
-    uint8_t PeekUint8() const { return GetInteger<uint8_t>(); }
+    uint8_t PeekUint8() const {
+      return bytes::ReadInOrder<uint8_t>(std::endian::little, byte_);
+    }
 
-    int16_t PeekInt16(ByteOrder order = ByteOrder::kLittleEndian) const {
+    int16_t PeekInt16(std::endian order = std::endian::little) const {
       return static_cast<int16_t>(PeekUint16(order));
     }
 
-    uint16_t PeekUint16(ByteOrder order = ByteOrder::kLittleEndian) const {
-      return GetInteger<uint16_t>(order);
+    uint16_t PeekUint16(std::endian order = std::endian::little) const {
+      return bytes::ReadInOrder<uint16_t>(order, byte_);
     }
 
-    int32_t PeekInt32(ByteOrder order = ByteOrder::kLittleEndian) const {
+    int32_t PeekInt32(std::endian order = std::endian::little) const {
       return static_cast<int32_t>(PeekUint32(order));
     }
 
-    uint32_t PeekUint32(ByteOrder order = ByteOrder::kLittleEndian) const {
-      return GetInteger<uint32_t>(order);
+    uint32_t PeekUint32(std::endian order = std::endian::little) const {
+      return bytes::ReadInOrder<uint32_t>(order, byte_);
     }
 
-    int64_t PeekInt64(ByteOrder order = ByteOrder::kLittleEndian) const {
+    int64_t PeekInt64(std::endian order = std::endian::little) const {
       return static_cast<int64_t>(PeekUint64(order));
     }
 
-    uint64_t PeekUint64(ByteOrder order = ByteOrder::kLittleEndian) const {
-      return GetInteger<uint64_t>(order);
+    uint64_t PeekUint64(std::endian order = std::endian::little) const {
+      return bytes::ReadInOrder<uint64_t>(order, byte_);
     }
 
     // The Read methods will retreive ordered (Little/Big Endian) values
@@ -158,60 +154,42 @@
     int8_t ReadInt8() { return static_cast<int8_t>(ReadUint8()); }
 
     uint8_t ReadUint8() {
-      uint8_t value = GetInteger<uint8_t>();
+      uint8_t value = bytes::ReadInOrder<uint8_t>(std::endian::little, byte_);
       byte_ += 1;
       return value;
     }
 
-    int16_t ReadInt16(ByteOrder order = ByteOrder::kLittleEndian) {
+    int16_t ReadInt16(std::endian order = std::endian::little) {
       return static_cast<int16_t>(ReadUint16(order));
     }
 
-    uint16_t ReadUint16(ByteOrder order = ByteOrder::kLittleEndian) {
-      uint16_t value = GetInteger<uint16_t>(order);
+    uint16_t ReadUint16(std::endian order = std::endian::little) {
+      uint16_t value = bytes::ReadInOrder<uint16_t>(order, byte_);
       byte_ += 2;
       return value;
     }
 
-    int32_t ReadInt32(ByteOrder order = ByteOrder::kLittleEndian) {
+    int32_t ReadInt32(std::endian order = std::endian::little) {
       return static_cast<int32_t>(ReadUint32(order));
     }
 
-    uint32_t ReadUint32(ByteOrder order = ByteOrder::kLittleEndian) {
-      uint32_t value = GetInteger<uint32_t>(order);
+    uint32_t ReadUint32(std::endian order = std::endian::little) {
+      uint32_t value = bytes::ReadInOrder<uint32_t>(order, byte_);
       byte_ += 4;
       return value;
     }
 
-    int64_t ReadInt64(ByteOrder order = ByteOrder::kLittleEndian) {
+    int64_t ReadInt64(std::endian order = std::endian::little) {
       return static_cast<int64_t>(ReadUint64(order));
     }
 
-    uint64_t ReadUint64(ByteOrder order = ByteOrder::kLittleEndian) {
-      int64_t value = GetInteger<int64_t>(order);
+    uint64_t ReadUint64(std::endian order = std::endian::little) {
+      uint64_t value = bytes::ReadInOrder<uint64_t>(order, byte_);
       byte_ += 8;
       return value;
     }
 
    private:
-    template <typename T>
-    T GetInteger(ByteOrder order = ByteOrder::kLittleEndian) const {
-      T value;
-      std::memcpy(&value, byte_, sizeof(T));
-      if (kSystemEndianness != order) {
-        if constexpr (sizeof(T) == 1) {
-          return value;
-        } else if constexpr (sizeof(T) == 2) {
-          return Reverse2Bytes(value);
-        } else if constexpr (sizeof(T) == 4) {
-          return Reverse4Bytes(value);
-        } else if constexpr (sizeof(T) == 8) {
-          return Reverse8Bytes(value);
-        }
-      }
-      return value;
-    }
-
     const std::byte* byte_;
   };
 
@@ -250,7 +228,7 @@
     return StatusWithSize(status_, size_);
   }
 
-  // True if status() is Status::OK.
+  // True if status() is Status::Ok().
   bool ok() const { return status_.ok(); }
 
   // True if the bytes builder is empty.
@@ -265,11 +243,11 @@
   // Clears the bytes and resets its error state.
   void clear() {
     size_ = 0;
-    status_ = Status::OK;
+    status_ = Status::Ok();
   };
 
-  // Sets the statuses to Status::OK;
-  void clear_status() { status_ = Status::OK; }
+  // Sets the status to Status::Ok().
+  void clear_status() { status_ = Status::Ok(); }
 
   // Appends a single byte. Sets the status to RESOURCE_EXHAUSTED if the
   // byte cannot be added because the buffer is full.
@@ -317,43 +295,34 @@
 
   // Put methods for inserting different 16-bit ints
   ByteBuilder& PutUint16(uint16_t value,
-                         ByteOrder order = ByteOrder::kLittleEndian) {
-    if (kSystemEndianness != order) {
-      value = Reverse2Bytes(value);
-    }
-    return WriteInOrder(value);
+                         std::endian order = std::endian::little) {
+    return WriteInOrder(bytes::ConvertOrderTo(order, value));
   }
 
   ByteBuilder& PutInt16(int16_t value,
-                        ByteOrder order = ByteOrder::kLittleEndian) {
+                        std::endian order = std::endian::little) {
     return PutUint16(static_cast<uint16_t>(value), order);
   }
 
   // Put methods for inserting different 32-bit ints
   ByteBuilder& PutUint32(uint32_t value,
-                         ByteOrder order = ByteOrder::kLittleEndian) {
-    if (kSystemEndianness != order) {
-      value = Reverse4Bytes(value);
-    }
-    return WriteInOrder(value);
+                         std::endian order = std::endian::little) {
+    return WriteInOrder(bytes::ConvertOrderTo(order, value));
   }
 
   ByteBuilder& PutInt32(int32_t value,
-                        ByteOrder order = ByteOrder::kLittleEndian) {
+                        std::endian order = std::endian::little) {
     return PutUint32(static_cast<uint32_t>(value), order);
   }
 
   // Put methods for inserting different 64-bit ints
   ByteBuilder& PutUint64(uint64_t value,
-                         ByteOrder order = ByteOrder::kLittleEndian) {
-    if (kSystemEndianness != order) {
-      value = Reverse8Bytes(value);
-    }
-    return WriteInOrder(value);
+                         std::endian order = std::endian::little) {
+    return WriteInOrder(bytes::ConvertOrderTo(order, value));
   }
 
   ByteBuilder& PutInt64(int64_t value,
-                        ByteOrder order = ByteOrder::kLittleEndian) {
+                        std::endian order = std::endian::little) {
     return PutUint64(static_cast<uint64_t>(value), order);
   }
 
@@ -368,28 +337,6 @@
   };
 
  private:
-  static constexpr uint16_t Reverse2Bytes(uint16_t value) {
-    return uint16_t(((value & 0x00FF) << 8) | ((value & 0xFF00) >> 8));
-  }
-
-  static constexpr uint32_t Reverse4Bytes(uint32_t value) {
-    return uint32_t(((value & 0x000000FF) << 3 * 8) |  //
-                    ((value & 0x0000FF00) << 1 * 8) |  //
-                    ((value & 0x00FF0000) >> 1 * 8) |  //
-                    ((value & 0xFF000000) >> 3 * 8));
-  }
-
-  static constexpr uint64_t Reverse8Bytes(uint64_t value) {
-    return uint64_t(((value & 0x00000000000000FF) << 7 * 8) |  //
-                    ((value & 0x000000000000FF00) << 5 * 8) |  //
-                    ((value & 0x0000000000FF0000) << 3 * 8) |  //
-                    ((value & 0x00000000FF000000) << 1 * 8) |  //
-                    ((value & 0x000000FF00000000) >> 1 * 8) |  //
-                    ((value & 0x0000FF0000000000) >> 3 * 8) |  //
-                    ((value & 0x00FF000000000000) >> 5 * 8) |  //
-                    ((value & 0xFF00000000000000) >> 7 * 8));
-  }
-
   template <typename T>
   ByteBuilder& WriteInOrder(T value) {
     return append(&value, sizeof(value));
diff --git a/pw_bytes/public/pw_bytes/endian.h b/pw_bytes/public/pw_bytes/endian.h
new file mode 100644
index 0000000..9eb1d71
--- /dev/null
+++ b/pw_bytes/public/pw_bytes/endian.h
@@ -0,0 +1,190 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <array>
+#include <bit>
+#include <cstring>
+#include <span>
+#include <type_traits>
+
+#include "pw_bytes/array.h"
+#include "pw_bytes/span.h"
+
+namespace pw::bytes {
+namespace internal {
+
+// Use a struct rather than an alias to give the type a more reasonable name.
+template <typename T>
+struct EquivalentUintImpl
+    : std::conditional<
+          sizeof(T) == 1,
+          uint8_t,
+          std::conditional_t<
+              sizeof(T) == 2,
+              uint16_t,
+              std::conditional_t<
+                  sizeof(T) == 4,
+                  uint32_t,
+                  std::conditional_t<sizeof(T) == 8, uint64_t, void>>>> {
+  static_assert(std::is_integral_v<T>);
+};
+
+template <typename T>
+using EquivalentUint = typename EquivalentUintImpl<T>::type;
+
+template <typename T>
+constexpr std::array<std::byte, sizeof(T)> CopyLittleEndian(T value) {
+  return CopyLittleEndian(static_cast<EquivalentUint<T>>(value));
+}
+
+template <>
+constexpr std::array<std::byte, 1> CopyLittleEndian<uint8_t>(uint8_t value) {
+  return MakeArray(value);
+}
+template <>
+constexpr std::array<std::byte, 2> CopyLittleEndian<uint16_t>(uint16_t value) {
+  return MakeArray(value & 0x00FF, (value & 0xFF00) >> 8);
+}
+
+template <>
+constexpr std::array<std::byte, 4> CopyLittleEndian<uint32_t>(uint32_t value) {
+  return MakeArray((value & 0x000000FF) >> 0 * 8,
+                   (value & 0x0000FF00) >> 1 * 8,
+                   (value & 0x00FF0000) >> 2 * 8,
+                   (value & 0xFF000000) >> 3 * 8);
+}
+
+template <>
+constexpr std::array<std::byte, 8> CopyLittleEndian<uint64_t>(uint64_t value) {
+  return MakeArray((value & 0x00000000000000FF) >> 0 * 8,
+                   (value & 0x000000000000FF00) >> 1 * 8,
+                   (value & 0x0000000000FF0000) >> 2 * 8,
+                   (value & 0x00000000FF000000) >> 3 * 8,
+                   (value & 0x000000FF00000000) >> 4 * 8,
+                   (value & 0x0000FF0000000000) >> 5 * 8,
+                   (value & 0x00FF000000000000) >> 6 * 8,
+                   (value & 0xFF00000000000000) >> 7 * 8);
+}
+
+template <typename T>
+constexpr T ReverseBytes(T value) {
+  EquivalentUint<T> uint = static_cast<EquivalentUint<T>>(value);
+
+  if constexpr (sizeof(uint) == 1) {
+    return static_cast<T>(uint);
+  } else if constexpr (sizeof(uint) == 2) {
+    return static_cast<T>(((uint & 0x00FF) << 8) | ((uint & 0xFF00) >> 8));
+  } else if constexpr (sizeof(uint) == 4) {
+    return static_cast<T>(((uint & 0x000000FF) << 3 * 8) |  //
+                          ((uint & 0x0000FF00) << 1 * 8) |  //
+                          ((uint & 0x00FF0000) >> 1 * 8) |  //
+                          ((uint & 0xFF000000) >> 3 * 8));
+  } else {
+    static_assert(sizeof(uint) == 8);
+    return static_cast<T>(((uint & 0x00000000000000FF) << 7 * 8) |  //
+                          ((uint & 0x000000000000FF00) << 5 * 8) |  //
+                          ((uint & 0x0000000000FF0000) << 3 * 8) |  //
+                          ((uint & 0x00000000FF000000) << 1 * 8) |  //
+                          ((uint & 0x000000FF00000000) >> 1 * 8) |  //
+                          ((uint & 0x0000FF0000000000) >> 3 * 8) |  //
+                          ((uint & 0x00FF000000000000) >> 5 * 8) |  //
+                          ((uint & 0xFF00000000000000) >> 7 * 8));
+  }
+}
+
+}  // namespace internal
+
+// Functions for reordering bytes in the provided integral value to match the
+// specified byte order. These functions are similar to the htonl() family of
+// functions.
+//
+// If the value is converted to non-system endianness, it must NOT be used
+// directly, since the value will be meaningless. Such values are only suitable
+// to be memcpy'd or sent to a different device.
+template <typename T>
+constexpr T ConvertOrder(std::endian from, std::endian to, T value) {
+  return from == to ? value : internal::ReverseBytes(value);
+}
+
+// Converts a value from native byte order to the specified byte order. Since
+// this function changes the value's endianness, the result should only be used
+// to memcpy the bytes to a buffer or send to a different device.
+template <typename T>
+constexpr T ConvertOrderTo(std::endian to_endianness, T value) {
+  return ConvertOrder(std::endian::native, to_endianness, value);
+}
+
+// Converts a value from the specified byte order to the native byte order.
+template <typename T>
+constexpr T ConvertOrderFrom(std::endian from_endianness, T value) {
+  return ConvertOrder(from_endianness, std::endian::native, value);
+}
+
+// Copies the value to a std::array with the specified endianness.
+template <typename T>
+constexpr auto CopyInOrder(std::endian order, T value) {
+  return internal::CopyLittleEndian(ConvertOrderTo(order, value));
+}
+
+// Reads a value from a buffer with the specified endianness.
+//
+// The buffer **MUST** be at least sizeof(T) bytes large! If you are not
+// absolutely certain the input buffer is large enough, use the ReadInOrder
+// overload that returns bool, which checks the buffer size at runtime.
+template <typename T>
+T ReadInOrder(std::endian order, const void* buffer) {
+  T value;
+  std::memcpy(&value, buffer, sizeof(value));
+  return ConvertOrderFrom(order, value);
+}
+
+// ReadInOrder from a static-extent span, with compile-time bounds checking.
+template <typename T,
+          typename B,
+          size_t buffer_size,
+          typename = std::enable_if_t<buffer_size != std::dynamic_extent &&
+                                      sizeof(B) == sizeof(std::byte)>>
+T ReadInOrder(std::endian order, std::span<B, buffer_size> buffer) {
+  static_assert(buffer_size >= sizeof(T));
+  return ReadInOrder<T>(order, buffer.data());
+}
+
+// ReadInOrder from a std::array, with compile-time bounds checking.
+template <typename T, typename B, size_t buffer_size>
+T ReadInOrder(std::endian order, const std::array<B, buffer_size>& buffer) {
+  return ReadInOrder<T>(order, std::span(buffer));
+}
+
+// ReadInOrder from a C array, with compile-time bounds checking.
+template <typename T, typename B, size_t buffer_size>
+T ReadInOrder(std::endian order, const B (&buffer)[buffer_size]) {
+  return ReadInOrder<T>(order, std::span(buffer));
+}
+
+// Reads a value with the specified endianness from the buffer, with bounds
+// checking. Returns true if successful, false if buffer is too small for a T.
+template <typename T>
+[[nodiscard]] bool ReadInOrder(std::endian order,
+                               ConstByteSpan buffer,
+                               T& value) {
+  if (buffer.size() < sizeof(T)) {
+    return false;
+  }
+
+  value = ReadInOrder<T>(order, buffer.data());
+  return true;
+}
+
+}  // namespace pw::bytes
diff --git a/pw_bytes/size_report/BUILD b/pw_bytes/size_report/BUILD
new file mode 100644
index 0000000..d58feea
--- /dev/null
+++ b/pw_bytes/size_report/BUILD
@@ -0,0 +1,31 @@
+# Copyright 2019 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+load(
+    "//pw_build:pigweed.bzl",
+    "pw_cc_binary",
+)
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])  # Apache License 2.0
+
+pw_cc_binary(
+    name = "build_byte_buffer",
+    srcs = ["byte_builder_size_report.cc"],
+    deps = [
+        "//pw_bloat:bloat_this_binary",
+        "//pw_bytes",
+    ],
+)
\ No newline at end of file
diff --git a/pw_rpc/test_impl/BUILD.gn b/pw_bytes/size_report/BUILD.gn
similarity index 63%
rename from pw_rpc/test_impl/BUILD.gn
rename to pw_bytes/size_report/BUILD.gn
index 68f88c1..37aac05 100644
--- a/pw_rpc/test_impl/BUILD.gn
+++ b/pw_bytes/size_report/BUILD.gn
@@ -1,4 +1,4 @@
-# Copyright 2020 The Pigweed Authors
+# Copyright 2019 The Pigweed Authors
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may not
 # use this file except in compliance with the License. You may obtain a copy of
@@ -12,19 +12,18 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/target_types.gni")
-import("$dir_pw_unit_test/test.gni")
-config("config") {
-  include_dirs = [ "public_overrides" ]
-  visibility = [ ":*" ]
+
+pw_executable("with_byte_builder") {
+  sources = [ "byte_builder_size_report.cc" ]
+  deps = [ ".." ]
+  defines = [ "USE_BYTE_BUILDER=1" ]
 }
 
-pw_source_set("test_impl") {
-  public_configs = [ ":config" ]
-  public = [ "public_overrides/pw_rpc/internal/method.h" ]
-  public_deps = [ "../:server_library_deps" ]
-  visibility = [ "..:*" ]
+pw_executable("without_byte_builder") {
+  sources = [ "byte_builder_size_report.cc" ]
+  deps = [ ".." ]
+  defines = [ "USE_BYTE_BUILDER=0" ]
 }
diff --git a/pw_bytes/size_report/byte_builder_size_report.cc b/pw_bytes/size_report/byte_builder_size_report.cc
new file mode 100644
index 0000000..4cde2e6
--- /dev/null
+++ b/pw_bytes/size_report/byte_builder_size_report.cc
@@ -0,0 +1,108 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// This size report uses either ByteBuilder or manual bytes manipulation
+// depending on whether "USE_BYTE_BUILDER" is set to true/false.
+// Building the file: ninja -C out build_me
+
+#include <array>
+#include <bit>
+#include <cstdint>
+#include <cstdio>
+
+#include "pw_bytes/byte_builder.h"
+
+#if !defined(USE_BYTE_BUILDER)
+#error "USE_BYTE_BUILDER must be defined"
+#endif  // !defined(USE_BYTE_BUILDER)
+
+namespace pw::bytes {
+
+#if USE_BYTE_BUILDER
+
+ByteBuffer<8> bb;
+
+void PutBytes() {
+  bb.PutUint32(0x482B3D9E);
+  bb.PutInt32(0x482B3D9E, std::endian::big);
+}
+
+void ReadBytes() {
+  auto it = bb.begin();
+
+  std::printf("%u\n", static_cast<unsigned>(it.ReadUint32()));
+  std::printf("%d\n", static_cast<int>(it.ReadInt32(std::endian::big)));
+}
+
+#else  // !USE_BYTE_BUILDER
+
+std::byte b_array[8];
+
+void PutBytes() {
+  uint32_t kVal1 = 0x482B3D9E;
+  int32_t kVal2 = 0x482B3D9E;
+
+  if (std::endian::native == std::endian::little) {
+    std::memcpy(b_array, &kVal1, sizeof(kVal1));
+
+    kVal2 = int32_t(((kVal2 & 0x000000FF) << 3 * 8) |  //
+                    ((kVal2 & 0x0000FF00) << 1 * 8) |  //
+                    ((kVal2 & 0x00FF0000) >> 1 * 8) |  //
+                    ((kVal2 & 0xFF000000) >> 3 * 8));
+    std::memcpy(b_array + 4, &kVal2, sizeof(kVal2));
+  } else {
+    kVal1 = uint32_t(((kVal1 & 0x000000FF) << 3 * 8) |  //
+                     ((kVal1 & 0x0000FF00) << 1 * 8) |  //
+                     ((kVal1 & 0x00FF0000) >> 1 * 8) |  //
+                     ((kVal1 & 0xFF000000) >> 3 * 8));
+    std::memcpy(b_array, &kVal1, sizeof(kVal1));
+
+    std::memcpy(b_array + 4, &kVal2, sizeof(kVal2));
+  }
+}
+
+void ReadBytes() {
+  uint32_t kVal1;
+  int32_t kVal2;
+
+  if (std::endian::native == std::endian::little) {
+    std::memcpy(&kVal1, b_array, sizeof(kVal1));
+    std::memcpy(&kVal2, b_array + 4, sizeof(kVal2));
+    kVal2 = int32_t(((kVal2 & 0x000000FF) << 3 * 8) |  //
+                    ((kVal2 & 0x0000FF00) << 1 * 8) |  //
+                    ((kVal2 & 0x00FF0000) >> 1 * 8) |  //
+                    ((kVal2 & 0xFF000000) >> 3 * 8));
+  } else {
+    std::memcpy(&kVal1, b_array, sizeof(kVal1));
+    std::memcpy(&kVal2, b_array + 4, sizeof(kVal2));
+
+    kVal1 = uint32_t(((kVal1 & 0x000000FF) << 3 * 8) |  //
+                     ((kVal1 & 0x0000FF00) << 1 * 8) |  //
+                     ((kVal1 & 0x00FF0000) >> 1 * 8) |  //
+                     ((kVal1 & 0xFF000000) >> 3 * 8));
+  }
+
+  std::printf("%u\n", static_cast<unsigned>(kVal1));
+  std::printf("%d\n", static_cast<int>(kVal2));
+}
+
+#endif  // USE_BYTE_BUILDER
+
+}  // namespace pw::bytes
+
+int main() {
+  pw::bytes::PutBytes();
+  pw::bytes::ReadBytes();
+  return 0;
+}
diff --git a/pw_checksum/BUILD b/pw_checksum/BUILD
index 1384325..5ac4a4c 100644
--- a/pw_checksum/BUILD
+++ b/pw_checksum/BUILD
@@ -25,25 +25,26 @@
 pw_cc_library(
     name = "pw_checksum",
     srcs = [
-        "ccitt_crc16.cc",
-        "crc32.cc"
+        "crc16_ccitt.cc",
+        "crc32.cc",
     ],
     hdrs = [
-        "public/pw_checksum/ccitt_crc16.h",
-        "public/pw_checksum/crc32.h"
+        "public/pw_checksum/crc16_ccitt.h",
+        "public/pw_checksum/crc32.h",
     ],
     includes = ["public"],
     deps = ["//pw_span"],
 )
 
 pw_cc_test(
-    name = "ccitt_crc16_test",
+    name = "crc16_ccitt_test",
     srcs = [
-        "ccitt_crc16_test_c.c",
-        "ccitt_crc16_test.cc",
+        "crc16_ccitt_test.cc",
+        "crc16_ccitt_test_c.c",
     ],
     deps = [
         ":pw_checksum",
+        "//pw_bytes",
         "//pw_unit_test",
     ],
 )
@@ -51,11 +52,12 @@
 pw_cc_test(
     name = "crc32_test",
     srcs = [
-        "crc32_test_c.c",
         "crc32_test.cc",
+        "crc32_test_c.c",
     ],
     deps = [
         ":pw_checksum",
+        "//pw_bytes",
         "//pw_unit_test",
     ],
 )
diff --git a/pw_checksum/BUILD.gn b/pw_checksum/BUILD.gn
index 43da086..59f85db 100644
--- a/pw_checksum/BUILD.gn
+++ b/pw_checksum/BUILD.gn
@@ -12,12 +12,12 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/target_types.gni")
 import("$dir_pw_docgen/docs.gni")
 import("$dir_pw_unit_test/test.gni")
+
 config("default_config") {
   include_dirs = [ "public" ]
 }
@@ -25,11 +25,11 @@
 pw_source_set("pw_checksum") {
   public_configs = [ ":default_config" ]
   public = [
-    "public/pw_checksum/ccitt_crc16.h",
+    "public/pw_checksum/crc16_ccitt.h",
     "public/pw_checksum/crc32.h",
   ]
   sources = [
-    "ccitt_crc16.cc",
+    "crc16_ccitt.cc",
     "crc32.cc",
   ]
   public_deps = [ dir_pw_span ]
@@ -37,21 +37,27 @@
 
 pw_test_group("tests") {
   tests = [
-    ":ccitt_crc16_test",
+    ":crc16_ccitt_test",
     ":crc32_test",
   ]
 }
 
-pw_test("ccitt_crc16_test") {
-  deps = [ ":pw_checksum" ]
+pw_test("crc16_ccitt_test") {
+  deps = [
+    ":pw_checksum",
+    dir_pw_bytes,
+  ]
   sources = [
-    "ccitt_crc16_test.cc",
-    "ccitt_crc16_test_c.c",
+    "crc16_ccitt_test.cc",
+    "crc16_ccitt_test_c.c",
   ]
 }
 
 pw_test("crc32_test") {
-  deps = [ ":pw_checksum" ]
+  deps = [
+    ":pw_checksum",
+    dir_pw_bytes,
+  ]
   sources = [
     "crc32_test.cc",
     "crc32_test_c.c",
diff --git a/pw_checksum/CMakeLists.txt b/pw_checksum/CMakeLists.txt
index 83cc5c9..94fe371 100644
--- a/pw_checksum/CMakeLists.txt
+++ b/pw_checksum/CMakeLists.txt
@@ -12,4 +12,11 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-pw_auto_add_simple_module(pw_checksum PUBLIC_DEPS pw_span)
+include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+
+pw_auto_add_simple_module(pw_checksum
+  PUBLIC_DEPS
+    pw_span
+  PRIVATE_DEPS
+    pw_bytes
+)
diff --git a/pw_checksum/ccitt_crc16.cc b/pw_checksum/crc16_ccitt.cc
similarity index 90%
rename from pw_checksum/ccitt_crc16.cc
rename to pw_checksum/crc16_ccitt.cc
index 85e9d5b..05ce8e9 100644
--- a/pw_checksum/ccitt_crc16.cc
+++ b/pw_checksum/crc16_ccitt.cc
@@ -12,12 +12,12 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
-#include "pw_checksum/ccitt_crc16.h"
+#include "pw_checksum/crc16_ccitt.h"
 
 namespace pw::checksum {
 namespace {
 
-constexpr uint16_t kCcittCrc16Table[256]{
+constexpr uint16_t kCrc16CcittTable[256]{
     0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
     0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
     0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
@@ -54,13 +54,13 @@
 
 }  // namespace
 
-extern "C" uint16_t pw_ChecksumCcittCrc16(const void* data,
-                                          size_t size_bytes,
-                                          uint16_t value) {
+extern "C" uint16_t pw_checksum_Crc16Ccitt(const void* data,
+                                           size_t size_bytes,
+                                           uint16_t value) {
   const uint8_t* const array = static_cast<const uint8_t*>(data);
 
   for (size_t i = 0; i < size_bytes; ++i) {
-    value = kCcittCrc16Table[((value >> 8) ^ array[i]) & 0xffu] ^ (value << 8);
+    value = kCrc16CcittTable[((value >> 8) ^ array[i]) & 0xffu] ^ (value << 8);
   }
 
   return value;
diff --git a/pw_checksum/ccitt_crc16_test.cc b/pw_checksum/crc16_ccitt_test.cc
similarity index 62%
rename from pw_checksum/ccitt_crc16_test.cc
rename to pw_checksum/crc16_ccitt_test.cc
index e78f0e9..3ac9c78 100644
--- a/pw_checksum/ccitt_crc16_test.cc
+++ b/pw_checksum/crc16_ccitt_test.cc
@@ -12,7 +12,7 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
-#include "pw_checksum/ccitt_crc16.h"
+#include "pw_checksum/crc16_ccitt.h"
 
 #include <string_view>
 
@@ -35,33 +35,48 @@
 constexpr uint16_t kStringCrc = 0xC184;
 
 TEST(Crc16, Empty) {
-  EXPECT_EQ(CcittCrc16(std::span<std::byte>()), kCcittCrc16DefaultInitialValue);
+  EXPECT_EQ(Crc16Ccitt::Calculate(std::span<std::byte>()),
+            Crc16Ccitt::kInitialValue);
 }
 
 TEST(Crc16, ByteByByte) {
-  uint16_t crc = kCcittCrc16DefaultInitialValue;
+  uint16_t crc = Crc16Ccitt::kInitialValue;
   for (size_t i = 0; i < sizeof(kBytes); i++) {
-    crc = CcittCrc16(std::byte{kBytes[i]}, crc);
+    crc = Crc16Ccitt::Calculate(std::byte{kBytes[i]}, crc);
   }
   EXPECT_EQ(crc, kBufferCrc);
 }
 
 TEST(Crc16, Buffer) {
-  EXPECT_EQ(CcittCrc16(std::as_bytes(std::span(kBytes))), kBufferCrc);
+  EXPECT_EQ(Crc16Ccitt::Calculate(std::as_bytes(std::span(kBytes))),
+            kBufferCrc);
 }
 
 TEST(Crc16, String) {
-  EXPECT_EQ(CcittCrc16(std::as_bytes(std::span(kString))), kStringCrc);
+  EXPECT_EQ(Crc16Ccitt::Calculate(std::as_bytes(std::span(kString))),
+            kStringCrc);
 }
 
-extern "C" uint16_t CallChecksumCcittCrc16(const void* data, size_t size_bytes);
+TEST(Crc16Class, Buffer) {
+  Crc16Ccitt crc16;
+  crc16.Update(std::as_bytes(std::span(kBytes)));
+  EXPECT_EQ(crc16.value(), kBufferCrc);
+}
+
+TEST(Crc16Class, String) {
+  Crc16Ccitt crc16;
+  crc16.Update(std::as_bytes(std::span(kString)));
+  EXPECT_EQ(crc16.value(), kStringCrc);
+}
+
+extern "C" uint16_t CallChecksumCrc16Ccitt(const void* data, size_t size_bytes);
 
 TEST(Crc16FromC, Buffer) {
-  EXPECT_EQ(CallChecksumCcittCrc16(kBytes, sizeof(kBytes)), kBufferCrc);
+  EXPECT_EQ(CallChecksumCrc16Ccitt(kBytes, sizeof(kBytes)), kBufferCrc);
 }
 
 TEST(Crc16FromC, String) {
-  EXPECT_EQ(CallChecksumCcittCrc16(kString.data(), kString.size()), kStringCrc);
+  EXPECT_EQ(CallChecksumCrc16Ccitt(kString.data(), kString.size()), kStringCrc);
 }
 
 }  // namespace
diff --git a/pw_checksum/ccitt_crc16_test_c.c b/pw_checksum/crc16_ccitt_test_c.c
similarity index 79%
rename from pw_checksum/ccitt_crc16_test_c.c
rename to pw_checksum/crc16_ccitt_test_c.c
index 72f3cec..629d3f3 100644
--- a/pw_checksum/ccitt_crc16_test_c.c
+++ b/pw_checksum/crc16_ccitt_test_c.c
@@ -12,8 +12,8 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
-#include "pw_checksum/ccitt_crc16.h"
+#include "pw_checksum/crc16_ccitt.h"
 
-uint16_t CallChecksumCcittCrc16(const void* data, size_t size_bytes) {
-  return pw_ChecksumCcittCrc16(data, size_bytes, 0xFFFF);
+uint16_t CallChecksumCrc16Ccitt(const void* data, size_t size_bytes) {
+  return pw_checksum_Crc16Ccitt(data, size_bytes, 0xFFFF);
 }
diff --git a/pw_checksum/crc32.cc b/pw_checksum/crc32.cc
index e30d546..46a8091 100644
--- a/pw_checksum/crc32.cc
+++ b/pw_checksum/crc32.cc
@@ -64,21 +64,16 @@
 
 }  // namespace
 
-extern "C" uint32_t pw_ChecksumCrc32Append(const void* data,
-                                           size_t size_bytes,
-                                           uint32_t previous_result) {
-  const uint8_t* const array = static_cast<const uint8_t*>(data);
-
-  // CRC32 values are finalized by inverting the bits. The finalization step
-  // must be undone before appending to a prior CRC32 value.
-  previous_result = ~previous_result;
+extern "C" uint32_t _pw_checksum_InternalCrc32(const void* data,
+                                               size_t size_bytes,
+                                               uint32_t state) {
+  const uint8_t* array = static_cast<const uint8_t*>(data);
 
   for (size_t i = 0; i < size_bytes; ++i) {
-    previous_result = kCrc32Table[(previous_result ^ array[i]) & 0xffu] ^
-                      (previous_result >> 8);
+    state = kCrc32Table[(state ^ array[i]) & 0xFFu] ^ (state >> 8);
   }
 
-  return ~previous_result;
+  return state;
 }
 
 }  // namespace pw::checksum
diff --git a/pw_checksum/crc32_test.cc b/pw_checksum/crc32_test.cc
index 87b8430..59d649a 100644
--- a/pw_checksum/crc32_test.cc
+++ b/pw_checksum/crc32_test.cc
@@ -17,6 +17,7 @@
 #include <string_view>
 
 #include "gtest/gtest.h"
+#include "pw_bytes/array.h"
 
 namespace pw::checksum {
 namespace {
@@ -27,9 +28,9 @@
 //
 // with polynomial 0x4C11DB7, initial value 0xFFFFFFFF.
 
-constexpr uint8_t kBytes[] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
-constexpr uint8_t kBytesPart0[] = {1, 2, 3, 4, 5};
-constexpr uint8_t kBytesPart1[] = {6, 7, 8, 9};
+constexpr auto kBytes = bytes::Array<1, 2, 3, 4, 5, 6, 7, 8, 9>();
+constexpr auto kBytesPart0 = bytes::Array<1, 2, 3, 4, 5>();
+constexpr auto kBytesPart1 = bytes::Array<6, 7, 8, 9>();
 constexpr uint32_t kBufferCrc = 0x40EFAB9E;
 
 constexpr std::string_view kString =
@@ -38,37 +39,51 @@
 constexpr uint32_t kStringCrc = 0x9EC87F88;
 
 TEST(Crc32, Empty) {
-  EXPECT_EQ(Crc32(std::span<std::byte>()), ~kCrc32InitialValue);
-}
-
-TEST(Crc32, ByteByByte) {
-  uint32_t crc;
-  crc = Crc32(std::byte{kBytes[0]});
-  for (size_t i = 1; i < sizeof(kBytes); i++) {
-    crc = Crc32(std::byte{kBytes[i]}, crc);
-  }
-  EXPECT_EQ(crc, kBufferCrc);
+  EXPECT_EQ(Crc32::Calculate(std::span<std::byte>()), PW_CHECKSUM_EMPTY_CRC32);
 }
 
 TEST(Crc32, Buffer) {
-  EXPECT_EQ(Crc32(as_bytes(std::span(kBytes))), kBufferCrc);
-}
-
-TEST(Crc32, BufferAppend) {
-  uint32_t crc = Crc32(as_bytes(std::span(kBytesPart0)));
-  EXPECT_EQ(Crc32(as_bytes(std::span(kBytesPart1)), crc), kBufferCrc);
+  EXPECT_EQ(Crc32::Calculate(std::as_bytes(std::span(kBytes))), kBufferCrc);
 }
 
 TEST(Crc32, String) {
-  EXPECT_EQ(Crc32(as_bytes(std::span(kString))), kStringCrc);
+  EXPECT_EQ(Crc32::Calculate(std::as_bytes(std::span(kString))), kStringCrc);
+}
+
+TEST(Crc32Class, ByteByByte) {
+  Crc32 crc;
+  for (std::byte b : kBytes) {
+    crc.Update(b);
+  }
+  EXPECT_EQ(crc.value(), kBufferCrc);
+}
+
+TEST(Crc32Class, Buffer) {
+  Crc32 crc32;
+  crc32.Update(std::as_bytes(std::span(kBytes)));
+  EXPECT_EQ(crc32.value(), kBufferCrc);
+}
+
+TEST(Crc32Class, BufferAppend) {
+  Crc32 crc32;
+  crc32.Update(kBytesPart0);
+  crc32.Update(kBytesPart1);
+  EXPECT_EQ(crc32.value(), kBufferCrc);
+}
+
+TEST(Crc32Class, String) {
+  Crc32 crc32;
+  crc32.Update(std::as_bytes(std::span(kString)));
+  EXPECT_EQ(crc32.value(), kStringCrc);
 }
 
 extern "C" uint32_t CallChecksumCrc32(const void* data, size_t size_bytes);
 extern "C" uint32_t CallChecksumCrc32Append(const void* data,
-                                            size_t size_bytes);
+                                            size_t size_bytes,
+                                            uint32_t value);
 
 TEST(Crc32FromC, Buffer) {
-  EXPECT_EQ(CallChecksumCrc32(kBytes, sizeof(kBytes)), kBufferCrc);
+  EXPECT_EQ(CallChecksumCrc32(kBytes.data(), kBytes.size()), kBufferCrc);
 }
 
 TEST(Crc32FromC, String) {
@@ -76,11 +91,17 @@
 }
 
 TEST(Crc32AppendFromC, Buffer) {
-  EXPECT_EQ(CallChecksumCrc32(kBytes, sizeof(kBytes)), kBufferCrc);
+  uint32_t crc = PW_CHECKSUM_EMPTY_CRC32;
+  for (std::byte b : kBytes) {
+    crc = CallChecksumCrc32Append(&b, 1, crc);
+  }
+
+  EXPECT_EQ(crc, kBufferCrc);
 }
 
 TEST(Crc32AppendFromC, String) {
-  EXPECT_EQ(CallChecksumCrc32Append(kString.data(), kString.size()),
+  EXPECT_EQ(CallChecksumCrc32Append(
+                kString.data(), kString.size(), PW_CHECKSUM_EMPTY_CRC32),
             kStringCrc);
 }
 
diff --git a/pw_checksum/crc32_test_c.c b/pw_checksum/crc32_test_c.c
index 107f077..e607fb3 100644
--- a/pw_checksum/crc32_test_c.c
+++ b/pw_checksum/crc32_test_c.c
@@ -15,9 +15,11 @@
 #include "pw_checksum/crc32.h"
 
 uint32_t CallChecksumCrc32(const void* data, size_t size_bytes) {
-  return pw_ChecksumCrc32(data, size_bytes);
+  return pw_checksum_Crc32(data, size_bytes);
 }
 
-uint32_t CallChecksumCrc32Append(const void* data, size_t size_bytes) {
-  return pw_ChecksumCrc32Append(data, size_bytes, ~0xFFFFFFFFu);
-}
\ No newline at end of file
+uint32_t CallChecksumCrc32Append(const void* data,
+                                 size_t size_bytes,
+                                 uint32_t value) {
+  return pw_checksum_Crc32Append(data, size_bytes, value);
+}
diff --git a/pw_checksum/docs.rst b/pw_checksum/docs.rst
index ed835c1..3006ab0 100644
--- a/pw_checksum/docs.rst
+++ b/pw_checksum/docs.rst
@@ -1,13 +1,11 @@
-.. default-domain:: cpp
-
-.. highlight:: sh
+.. _module-pw_checksum:
 
 -----------
 pw_checksum
 -----------
 The ``pw_checksum`` module provides functions for calculating checksums.
 
-pw_checksum/ccitt_crc16.h
+pw_checksum/crc16_ccitt.h
 =========================
 
 .. cpp:namespace:: pw::checksum
diff --git a/pw_checksum/public/pw_checksum/ccitt_crc16.h b/pw_checksum/public/pw_checksum/ccitt_crc16.h
deleted file mode 100644
index 4d43f93..0000000
--- a/pw_checksum/public/pw_checksum/ccitt_crc16.h
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2020 The Pigweed Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-//     https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-// Provides an implementation of the CCITT CRC16 for the polynomial
-//
-//   x^16 + x^12 + x^5 + 1
-//
-// Polynomial 0x1021, initial value 0xFFFF. See https://www.zlib.net/crc_v3.txt.
-#pragma once
-
-#include <stddef.h>
-#include <stdint.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif  // __cplusplus
-
-// C API for calculating the CCITT CRC16 of an array of data.
-uint16_t pw_ChecksumCcittCrc16(const void* data,
-                               size_t size_bytes,
-                               uint16_t initial_value);
-
-#ifdef __cplusplus
-}  // extern "C"
-
-#include <span>
-
-namespace pw::checksum {
-
-inline constexpr uint16_t kCcittCrc16DefaultInitialValue = 0xFFFF;
-
-// Calculates the CCITT CRC16 for the provided data. To update an existing CRC,
-// pass the previous value as the initial_value argument.
-inline uint16_t CcittCrc16(
-    std::span<const std::byte> data,
-    uint16_t initial_value = kCcittCrc16DefaultInitialValue) {
-  return pw_ChecksumCcittCrc16(data.data(), data.size_bytes(), initial_value);
-}
-
-// Calculates the CCITT CRC16 for a single byte. This is useful for updating a
-// CRC byte-by-byte.
-inline uint16_t CcittCrc16(
-    std::byte value, uint16_t initial_value = kCcittCrc16DefaultInitialValue) {
-  return pw_ChecksumCcittCrc16(&value, sizeof(value), initial_value);
-}
-
-}  // namespace pw::checksum
-
-#endif  // __cplusplus
diff --git a/pw_checksum/public/pw_checksum/crc16_ccitt.h b/pw_checksum/public/pw_checksum/crc16_ccitt.h
new file mode 100644
index 0000000..0f7785c
--- /dev/null
+++ b/pw_checksum/public/pw_checksum/crc16_ccitt.h
@@ -0,0 +1,81 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Provides an implementation of the CRC-16-CCITT or CRC-CCITT checksum, which
+// uses the polynomial 0x1021:
+//
+//   x^16 + x^12 + x^5 + 1
+//
+// with initial value 0xFFFF. See https://www.zlib.net/crc_v3.txt.
+#pragma once
+
+#include <stddef.h>
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif  // __cplusplus
+
+// C API for calculating the CRC-16-CCITT of an array of data.
+uint16_t pw_checksum_Crc16Ccitt(const void* data,
+                                size_t size_bytes,
+                                uint16_t initial_value);
+
+#ifdef __cplusplus
+}  // extern "C"
+
+#include <span>
+
+namespace pw::checksum {
+
+// Calculates the CRC-16-CCITT for all data passed to Update.
+class Crc16Ccitt {
+ public:
+  static constexpr uint16_t kInitialValue = 0xFFFF;
+
+  // Calculates the CRC-16-CCITT for the provided data and returns it as a
+  // uint16_t. To update a CRC in multiple calls, use an instance of the
+  // Crc16Ccitt class or pass the previous value as the initial_value argument.
+  static uint16_t Calculate(std::span<const std::byte> data,
+                            uint16_t initial_value = kInitialValue) {
+    return pw_checksum_Crc16Ccitt(
+        data.data(), data.size_bytes(), initial_value);
+  }
+
+  static uint16_t Calculate(std::byte data,
+                            uint16_t initial_value = kInitialValue) {
+    return Calculate(std::span(&data, 1), initial_value);
+  }
+
+  constexpr Crc16Ccitt() : value_(kInitialValue) {}
+
+  void Update(std::span<const std::byte> data) {
+    value_ = Calculate(data, value_);
+  }
+
+  void Update(std::byte data) { Update(std::span(&data, 1)); }
+
+  // Returns the value of the CRC-16-CCITT for all data passed to Update.
+  uint16_t value() const { return value_; }
+
+  // Resets the CRC to the initial value.
+  void clear() { value_ = kInitialValue; }
+
+ private:
+  uint16_t value_;
+};
+
+}  // namespace pw::checksum
+
+#endif  // __cplusplus
diff --git a/pw_checksum/public/pw_checksum/crc32.h b/pw_checksum/public/pw_checksum/crc32.h
index 9068206..a866365 100644
--- a/pw_checksum/public/pw_checksum/crc32.h
+++ b/pw_checksum/public/pw_checksum/crc32.h
@@ -11,6 +11,10 @@
 // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 // License for the specific language governing permissions and limitations under
 // the License.
+
+// CRC-32 (CRC32) implementation with initial value 0xFFFFFFFF. This provides C
+// functions and a C++ class. Use of the C API is discouraged; use the Crc32
+// class whenever possible.
 #pragma once
 
 #include <stddef.h>
@@ -20,20 +24,34 @@
 extern "C" {
 #endif  // __cplusplus
 
-#define PW_CHECKSUM_CRC32_INITIAL_VALUE 0xFFFFFFFFu
+// Value of an empty CRC32. May serve as the starting CRC32 value for
+// pw_checksum_Crc32Append.
+#define PW_CHECKSUM_EMPTY_CRC32 ~_PW_CHECKSUM_CRC32_INITIAL_STATE
 
-// C API for calculating the CRC32 of an array of data.
+// The initial state for internal CRC32 calculations. Do not use this value
+// directly.
+#define _PW_CHECKSUM_CRC32_INITIAL_STATE 0xFFFFFFFFu
+
+// Internal implementation function for CRC32. Do not call it directly.
+uint32_t _pw_checksum_InternalCrc32(const void* data,
+                                    size_t size_bytes,
+                                    uint32_t state);
+
+// Calculates the CRC32 for the provided data.
+static inline uint32_t pw_checksum_Crc32(const void* data, size_t size_bytes) {
+  return ~_pw_checksum_InternalCrc32(
+      data, size_bytes, _PW_CHECKSUM_CRC32_INITIAL_STATE);
+}
 
 // Updates an existing CRC value. The previous_result must have been returned
-// from a previous CRC32 call; it is not used as the initial value.
-uint32_t pw_ChecksumCrc32Append(const void* data,
-                                size_t size_bytes,
-                                uint32_t previous_result);
-
-// Starts calculating a CRC32 for the provided data.
-static inline uint32_t pw_ChecksumCrc32(const void* data, size_t size_bytes) {
-  return pw_ChecksumCrc32Append(
-      data, size_bytes, ~PW_CHECKSUM_CRC32_INITIAL_VALUE);
+// from a previous CRC32 call.
+static inline uint32_t pw_checksum_Crc32Append(const void* data,
+                                               size_t size_bytes,
+                                               uint32_t previous_result) {
+  // CRC32 values are finalized by inverting the bits. The finalization step
+  // must be undone before appending to a prior CRC32 value, then redone so this
+  // function returns a usable value after each call.
+  return ~_pw_checksum_InternalCrc32(data, size_bytes, ~previous_result);
 }
 
 #ifdef __cplusplus
@@ -43,33 +61,38 @@
 
 namespace pw::checksum {
 
-inline constexpr uint32_t kCrc32InitialValue = PW_CHECKSUM_CRC32_INITIAL_VALUE;
+// Calculates the CRC32 for all data passed to Update.
+//
+// This class is more efficient than the CRC32 C functions since it doesn't
+// finalize the value each time it is appended to.
+class Crc32 {
+ public:
+  // Calculates the CRC32 for the provided data and returns it as a uint32_t.
+  // To update a CRC in multiple pieces, use an instance of the Crc32 class.
+  static uint32_t Calculate(std::span<const std::byte> data) {
+    return pw_checksum_Crc32(data.data(), data.size_bytes());
+  }
 
-// Starts calculating a CRC32 with the provided data. Uses kCrc32InitialValue as
-// the initial value.
-inline uint32_t Crc32(std::span<const std::byte> data) {
-  return pw_ChecksumCrc32(data.data(), data.size_bytes());
-}
+  constexpr Crc32() : state_(kInitialValue) {}
 
-// Updates an existing CRC value. The previous_result must have been returned
-// from a previous CRC32 call; it is not used as the initial value.
-inline uint32_t Crc32(std::span<const std::byte> data,
-                      uint32_t previous_result) {
-  return pw_ChecksumCrc32Append(
-      data.data(), data.size_bytes(), previous_result);
-}
+  void Update(std::span<const std::byte> data) {
+    state_ = _pw_checksum_InternalCrc32(data.data(), data.size(), state_);
+  }
 
-// Starts calculating a CRC32 with the provided byte. Uses kCrc32InitialValue as
-// the initial value.
-inline uint32_t Crc32(std::byte value) {
-  return pw_ChecksumCrc32(&value, sizeof(value));
-}
+  void Update(std::byte data) { Update(std::span(&data, 1)); }
 
-// Updates an existing CRC value. The previous_result must have been returned
-// from a previous CRC32 call; it is not used as the initial value.
-inline uint32_t Crc32(std::byte value, uint32_t previous_result) {
-  return pw_ChecksumCrc32Append(&value, sizeof(value), previous_result);
-}
+  // Returns the value of the CRC32 for all data passed to Update.
+  uint32_t value() const { return ~state_; }
+
+  // Resets the CRC to the initial value.
+  void clear() { state_ = kInitialValue; }
+
+ private:
+  static constexpr uint32_t kInitialValue = _PW_CHECKSUM_CRC32_INITIAL_STATE;
+
+  uint32_t state_;
+};
 
 }  // namespace pw::checksum
+
 #endif  // __cplusplus
diff --git a/pw_cli/BUILD.gn b/pw_cli/BUILD.gn
index 601472c..dd021e8 100644
--- a/pw_cli/BUILD.gn
+++ b/pw_cli/BUILD.gn
@@ -12,10 +12,10 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_docgen/docs.gni")
+
 pw_doc_group("docs") {
   sources = [ "docs.rst" ]
 }
diff --git a/pw_cli/docs.rst b/pw_cli/docs.rst
index fca0fa0..5d03f05 100644
--- a/pw_cli/docs.rst
+++ b/pw_cli/docs.rst
@@ -1,8 +1,4 @@
-.. _chapter-pw-cli:
-
-.. default-domain:: python
-
-.. highlight:: sh
+.. _module-pw_cli:
 
 ------
 pw_cli
diff --git a/pw_rpc/test_impl/BUILD.gn b/pw_cli/py/BUILD.gn
similarity index 62%
copy from pw_rpc/test_impl/BUILD.gn
copy to pw_cli/py/BUILD.gn
index 68f88c1..46e803e 100644
--- a/pw_rpc/test_impl/BUILD.gn
+++ b/pw_cli/py/BUILD.gn
@@ -12,19 +12,23 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
-import("$dir_pw_build/target_types.gni")
-import("$dir_pw_unit_test/test.gni")
-config("config") {
-  include_dirs = [ "public_overrides" ]
-  visibility = [ ":*" ]
-}
+import("$dir_pw_build/python.gni")
 
-pw_source_set("test_impl") {
-  public_configs = [ ":config" ]
-  public = [ "public_overrides/pw_rpc/internal/method.h" ]
-  public_deps = [ "../:server_library_deps" ]
-  visibility = [ "..:*" ]
+pw_python_package("py") {
+  setup = [ "setup.py" ]
+  sources = [
+    "pw_cli/__init__.py",
+    "pw_cli/__main__.py",
+    "pw_cli/arguments.py",
+    "pw_cli/branding.py",
+    "pw_cli/color.py",
+    "pw_cli/env.py",
+    "pw_cli/envparse.py",
+    "pw_cli/envparse_test.py",
+    "pw_cli/log.py",
+    "pw_cli/plugins.py",
+    "pw_cli/process.py",
+  ]
 }
diff --git a/pw_cli/py/pw_cli/__init__.py b/pw_cli/py/pw_cli/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pw_cli/py/pw_cli/__init__.py
diff --git a/pw_cli/py/pw_cli/env.py b/pw_cli/py/pw_cli/env.py
index 4be9f9e..021c98e 100644
--- a/pw_cli/py/pw_cli/env.py
+++ b/pw_cli/py/pw_cli/env.py
@@ -23,7 +23,6 @@
     parser = envparse.EnvironmentParser(prefix='PW_')
 
     parser.add_var('PW_BOOTSTRAP_PYTHON')
-    parser.add_var('PW_CARGO_SETUP', type=envparse.strict_bool, default=False)
     parser.add_var('PW_ENABLE_PRESUBMIT_HOOK_WARNING', default=False)
     parser.add_var('PW_EMOJI', type=envparse.strict_bool, default=False)
     parser.add_var('PW_ENVSETUP')
@@ -35,6 +34,7 @@
                    type=envparse.strict_bool,
                    default=False)
     parser.add_var('PW_ENVIRONMENT_ROOT')
+    parser.add_var('PW_PROJECT_ROOT')
     parser.add_var('PW_ROOT')
     parser.add_var('PW_SKIP_BOOTSTRAP')
     parser.add_var('PW_SUBPROCESS', type=envparse.strict_bool, default=False)
@@ -45,11 +45,18 @@
 
     parser.add_var('PW_DOCTOR_SKIP_CIPD_CHECKS')
 
+    # TODO(pwbug/274) Remove after some transition time. These are no longer
+    # used but may be set by users or downstream projects, or just in currently
+    # active shells.
     parser.add_var('PW_CIPD_PACKAGE_FILES')
     parser.add_var('PW_VIRTUALENV_REQUIREMENTS')
+    parser.add_var('PW_VIRTUALENV_REQUIREMENTS_APPEND_DEFAULT')
     parser.add_var('PW_VIRTUALENV_SETUP_PY_ROOTS')
     parser.add_var('PW_CARGO_PACKAGE_FILES')
+    parser.add_var('PW_CARGO_SETUP', type=envparse.strict_bool, default=False)
+    parser.add_var('PW_VIRTUALENV_REQUIREMENTS_APPEND_DEFAULT')
 
+    parser.add_var('PW_BANNER_FUNC')
     parser.add_var('PW_BRANDING_BANNER')
     parser.add_var('PW_BRANDING_BANNER_COLOR', default='magenta')
 
diff --git a/pw_cli/py/pw_cli/log.py b/pw_cli/py/pw_cli/log.py
index 380ab81..8d9d1f7 100644
--- a/pw_cli/py/pw_cli/log.py
+++ b/pw_cli/py/pw_cli/log.py
@@ -33,8 +33,6 @@
 
 # Shorten all the log levels to 3 characters for column-aligned logs.
 # Color the logs using ANSI codes.
-# pylint: disable=bad-whitespace
-# yapf: disable
 _LOG_LEVELS = (
     LogLevel(logging.CRITICAL, 'bold_red', 'CRT', '☠️ '),
     LogLevel(logging.ERROR,    'red',      'ERR', '❌'),
@@ -42,9 +40,7 @@
     LogLevel(logging.INFO,     'magenta',  'INF', 'ℹ️ '),
     LogLevel(LOGLEVEL_STDOUT,  'cyan',     'OUT', '💬'),
     LogLevel(logging.DEBUG,    'blue',     'DBG', '👾'),
-)
-# yapf: enable
-# pylint: enable=bad-whitespace
+)  # yapf: disable
 
 _LOG = logging.getLogger(__name__)
 _STDERR_HANDLER = logging.StreamHandler()
diff --git a/pw_cli/py/pw_cli/process.py b/pw_cli/py/pw_cli/process.py
index 825dc08..a3e686b 100644
--- a/pw_cli/py/pw_cli/process.py
+++ b/pw_cli/py/pw_cli/process.py
@@ -35,7 +35,8 @@
     """Information about a process executed in run_async."""
     def __init__(self, process: 'asyncio.subprocess.Process',
                  output: Union[bytes, IO[bytes]]):
-        self.returncode = process.returncode
+        assert process.returncode is not None
+        self.returncode: int = process.returncode
         self.pid = process.pid
         self._output = output
 
diff --git a/pw_cli/py/pw_cli/py.typed b/pw_cli/py/pw_cli/py.typed
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pw_cli/py/pw_cli/py.typed
diff --git a/pw_cli/py/setup.py b/pw_cli/py/setup.py
index 94df754..19827b3 100644
--- a/pw_cli/py/setup.py
+++ b/pw_cli/py/setup.py
@@ -13,7 +13,7 @@
 # the License.
 """pw_cli"""
 
-import setuptools
+import setuptools  # type: ignore
 
 setuptools.setup(
     name='pw_cli',
@@ -22,5 +22,7 @@
     author_email='pigweed-developers@googlegroups.com',
     description='Pigweed swiss-army knife',
     packages=setuptools.find_packages(),
+    package_data={'pw_cli': ['py.typed']},
+    zip_safe=False,
     entry_points={'console_scripts': ['pw = pw_cli.__main__:main']},
 )
diff --git a/pw_containers/BUILD.gn b/pw_containers/BUILD.gn
index b637bd8..a7d79b7 100644
--- a/pw_containers/BUILD.gn
+++ b/pw_containers/BUILD.gn
@@ -12,12 +12,12 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/target_types.gni")
 import("$dir_pw_docgen/docs.gni")
 import("$dir_pw_unit_test/test.gni")
+
 config("default_config") {
   include_dirs = [ "public" ]
 }
@@ -40,7 +40,7 @@
     "public/pw_containers/internal/intrusive_list_impl.h",
     "public/pw_containers/intrusive_list.h",
   ]
-  deps = [ "$dir_pw_assert" ]
+  deps = [ dir_pw_assert ]
   sources = [ "intrusive_list.cc" ]
 }
 
diff --git a/pw_containers/CMakeLists.txt b/pw_containers/CMakeLists.txt
index 4d9ee9a..3936249 100644
--- a/pw_containers/CMakeLists.txt
+++ b/pw_containers/CMakeLists.txt
@@ -12,6 +12,8 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+
 pw_auto_add_simple_module(pw_containers
   PUBLIC_DEPS
     pw_assert
diff --git a/pw_containers/docs.rst b/pw_containers/docs.rst
index 3561936..dd36fc5 100644
--- a/pw_containers/docs.rst
+++ b/pw_containers/docs.rst
@@ -1,6 +1,4 @@
-.. default-domain:: cpp
-
-.. highlight:: sh
+.. _module-pw_containers:
 
 -------------
 pw_containers
diff --git a/pw_containers/intrusive_list.cc b/pw_containers/intrusive_list.cc
index 0ddfbe8..ba35f61 100644
--- a/pw_containers/intrusive_list.cc
+++ b/pw_containers/intrusive_list.cc
@@ -18,28 +18,39 @@
 
 namespace pw::intrusive_list_impl {
 
+List::Item::~Item() { unlist(); }
+
+void List::Item::unlist(Item* prev) {
+  if (prev == nullptr) {
+    prev = previous();
+  }
+  // Skip over this.
+  prev->next_ = next_;
+
+  // Retain the invariant that unlisted items are self-cycles.
+  next_ = this;
+}
+
+List::Item* List::Item::previous() {
+  // Follow the cycle around to find the previous element; O(N).
+  Item* prev = next_;
+  while (prev->next_ != this) {
+    prev = prev->next_;
+  }
+  return prev;
+}
+
 void List::insert_after(Item* pos, Item& item) {
-  PW_CHECK_PTR_EQ(
-      item.next_,
-      nullptr,
+  PW_CHECK(
+      item.unlisted(),
       "Cannot add an item to a pw::IntrusiveList that is already in a list");
   item.next_ = pos->next_;
   pos->next_ = &item;
 }
 
-void List::erase_after(Item* pos) {
-  Item* const item_to_remove = pos->next_;
-  pos->next_ = item_to_remove->next_;
-  item_to_remove->next_ = nullptr;
-}
+void List::erase_after(Item* pos) { pos->next_->unlist(pos); }
 
-List::Item* List::before_end() noexcept {
-  Item* pos = before_begin();
-  while (pos->next_ != end()) {
-    pos = pos->next_;
-  }
-  return pos;
-}
+List::Item* List::before_end() noexcept { return before_begin()->previous(); }
 
 void List::clear() {
   while (!empty()) {
@@ -54,8 +65,17 @@
       return true;
     }
   }
-
   return false;
 }
 
+size_t List::size() const {
+  size_t total = 0;
+  Item* item = head_.next_;
+  while (item != &head_) {
+    item = item->next_;
+    total++;
+  }
+  return total;
+}
+
 }  // namespace pw::intrusive_list_impl
diff --git a/pw_containers/intrusive_list_test.cc b/pw_containers/intrusive_list_test.cc
index 3d666e4..9f56c1d 100644
--- a/pw_containers/intrusive_list_test.cc
+++ b/pw_containers/intrusive_list_test.cc
@@ -142,24 +142,25 @@
 
 TEST(IntrusiveList, PushOne) {
   constexpr int kMagicValue = 31;
-  IntrusiveList<TestItem> test_items;
   TestItem item1(kMagicValue);
-  test_items.push_back(item1);
-  EXPECT_FALSE(test_items.empty());
-  EXPECT_EQ(test_items.front().GetNumber(), kMagicValue);
+  IntrusiveList<TestItem> list;
+  list.push_back(item1);
+  EXPECT_FALSE(list.empty());
+  EXPECT_EQ(list.front().GetNumber(), kMagicValue);
 }
 
 TEST(IntrusiveList, PushThree) {
-  IntrusiveList<TestItem> test_items;
   TestItem item1(1);
   TestItem item2(2);
   TestItem item3(3);
-  test_items.push_back(item1);
-  test_items.push_back(item2);
-  test_items.push_back(item3);
+
+  IntrusiveList<TestItem> list;
+  list.push_back(item1);
+  list.push_back(item2);
+  list.push_back(item3);
 
   int loop_count = 0;
-  for (auto& test_item : test_items) {
+  for (auto& test_item : list) {
     loop_count++;
     EXPECT_EQ(loop_count, test_item.GetNumber());
   }
@@ -167,35 +168,38 @@
 }
 
 TEST(IntrusiveList, IsEmpty) {
-  IntrusiveList<TestItem> test_items;
-  EXPECT_TRUE(test_items.empty());
-
   TestItem item1(1);
-  test_items.push_back(item1);
-  EXPECT_FALSE(test_items.empty());
+
+  IntrusiveList<TestItem> list;
+  EXPECT_TRUE(list.empty());
+
+  list.push_back(item1);
+  EXPECT_FALSE(list.empty());
 }
 
 TEST(IntrusiveList, InsertAfter) {
+  // Create a test item to insert midway through the list.
   constexpr int kMagicValue = 42;
+  TestItem inserted_item(kMagicValue);
+
+  // Create initial values to fill in the start/end.
   TestItem item_array[20];
-  IntrusiveList<TestItem> test_list;
+
+  IntrusiveList<TestItem> list;
   // Fill the list with TestItem objects that have a value of zero.
   for (size_t i = 0; i < PW_ARRAY_SIZE(item_array); ++i) {
     item_array[i].SetNumber(0);
-    test_list.push_back(item_array[i]);
+    list.push_back(item_array[i]);
   }
 
-  // Create a test item to insert midway through the list.
-  TestItem inserted_item(kMagicValue);
-
-  // Move an iterator to the middle of the list, and then insert the item.
-  auto it = test_list.begin();
+  // Move an iterator to the middle of the list, and then insert the magic item.
+  auto it = list.begin();
   size_t expected_index = 1;  // Expected index is iterator index + 1.
   for (size_t i = 0; i < PW_ARRAY_SIZE(item_array) / 2; ++i) {
     it++;
     expected_index++;
   }
-  it = test_list.insert_after(it, inserted_item);
+  it = list.insert_after(it, inserted_item);
 
   // Ensure the returned iterator from insert_after is the newly inserted
   // element.
@@ -203,7 +207,7 @@
 
   // Ensure the value is in the expected location (index of the iterator + 1).
   size_t i = 0;
-  for (TestItem& item : test_list) {
+  for (TestItem& item : list) {
     if (item.GetNumber() == kMagicValue) {
       EXPECT_EQ(i, expected_index);
     } else {
@@ -218,68 +222,69 @@
 
 TEST(IntrusiveList, PushFront) {
   constexpr int kMagicValue = 42;
+  TestItem pushed_item(kMagicValue);
+
   TestItem item_array[20];
-  IntrusiveList<TestItem> test_list;
+  IntrusiveList<TestItem> list;
   // Fill the list with TestItem objects that have a value of zero.
   for (size_t i = 0; i < PW_ARRAY_SIZE(item_array); ++i) {
     item_array[i].SetNumber(0);
-    test_list.push_back(item_array[i]);
+    list.push_back(item_array[i]);
   }
 
   // Create a test item to push to the front of the list.
-  TestItem pushed_item(kMagicValue);
-  test_list.push_front(pushed_item);
-  EXPECT_EQ(test_list.front().GetNumber(), kMagicValue);
+  list.push_front(pushed_item);
+  EXPECT_EQ(list.front().GetNumber(), kMagicValue);
 }
 
 TEST(IntrusiveList, Clear_Empty) {
-  IntrusiveList<TestItem> test_list;
-  EXPECT_TRUE(test_list.empty());
-  test_list.clear();
-  EXPECT_TRUE(test_list.empty());
+  IntrusiveList<TestItem> list;
+  EXPECT_TRUE(list.empty());
+  list.clear();
+  EXPECT_TRUE(list.empty());
 }
 
 TEST(IntrusiveList, Clear_OneItem) {
-  IntrusiveList<TestItem> test_list;
   TestItem item(42);
-  test_list.push_back(item);
-  EXPECT_FALSE(test_list.empty());
-  test_list.clear();
-  EXPECT_TRUE(test_list.empty());
+  IntrusiveList<TestItem> list;
+  list.push_back(item);
+  EXPECT_FALSE(list.empty());
+  list.clear();
+  EXPECT_TRUE(list.empty());
 }
 
 TEST(IntrusiveList, Clear_TwoItems) {
-  IntrusiveList<TestItem> test_list;
   TestItem item1(42);
   TestItem item2(42);
-  test_list.push_back(item1);
-  test_list.push_back(item2);
-  EXPECT_FALSE(test_list.empty());
-  test_list.clear();
-  EXPECT_TRUE(test_list.empty());
+  IntrusiveList<TestItem> list;
+  list.push_back(item1);
+  list.push_back(item2);
+  EXPECT_FALSE(list.empty());
+  list.clear();
+  EXPECT_TRUE(list.empty());
 }
 
 TEST(IntrusiveList, Clear_ReinsertClearedItems) {
   std::array<TestItem, 20> item_array;
-  IntrusiveList<TestItem> test_list;
-  EXPECT_TRUE(test_list.empty());
-  test_list.clear();
-  EXPECT_TRUE(test_list.empty());
+  IntrusiveList<TestItem> list;
+  EXPECT_TRUE(list.empty());
+  list.clear();
+  EXPECT_TRUE(list.empty());
 
   // Fill the list with TestItem objects.
   for (size_t i = 0; i < item_array.size(); ++i) {
     item_array[i].SetNumber(0);
-    test_list.push_back(item_array[i]);
+    list.push_back(item_array[i]);
   }
 
   // Remove everything.
-  test_list.clear();
-  EXPECT_TRUE(test_list.empty());
+  list.clear();
+  EXPECT_TRUE(list.empty());
 
   // Ensure all the removed elements can still be added back to a list.
   for (size_t i = 0; i < item_array.size(); ++i) {
     item_array[i].SetNumber(0);
-    test_list.push_back(item_array[i]);
+    list.push_back(item_array[i]);
   }
 }
 
@@ -287,64 +292,63 @@
   constexpr int kValue1 = 32;
   constexpr int kValue2 = 4083;
 
-  IntrusiveList<TestItem> test_list;
-  EXPECT_TRUE(test_list.empty());
-
   TestItem item1(kValue1);
   TestItem item2(kValue2);
 
-  test_list.push_front(item2);
-  test_list.push_front(item1);
-  test_list.pop_front();
-  EXPECT_EQ(test_list.front().GetNumber(), kValue2);
-  EXPECT_FALSE(test_list.empty());
-  test_list.pop_front();
-  EXPECT_TRUE(test_list.empty());
+  IntrusiveList<TestItem> list;
+  EXPECT_TRUE(list.empty());
+
+  list.push_front(item2);
+  list.push_front(item1);
+  list.pop_front();
+  EXPECT_EQ(list.front().GetNumber(), kValue2);
+  EXPECT_FALSE(list.empty());
+  list.pop_front();
+  EXPECT_TRUE(list.empty());
 }
 
 TEST(IntrusiveList, PopFrontAndReinsert) {
   constexpr int kValue1 = 32;
   constexpr int kValue2 = 4083;
 
-  IntrusiveList<TestItem> test_list;
-  EXPECT_TRUE(test_list.empty());
-
   TestItem item1(kValue1);
   TestItem item2(kValue2);
 
-  test_list.push_front(item2);
-  test_list.push_front(item1);
-  test_list.pop_front();
-  test_list.push_front(item1);
-  EXPECT_EQ(test_list.front().GetNumber(), kValue1);
+  IntrusiveList<TestItem> list;
+  EXPECT_TRUE(list.empty());
+
+  list.push_front(item2);
+  list.push_front(item1);
+  list.pop_front();
+  list.push_front(item1);
+  EXPECT_EQ(list.front().GetNumber(), kValue1);
 }
 
 TEST(IntrusiveList, ListFront) {
-  IntrusiveList<TestItem> test_items;
-
   TestItem item1(1);
   TestItem item2(0);
   TestItem item3(0xffff);
 
-  test_items.push_back(item1);
-  test_items.push_back(item2);
-  test_items.push_back(item3);
+  IntrusiveList<TestItem> list;
+  list.push_back(item1);
+  list.push_back(item2);
+  list.push_back(item3);
 
-  EXPECT_EQ(&item1, &test_items.front());
-  EXPECT_EQ(&item1, &(*test_items.begin()));
+  EXPECT_EQ(&item1, &list.front());
+  EXPECT_EQ(&item1, &(*list.begin()));
 }
 
 TEST(IntrusiveList, IteratorIncrement) {
   TestItem item_array[20];
-  IntrusiveList<TestItem> test_list;
+  IntrusiveList<TestItem> list;
   for (size_t i = 0; i < PW_ARRAY_SIZE(item_array); ++i) {
     item_array[i].SetNumber(i);
-    test_list.push_back(item_array[i]);
+    list.push_back(item_array[i]);
   }
 
-  auto it = test_list.begin();
+  auto it = list.begin();
   int i = 0;
-  while (it != test_list.end()) {
+  while (it != list.end()) {
     if (i == 0) {
       // Test pre-incrementing on the first element.
       EXPECT_EQ((++it)->GetNumber(), item_array[++i].GetNumber());
@@ -358,12 +362,12 @@
   // For this test, items are checked to be non-zero.
   TestItem item1(1);
   TestItem item2(99);
-  IntrusiveList<TestItem> test_items;
+  IntrusiveList<TestItem> list;
 
-  const IntrusiveList<TestItem>* const_list = &test_items;
+  const IntrusiveList<TestItem>* const_list = &list;
 
-  test_items.push_back(item1);
-  test_items.push_back(item2);
+  list.push_back(item1);
+  list.push_back(item2);
 
   auto it = const_list->begin();
   while (it != const_list->end()) {
@@ -372,18 +376,19 @@
   }
 }
 
-#if NO_COMPILE_TESTS
 // TODO(pwbug/47): These tests should fail to compile, enable when no-compile
 // tests are set up in Pigweed.
+#define NO_COMPILE_TESTS 0
+#if NO_COMPILE_TESTS
 TEST(IntrusiveList, ConstIteratorModify) {
   TestItem item1(1);
   TestItem item2(99);
-  IntrusiveList<TestItem> test_items;
+  IntrusiveList<TestItem> list;
 
-  const IntrusiveList<TestItem>* const_list = &test_items;
+  const IntrusiveList<TestItem>* const_list = &list;
 
-  test_items.push_back(item1);
-  test_items.push_back(item2);
+  list.push_back(item1);
+  list.push_back(item2);
 
   auto it = const_list->begin();
   while (it != const_list->end()) {
@@ -391,13 +396,12 @@
     it++;
   }
 }
-
 #endif  // NO_COMPILE_TESTS
 
 // TODO(pwbug/88): These tests should trigger a CHECK failure. This requires
 // using a testing version of pw_assert.
+#define TESTING_CHECK_FAILURES_IS_SUPPORTED 0
 #if TESTING_CHECK_FAILURES_IS_SUPPORTED
-
 TEST(IntrusiveList, Construct_DuplicateItems) {
   TestItem item(1);
   IntrusiveList<TestItem> list({&item, &item});
@@ -430,7 +434,6 @@
 
   list.push_front(item);
 }
-
 #endif  // TESTING_CHECK_FAILURES_IS_SUPPORTED
 
 TEST(IntrusiveList, EraseAfter_FirstItem) {
@@ -517,5 +520,88 @@
   EXPECT_EQ(list.end(), it);
 }
 
+TEST(IntrusiveList, ItemsRemoveThemselvesFromListsWhenDestructed) {
+  // Create a list with some items it.
+  TestItem a, b, c, d;
+  IntrusiveList<TestItem> list;
+  list.push_back(a);
+  list.push_back(b);
+  list.push_back(c);
+  list.push_back(d);
+
+  // Insert items that will be destructed before the list.
+  {
+    TestItem x, y, z, w;
+    list.push_back(x);
+    list.push_back(z);
+    list.push_front(y);
+    list.push_front(w);
+
+    auto it = list.begin();
+    EXPECT_EQ(&w, &(*it++));
+    EXPECT_EQ(&y, &(*it++));
+    EXPECT_EQ(&a, &(*it++));
+    EXPECT_EQ(&b, &(*it++));
+    EXPECT_EQ(&c, &(*it++));
+    EXPECT_EQ(&d, &(*it++));
+    EXPECT_EQ(&x, &(*it++));
+    EXPECT_EQ(&z, &(*it++));
+    EXPECT_EQ(list.end(), it);
+
+    // Here, x, y, z, w are removed from the list for the destructor.
+  }
+
+  // Ensure we get back our original list.
+  auto it = list.begin();
+  EXPECT_EQ(&a, &(*it++));
+  EXPECT_EQ(&b, &(*it++));
+  EXPECT_EQ(&c, &(*it++));
+  EXPECT_EQ(&d, &(*it++));
+  EXPECT_EQ(list.end(), it);
+}
+
+TEST(IntrusiveList, SizeBasic) {
+  IntrusiveList<TestItem> list;
+  EXPECT_EQ(list.size(), 0u);
+
+  TestItem one(55);
+  list.push_front(one);
+  EXPECT_EQ(list.size(), static_cast<size_t>(1));
+
+  TestItem two(66);
+  list.push_back(two);
+  EXPECT_EQ(list.size(), static_cast<size_t>(2));
+
+  TestItem thr(77);
+  list.push_back(thr);
+  EXPECT_EQ(list.size(), static_cast<size_t>(3));
+}
+
+TEST(IntrusiveList, SizeScoped) {
+  IntrusiveList<TestItem> list;
+  EXPECT_EQ(list.size(), 0u);
+
+  // Add elements in new scopes; verify size on the way in and on the way out.
+  {
+    TestItem one(55);
+    list.push_back(one);
+    EXPECT_EQ(list.size(), static_cast<size_t>(1));
+
+    {
+      TestItem two(66);
+      list.push_back(two);
+      EXPECT_EQ(list.size(), static_cast<size_t>(2));
+      {
+        TestItem thr(77);
+        list.push_back(thr);
+        EXPECT_EQ(list.size(), static_cast<size_t>(3));
+      }
+      EXPECT_EQ(list.size(), static_cast<size_t>(2));
+    }
+    EXPECT_EQ(list.size(), static_cast<size_t>(1));
+  }
+  EXPECT_EQ(list.size(), static_cast<size_t>(0));
+}
+
 }  // namespace
 }  // namespace pw
diff --git a/pw_containers/public/pw_containers/internal/intrusive_list_impl.h b/pw_containers/public/pw_containers/internal/intrusive_list_impl.h
index fe7e07d..13bd448 100644
--- a/pw_containers/public/pw_containers/internal/intrusive_list_impl.h
+++ b/pw_containers/public/pw_containers/internal/intrusive_list_impl.h
@@ -71,7 +71,17 @@
  public:
   class Item {
    protected:
-    constexpr Item() : Item(nullptr) {}
+    constexpr Item() : Item(this) {}
+
+    bool unlisted() const { return this == next_; }
+
+    // Unlink this from the list it is apart of, if any. Specifying prev saves
+    // calling previous(), which requires looping around the cycle.
+    void unlist(Item* prev = nullptr);
+
+    Item* previous();  // Note: O(n) since it loops around the cycle.
+
+    ~Item();
 
    private:
     friend class List;
@@ -81,6 +91,7 @@
 
     constexpr Item(Item* next) : next_(next) {}
 
+    // The next pointer. Unlisted items must be self-cycles (next_ == this).
     Item* next_;
   };
 
@@ -95,8 +106,6 @@
   List(const List&) = delete;
   List& operator=(const List&) = delete;
 
-  ~List() { clear(); }
-
   template <typename Iterator>
   void assign(Iterator first, Iterator last) {
     clear();
@@ -124,6 +133,8 @@
   constexpr Item* end() noexcept { return &head_; }
   constexpr const Item* end() const noexcept { return &head_; }
 
+  size_t size() const;
+
  private:
   template <typename Iterator>
   void AssignFromIterator(Iterator first, Iterator last);
diff --git a/pw_containers/public/pw_containers/intrusive_list.h b/pw_containers/public/pw_containers/intrusive_list.h
index 14be30f..4b6bae4 100644
--- a/pw_containers/public/pw_containers/intrusive_list.h
+++ b/pw_containers/public/pw_containers/intrusive_list.h
@@ -48,6 +48,7 @@
 //   for (auto& test_item : test_items) {
 //     // Do a thing.
 //   }
+//
 template <typename T>
 class IntrusiveList {
  public:
@@ -145,6 +146,9 @@
   }
   const_iterator cend() const noexcept { return end(); }
 
+  // Operation is O(size).
+  size_t size() const { return list_.size(); }
+
  private:
   // Check that T is an Item in a function, since the class T will not be fully
   // defined when the IntrusiveList<T> class is instantiated.
diff --git a/pw_cpu_exception/BUILD.gn b/pw_cpu_exception/BUILD.gn
index 435781d..f2562e2 100644
--- a/pw_cpu_exception/BUILD.gn
+++ b/pw_cpu_exception/BUILD.gn
@@ -12,12 +12,12 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/facade.gni")
 import("$dir_pw_docgen/docs.gni")
 import("backend.gni")
+
 config("default_config") {
   include_dirs = [ "public" ]
 }
@@ -52,16 +52,14 @@
 
 pw_facade("entry") {
   backend = pw_cpu_exception_ENTRY_BACKEND
-  facade_name = "entry_facade"
   public_configs = [ ":default_config" ]
   public_deps = [ "$dir_pw_preprocessor" ]
-  deps = [ ":handler_facade" ]
+  deps = [ ":handler.facade" ]
   public = [ "public/pw_cpu_exception/entry.h" ]
 }
 
 pw_facade("handler") {
   backend = pw_cpu_exception_HANDLER_BACKEND
-  facade_name = "handler_facade"
   public_configs = [ ":default_config" ]
   public_deps = [
     "$dir_pw_preprocessor",
@@ -77,7 +75,6 @@
 # pw_CpuExceptionState members.
 pw_facade("support") {
   backend = pw_cpu_exception_SUPPORT_BACKEND
-  facade_name = "support_facade"
   public_configs = [ ":default_config" ]
   public_deps = [ "$dir_pw_span" ]
   public = [ "public/pw_cpu_exception/support.h" ]
@@ -85,7 +82,7 @@
 
 pw_source_set("basic_handler") {
   deps = [
-    ":handler_facade",
+    ":handler.facade",
     dir_pw_log,
   ]
   sources = [ "basic_handler.cc" ]
diff --git a/pw_cpu_exception/CMakeLists.txt b/pw_cpu_exception/CMakeLists.txt
index c30ab81..1e134c6 100644
--- a/pw_cpu_exception/CMakeLists.txt
+++ b/pw_cpu_exception/CMakeLists.txt
@@ -12,6 +12,8 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+
 pw_add_facade(pw_cpu_exception
   PUBLIC_DEPS
     pw_preprocessor
diff --git a/pw_cpu_exception/docs.rst b/pw_cpu_exception/docs.rst
index 99ff6ac..ec4892b 100644
--- a/pw_cpu_exception/docs.rst
+++ b/pw_cpu_exception/docs.rst
@@ -1,8 +1,4 @@
-.. _chapter-pw-cpu-exception:
-
-.. default-domain:: cpp
-
-.. highlight:: cpp
+.. _module-pw_cpu_exception:
 
 ----------------
 pw_cpu_exception
diff --git a/pw_cpu_exception/public/pw_cpu_exception/support.h b/pw_cpu_exception/public/pw_cpu_exception/support.h
index 5ce1cc5..a078334 100644
--- a/pw_cpu_exception/public/pw_cpu_exception/support.h
+++ b/pw_cpu_exception/public/pw_cpu_exception/support.h
@@ -38,4 +38,7 @@
 void ToString(const pw_CpuExceptionState& cpu_state,
               const std::span<char>& dest);
 
+// Logs captured CPU state using pw_log at PW_LOG_LEVEL_INFO.
+void LogCpuState(const pw_CpuExceptionState& cpu_state);
+
 }  // namespace pw::cpu_exception
diff --git a/pw_cpu_exception_armv7m/BUILD b/pw_cpu_exception_armv7m/BUILD
index 8844338..453b448 100644
--- a/pw_cpu_exception_armv7m/BUILD
+++ b/pw_cpu_exception_armv7m/BUILD
@@ -21,7 +21,9 @@
     srcs = [
         "entry.cc",
         "cpu_state.cc",
+        "proto_dump.cc",
         "public/pw_cpu_exception_armv7m/cpu_state.h",
+        "public/pw_cpu_exception_armv7m/proto_dump.h",
     ],
 )
 
diff --git a/pw_cpu_exception_armv7m/BUILD.gn b/pw_cpu_exception_armv7m/BUILD.gn
index c1206f5..61395e2 100644
--- a/pw_cpu_exception_armv7m/BUILD.gn
+++ b/pw_cpu_exception_armv7m/BUILD.gn
@@ -12,12 +12,14 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/target_types.gni")
+import("$dir_pw_cpu_exception/backend.gni")
 import("$dir_pw_docgen/docs.gni")
+import("$dir_pw_protobuf_compiler/proto.gni")
 import("$dir_pw_unit_test/test.gni")
+
 config("default_config") {
   include_dirs = [ "public" ]
 }
@@ -25,19 +27,37 @@
 pw_source_set("support") {
   public_configs = [ ":default_config" ]
   public_deps = [
-    "$dir_pw_cpu_exception:support_facade",
-    "$dir_pw_preprocessor",
-    "$dir_pw_string",
+    "$dir_pw_cpu_exception:support.facade",
+    dir_pw_preprocessor,
+    dir_pw_string,
   ]
+  deps = [ dir_pw_log ]
   public = [ "public/pw_cpu_exception_armv7m/cpu_state.h" ]
   sources = [ "cpu_state.cc" ]
 }
 
+pw_source_set("proto_dump") {
+  public_deps = [
+    ":support",
+    dir_pw_protobuf,
+    dir_pw_status,
+    dir_pw_stream,
+  ]
+  public = [ "public/pw_cpu_exception_armv7m/proto_dump.h" ]
+  deps = [ ":cpu_state_protos.pwpb" ]
+  sources = [ "proto_dump.cc" ]
+}
+
+pw_proto_library("cpu_state_protos") {
+  sources = [ "pw_cpu_exception_armv7m_protos/cpu_state.proto" ]
+}
+
 pw_source_set("pw_cpu_exception_armv7m") {
   public_configs = [ ":default_config" ]
   public_deps = [
+    ":proto_dump",
     ":support",
-    "$dir_pw_cpu_exception:entry_facade",
+    "$dir_pw_cpu_exception:entry.facade",
     "$dir_pw_cpu_exception:handler",
     "$dir_pw_preprocessor",
   ]
@@ -45,6 +65,7 @@
 }
 
 pw_test_group("tests") {
+  enable_if = pw_cpu_exception_ENTRY_BACKEND == dir_pw_cpu_exception_armv7m
   tests = [ ":cpu_exception_entry_test" ]
 }
 
diff --git a/pw_cpu_exception_armv7m/CMakeLists.txt b/pw_cpu_exception_armv7m/CMakeLists.txt
index 7f1bf23..55d3a01 100644
--- a/pw_cpu_exception_armv7m/CMakeLists.txt
+++ b/pw_cpu_exception_armv7m/CMakeLists.txt
@@ -12,6 +12,8 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+
 pw_auto_add_simple_module(pw_cpu_exception_armv7m
   IMPLEMENTS_FACADE
     pw_cpu_exception
diff --git a/pw_cpu_exception_armv7m/cpu_state.cc b/pw_cpu_exception_armv7m/cpu_state.cc
index 466a216..bec977e 100644
--- a/pw_cpu_exception_armv7m/cpu_state.cc
+++ b/pw_cpu_exception_armv7m/cpu_state.cc
@@ -19,6 +19,7 @@
 #include <span>
 
 #include "pw_cpu_exception/support.h"
+#include "pw_log/log.h"
 #include "pw_string/string_builder.h"
 
 namespace pw::cpu_exception {
@@ -50,6 +51,8 @@
   _PW_FORMAT_REGISTER(extended, mmfar);
   _PW_FORMAT_REGISTER(extended, bfar);
   _PW_FORMAT_REGISTER(extended, icsr);
+  _PW_FORMAT_REGISTER(extended, hfsr);
+  _PW_FORMAT_REGISTER(extended, shcsr);
   _PW_FORMAT_REGISTER(extended, control);
 
   // General purpose registers.
@@ -70,4 +73,47 @@
 #undef _PW_FORMAT_REGISTER
 }
 
+// Using this function adds approximately 100 bytes to binary size.
+void LogCpuState(const pw_CpuExceptionState& cpu_state) {
+  const ArmV7mFaultRegisters& base = cpu_state.base;
+  const ArmV7mExtraRegisters& extended = cpu_state.extended;
+
+  PW_LOG_INFO("Captured CPU state:");
+
+#define _PW_LOG_REGISTER(state_section, name) \
+  PW_LOG_INFO("  %-10s 0x%08" PRIx32, #name, state_section.name)
+
+  // Other registers.
+  _PW_LOG_REGISTER(base, pc);
+  _PW_LOG_REGISTER(base, lr);
+  _PW_LOG_REGISTER(base, psr);
+  _PW_LOG_REGISTER(extended, msp);
+  _PW_LOG_REGISTER(extended, psp);
+  _PW_LOG_REGISTER(extended, exc_return);
+  _PW_LOG_REGISTER(extended, cfsr);
+  _PW_LOG_REGISTER(extended, mmfar);
+  _PW_LOG_REGISTER(extended, bfar);
+  _PW_LOG_REGISTER(extended, icsr);
+  _PW_LOG_REGISTER(extended, hfsr);
+  _PW_LOG_REGISTER(extended, shcsr);
+  _PW_LOG_REGISTER(extended, control);
+
+  // General purpose registers.
+  _PW_LOG_REGISTER(base, r0);
+  _PW_LOG_REGISTER(base, r1);
+  _PW_LOG_REGISTER(base, r2);
+  _PW_LOG_REGISTER(base, r3);
+  _PW_LOG_REGISTER(extended, r4);
+  _PW_LOG_REGISTER(extended, r5);
+  _PW_LOG_REGISTER(extended, r6);
+  _PW_LOG_REGISTER(extended, r7);
+  _PW_LOG_REGISTER(extended, r8);
+  _PW_LOG_REGISTER(extended, r9);
+  _PW_LOG_REGISTER(extended, r10);
+  _PW_LOG_REGISTER(extended, r11);
+  _PW_LOG_REGISTER(base, r12);
+
+#undef _PW_LOG_REGISTER
+}
+
 }  // namespace pw::cpu_exception
diff --git a/pw_cpu_exception_armv7m/docs.rst b/pw_cpu_exception_armv7m/docs.rst
index d55339f..9c79720 100644
--- a/pw_cpu_exception_armv7m/docs.rst
+++ b/pw_cpu_exception_armv7m/docs.rst
@@ -1,8 +1,4 @@
-.. _chapter-cpu-exception-armv7m:
-
-.. default-domain:: cpp
-
-.. highlight:: sh
+.. _module-pw_cpu_exception_armv7m:
 
 -----------------------
 pw_cpu_exception_armv7m
diff --git a/pw_cpu_exception_armv7m/entry.cc b/pw_cpu_exception_armv7m/entry.cc
index a247dee..77d5c43 100644
--- a/pw_cpu_exception_armv7m/entry.cc
+++ b/pw_cpu_exception_armv7m/entry.cc
@@ -39,14 +39,18 @@
 constexpr uint32_t kExcReturnBasicFrameMask = (0x1u << 4);
 
 // Memory mapped registers. (ARMv7-M Section B3.2.2, Table B3-4)
-volatile uint32_t& arm_v7m_icsr =
-    *reinterpret_cast<volatile uint32_t*>(0xE000ED04u);
 volatile uint32_t& arm_v7m_cfsr =
     *reinterpret_cast<volatile uint32_t*>(0xE000ED28u);
 volatile uint32_t& arm_v7m_mmfar =
     *reinterpret_cast<volatile uint32_t*>(0xE000ED34u);
 volatile uint32_t& arm_v7m_bfar =
     *reinterpret_cast<volatile uint32_t*>(0xE000ED38u);
+volatile uint32_t& arm_v7m_icsr =
+    *reinterpret_cast<volatile uint32_t*>(0xE000ED04u);
+volatile uint32_t& arm_v7m_hfsr =
+    *reinterpret_cast<volatile uint32_t*>(0xE000ED2Cu);
+volatile uint32_t& arm_v7m_shcsr =
+    *reinterpret_cast<volatile uint32_t*>(0xE000ED24u);
 
 // If the CPU fails to capture some registers, the captured struct members will
 // be populated with this value. The only registers that this value should be
@@ -167,9 +171,11 @@
 PW_USED void pw_PackageAndHandleCpuException(pw_CpuExceptionState* cpu_state) {
   // Capture memory mapped registers.
   cpu_state->extended.cfsr = arm_v7m_cfsr;
-  cpu_state->extended.icsr = arm_v7m_icsr;
-  cpu_state->extended.bfar = arm_v7m_bfar;
   cpu_state->extended.mmfar = arm_v7m_mmfar;
+  cpu_state->extended.bfar = arm_v7m_bfar;
+  cpu_state->extended.icsr = arm_v7m_icsr;
+  cpu_state->extended.hfsr = arm_v7m_hfsr;
+  cpu_state->extended.shcsr = arm_v7m_shcsr;
 
   // CPU may have automatically pushed state to the program stack. If it did,
   // the values can be copied into in the pw_CpuExceptionState struct that is
diff --git a/pw_cpu_exception_armv7m/exception_entry_test.cc b/pw_cpu_exception_armv7m/exception_entry_test.cc
index 3000833..7cf2226 100644
--- a/pw_cpu_exception_armv7m/exception_entry_test.cc
+++ b/pw_cpu_exception_armv7m/exception_entry_test.cc
@@ -570,6 +570,7 @@
 
   // Clear HFSR forced (nested) hard fault mask if set. This will only be
   // set by the nested fault test.
+  EXPECT_EQ(state->extended.hfsr, arm_v7m_hfsr);
   if (arm_v7m_hfsr & kForcedHardfaultMask) {
     arm_v7m_hfsr = kForcedHardfaultMask;
   }
@@ -607,6 +608,8 @@
     return;
   }
 
+  EXPECT_EQ(state->extended.shcsr, arm_v7m_shcsr);
+
   // If an unexpected exception occurred, just enter an infinite loop.
   while (true) {
   }
diff --git a/pw_cpu_exception_armv7m/proto_dump.cc b/pw_cpu_exception_armv7m/proto_dump.cc
new file mode 100644
index 0000000..086652d
--- /dev/null
+++ b/pw_cpu_exception_armv7m/proto_dump.cc
@@ -0,0 +1,64 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#include "pw_cpu_exception_armv7m/cpu_state.h"
+#include "pw_cpu_exception_armv7m_protos/cpu_state.pwpb.h"
+#include "pw_preprocessor/compiler.h"
+#include "pw_protobuf/encoder.h"
+
+namespace pw::cpu_exception {
+
+Status DumpCpuStateProto(protobuf::Encoder& dest,
+                         const pw_CpuExceptionState& cpu_state) {
+  armv7m::ArmV7mCpuState::Encoder state_encoder(&dest);
+
+  // Special and mem-mapped registers.
+  state_encoder.WritePc(cpu_state.base.pc);
+  state_encoder.WriteLr(cpu_state.base.lr);
+  state_encoder.WritePsr(cpu_state.base.psr);
+  state_encoder.WriteMsp(cpu_state.extended.msp);
+  state_encoder.WritePsp(cpu_state.extended.psp);
+  state_encoder.WriteExcReturn(cpu_state.extended.exc_return);
+  state_encoder.WriteCfsr(cpu_state.extended.cfsr);
+  state_encoder.WriteMmfar(cpu_state.extended.mmfar);
+  state_encoder.WriteBfar(cpu_state.extended.bfar);
+  state_encoder.WriteIcsr(cpu_state.extended.icsr);
+  state_encoder.WriteHfsr(cpu_state.extended.hfsr);
+  state_encoder.WriteShcsr(cpu_state.extended.shcsr);
+  state_encoder.WriteControl(cpu_state.extended.control);
+
+  // General purpose registers.
+  state_encoder.WriteR0(cpu_state.base.r0);
+  state_encoder.WriteR1(cpu_state.base.r1);
+  state_encoder.WriteR2(cpu_state.base.r2);
+  state_encoder.WriteR3(cpu_state.base.r3);
+  state_encoder.WriteR4(cpu_state.extended.r4);
+  state_encoder.WriteR5(cpu_state.extended.r5);
+  state_encoder.WriteR6(cpu_state.extended.r6);
+  state_encoder.WriteR7(cpu_state.extended.r7);
+  state_encoder.WriteR8(cpu_state.extended.r8);
+  state_encoder.WriteR9(cpu_state.extended.r9);
+  state_encoder.WriteR10(cpu_state.extended.r10);
+  state_encoder.WriteR11(cpu_state.extended.r11);
+
+  // If the encode buffer was exhausted in an earlier write, it will be
+  // reflected here.
+  Status status = state_encoder.WriteR12(cpu_state.base.r12);
+  if (!status.ok()) {
+    return status == Status::ResourceExhausted() ? Status::ResourceExhausted()
+                                                 : Status::Unknown();
+  }
+  return Status::Ok();
+}
+
+}  // namespace pw::cpu_exception
diff --git a/pw_cpu_exception_armv7m/public/pw_cpu_exception_armv7m/cpu_state.h b/pw_cpu_exception_armv7m/public/pw_cpu_exception_armv7m/cpu_state.h
index f38b949..23158c8 100644
--- a/pw_cpu_exception_armv7m/public/pw_cpu_exception_armv7m/cpu_state.h
+++ b/pw_cpu_exception_armv7m/public/pw_cpu_exception_armv7m/cpu_state.h
@@ -68,6 +68,8 @@
   uint32_t mmfar;
   uint32_t bfar;
   uint32_t icsr;
+  uint32_t hfsr;
+  uint32_t shcsr;
   // Special registers.
   uint32_t exc_return;
   uint32_t msp;
diff --git a/pw_cpu_exception_armv7m/public/pw_cpu_exception_armv7m/proto_dump.h b/pw_cpu_exception_armv7m/public/pw_cpu_exception_armv7m/proto_dump.h
new file mode 100644
index 0000000..b4d99a2
--- /dev/null
+++ b/pw_cpu_exception_armv7m/public/pw_cpu_exception_armv7m/proto_dump.h
@@ -0,0 +1,33 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include "pw_cpu_exception_armv7m/cpu_state.h"
+#include "pw_protobuf/encoder.h"
+#include "pw_status/status.h"
+
+namespace pw::cpu_exception {
+
+// Dumps the cpu state struct as a proto (defined in
+// pw_cpu_exception_armv7m_protos/cpu_state.proto). The final proto is up to 144
+// bytes in size, so ensure your encoder is properly sized.
+//
+// Returns:
+//   OK - Entire proto was written to the encoder.
+//   RESOURCE_EXHAUSTED - Insufficient space to encode proto.
+//   UNKNOWN - Some other proto encoding error occurred.
+Status DumpCpuStateProto(protobuf::Encoder& dest,
+                         const pw_CpuExceptionState& cpu_state);
+
+}  // namespace pw::cpu_exception
diff --git a/pw_cpu_exception_armv7m/pw_cpu_exception_armv7m_protos/cpu_state.proto b/pw_cpu_exception_armv7m/pw_cpu_exception_armv7m_protos/cpu_state.proto
new file mode 100644
index 0000000..a557d0e
--- /dev/null
+++ b/pw_cpu_exception_armv7m/pw_cpu_exception_armv7m_protos/cpu_state.proto
@@ -0,0 +1,49 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+syntax = "proto2";
+
+package pw.cpu_exception.armv7m;
+
+message ArmV7mCpuState {
+  optional uint32 pc = 1;
+  optional uint32 lr = 2;
+  optional uint32 psr = 3;
+  optional uint32 msp = 4;
+  optional uint32 psp = 5;
+  optional uint32 exc_return = 6;
+  optional uint32 cfsr = 7;
+  optional uint32 mmfar = 8;
+  optional uint32 bfar = 9;
+  optional uint32 icsr = 10;
+  optional uint32 hfsr = 25;
+  optional uint32 shcsr = 26;
+  optional uint32 control = 11;
+
+  // General purpose registers.
+  optional uint32 r0 = 12;
+  optional uint32 r1 = 13;
+  optional uint32 r2 = 14;
+  optional uint32 r3 = 15;
+  optional uint32 r4 = 16;
+  optional uint32 r5 = 17;
+  optional uint32 r6 = 18;
+  optional uint32 r7 = 19;
+  optional uint32 r8 = 20;
+  optional uint32 r9 = 21;
+  optional uint32 r10 = 22;
+  optional uint32 r11 = 23;
+  optional uint32 r12 = 24;
+
+  // Next tag: 27
+}
diff --git a/pw_docgen/BUILD.gn b/pw_docgen/BUILD.gn
index 43d6db4..0aaa8cd 100644
--- a/pw_docgen/BUILD.gn
+++ b/pw_docgen/BUILD.gn
@@ -12,10 +12,10 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("docs.gni")
+
 pw_doc_group("docs") {
   sources = [ "docs.rst" ]
 }
diff --git a/pw_docgen/docs.gni b/pw_docgen/docs.gni
index 194c4a9..a6354bb 100644
--- a/pw_docgen/docs.gni
+++ b/pw_docgen/docs.gni
@@ -12,11 +12,11 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/input_group.gni")
-import("$dir_pw_build/python_script.gni")
+import("$dir_pw_build/python_action.gni")
+
 declare_args() {
   # Whether or not the current target should build docs.
   pw_docgen_BUILD_DOCS = false
@@ -107,8 +107,8 @@
   _script_args += rebase_path(invoker.sources)
 
   if (pw_docgen_BUILD_DOCS) {
-    pw_python_script(target_name) {
-      script = "$dir_pw_docgen/py/docgen.py"
+    pw_python_action(target_name) {
+      script = "$dir_pw_docgen/py/pw_docgen/docgen.py"
       args = _script_args
       deps = [ ":$_metadata_file_target" ]
       inputs = [ invoker.conf ]
diff --git a/pw_docgen/docs.rst b/pw_docgen/docs.rst
index 10600b2..041893e 100644
--- a/pw_docgen/docs.rst
+++ b/pw_docgen/docs.rst
@@ -1,8 +1,4 @@
-.. _chapter-docgen:
-
-.. default-domain:: cpp
-
-.. highlight:: sh
+.. _module-pw_docgen:
 
 ---------
 pw_docgen
diff --git a/pw_protobuf_compiler/nanopb.gni b/pw_docgen/py/BUILD.gn
similarity index 73%
copy from pw_protobuf_compiler/nanopb.gni
copy to pw_docgen/py/BUILD.gn
index 3c3be32..28d4734 100644
--- a/pw_protobuf_compiler/nanopb.gni
+++ b/pw_docgen/py/BUILD.gn
@@ -12,8 +12,14 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-declare_args() {
-  # If compiling protos for nanopb, this variable is set to the path to the
-  # nanopb installation.
-  dir_pw_third_party_nanopb = ""
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/python.gni")
+
+pw_python_package("py") {
+  setup = [ "setup.py" ]
+  sources = [
+    "pw_docgen/__init__.py",
+    "pw_docgen/docgen.py",
+  ]
 }
diff --git a/pw_docgen/py/pw_docgen/__init__.py b/pw_docgen/py/pw_docgen/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pw_docgen/py/pw_docgen/__init__.py
diff --git a/pw_docgen/py/docgen.py b/pw_docgen/py/pw_docgen/docgen.py
similarity index 100%
rename from pw_docgen/py/docgen.py
rename to pw_docgen/py/pw_docgen/docgen.py
diff --git a/pw_docgen/py/pw_docgen/py.typed b/pw_docgen/py/pw_docgen/py.typed
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pw_docgen/py/pw_docgen/py.typed
diff --git a/pw_protobuf_compiler/nanopb.gni b/pw_docgen/py/setup.py
similarity index 61%
copy from pw_protobuf_compiler/nanopb.gni
copy to pw_docgen/py/setup.py
index 3c3be32..6953c15 100644
--- a/pw_protobuf_compiler/nanopb.gni
+++ b/pw_docgen/py/setup.py
@@ -11,9 +11,17 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations under
 # the License.
+"""pw_docgen"""
 
-declare_args() {
-  # If compiling protos for nanopb, this variable is set to the path to the
-  # nanopb installation.
-  dir_pw_third_party_nanopb = ""
-}
+import setuptools  # type: ignore
+
+setuptools.setup(
+    name='pw_docgen',
+    version='0.0.1',
+    author='Pigweed Authors',
+    author_email='pigweed-developers@googlegroups.com',
+    description='Generate Sphinx documentation',
+    packages=setuptools.find_packages(),
+    package_data={'pw_docgen': ['py.typed']},
+    zip_safe=False,
+)
diff --git a/pw_doctor/BUILD.gn b/pw_doctor/BUILD.gn
index 4ad8f50..0969672 100644
--- a/pw_doctor/BUILD.gn
+++ b/pw_doctor/BUILD.gn
@@ -12,10 +12,10 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_docgen/docs.gni")
+
 pw_doc_group("docs") {
   sources = [ "docs.rst" ]
 }
diff --git a/pw_doctor/docs.rst b/pw_doctor/docs.rst
index a9ddd99..51087a5 100644
--- a/pw_doctor/docs.rst
+++ b/pw_doctor/docs.rst
@@ -1,4 +1,4 @@
-.. _chapter-pw-doctor:
+.. _module-pw_doctor:
 
 ---------
 pw_doctor
diff --git a/pw_protobuf_compiler/nanopb.gni b/pw_doctor/py/BUILD.gn
similarity index 73%
copy from pw_protobuf_compiler/nanopb.gni
copy to pw_doctor/py/BUILD.gn
index 3c3be32..0408128 100644
--- a/pw_protobuf_compiler/nanopb.gni
+++ b/pw_doctor/py/BUILD.gn
@@ -12,8 +12,14 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-declare_args() {
-  # If compiling protos for nanopb, this variable is set to the path to the
-  # nanopb installation.
-  dir_pw_third_party_nanopb = ""
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/python.gni")
+
+pw_python_package("py") {
+  setup = [ "setup.py" ]
+  sources = [
+    "pw_doctor/__init__.py",
+    "pw_doctor/doctor.py",
+  ]
 }
diff --git a/pw_doctor/py/pw_doctor/__init__.py b/pw_doctor/py/pw_doctor/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pw_doctor/py/pw_doctor/__init__.py
diff --git a/pw_doctor/py/pw_doctor/py.typed b/pw_doctor/py/pw_doctor/py.typed
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pw_doctor/py/pw_doctor/py.typed
diff --git a/pw_doctor/py/setup.py b/pw_doctor/py/setup.py
index 752f5e2..bc80827 100644
--- a/pw_doctor/py/setup.py
+++ b/pw_doctor/py/setup.py
@@ -13,7 +13,7 @@
 # the License.
 """The pw_doctor package."""
 
-import setuptools
+import setuptools  # type: ignore
 
 setuptools.setup(
     name='pw_doctor',
@@ -22,4 +22,6 @@
     author_email='pigweed-developers@googlegroups.com',
     description='Environment check script for Pigweed',
     packages=setuptools.find_packages(),
+    package_data={'pw_doctor': ['py.typed']},
+    zip_safe=False,
 )
diff --git a/pw_env_setup/BUILD.gn b/pw_env_setup/BUILD.gn
index 601472c..2232988 100644
--- a/pw_env_setup/BUILD.gn
+++ b/pw_env_setup/BUILD.gn
@@ -12,10 +12,11 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_docgen/docs.gni")
+
 pw_doc_group("docs") {
+  inputs = [ "doc_resources/pw_env_setup_output.png" ]
   sources = [ "docs.rst" ]
 }
diff --git a/pw_env_setup/doc_resources/pw_env_setup_output.png b/pw_env_setup/doc_resources/pw_env_setup_output.png
new file mode 100644
index 0000000..204ed18
--- /dev/null
+++ b/pw_env_setup/doc_resources/pw_env_setup_output.png
Binary files differ
diff --git a/pw_env_setup/docs.rst b/pw_env_setup/docs.rst
index ee72bcb..018424e 100644
--- a/pw_env_setup/docs.rst
+++ b/pw_env_setup/docs.rst
@@ -1,4 +1,4 @@
-.. _chapter-pw-env_setup:
+.. _module-pw_env_setup:
 
 ------------
 pw_env_setup
@@ -30,6 +30,17 @@
 
 .. _CIPD: https://github.com/luci/luci-go/tree/master/cipd
 
+Users interact with ``pw_env_setup`` using two commands: ``. bootstrap.sh`` and
+``. activate.sh``. The bootstrap command always pulls down the current versions
+of CIPD packages and sets up the Python virtual environment. The activate
+command reinitializes a previously configured environment, and if none is found,
+runs bootstrap.
+
+.. note::
+  On Windows the scripts used to set up the environment are ``bootstrap.bat``
+  and ``activate.bat``. For simplicity they will be referred to with the ``.sh``
+  endings unless the distinction is relevant.
+
 By default packages will be installed in a ``.environment`` folder within the
 checkout root, and CIPD will cache files in ``$HOME/.cipd-cache-dir``. These
 paths can be overridden by setting ``PW_ENVIRONMENT_ROOT`` and
@@ -43,25 +54,188 @@
 
 .. _send us a note: pigweed@googlegroups.com
 
-Projects using Pigweed can leverage ``pw_env_setup`` to install their own
-dependencies. The following environment variables are now used to pass options
-into pw_env_setup.
+==================================
+Using pw_env_setup in your project
+==================================
 
-    * ``PW_CIPD_PACKAGE_FILES``
-    * ``PW_VIRTUALENV_REQUIREMENTS``
-    * ``PW_VIRTUALENV_SETUP_PY_ROOTS``
-    * ``PW_CARGO_PACKAGE_FILES``
+Downstream Projects Using Pigweed's Packages
+********************************************
 
-Each of these variables can contain multiple entries separated by ``:``
-(or ``;`` on Windows) like the ``PATH`` environment variable. However, they
-will also be interpreted as globs, so
-``PW_VIRTUALENV_REQUIREMENTS="/foo/bar/*/requirements.txt"`` is perfectly
-valid. They should be full paths.
+Projects using Pigweed can leverage ``pw_env_setup`` to install Pigweed's
+dependencies or their own dependencies. Projects that only want to use Pigweed's
+dependencies without modifying them can just source Pigweed's ``bootstrap.sh``
+and ``activate.sh`` scripts.
 
-Projects depending on Pigweed should set these variables and then invoke
-Pigweed's ``bootstrap.sh`` (or ``bootstrap.bat``), which will add to each of
-these variables before invoking ``pw_env_setup``. Users wanting additional
-setup can set these variables in their shell init files. Pigweed will add to
-these variables and will not remove any existing values. At the end of
-Pigweed's bootstrap process, it will reset these variables to their initial
-values.
+An example of what your project's ``bootstrap.sh`` could look like is below. This
+assumes ``bootstrap.sh`` is at the top level of your repository.
+
+.. code-block:: bash
+
+  # Do not include a "#!" line, this must be sourced and not executed.
+
+  # This assumes the user is sourcing this file from its parent directory. See
+  # below for a more flexible way to handle this.
+  PROJ_SETUP_SCRIPT_PATH="$(pwd)/bootstrap.sh"
+
+  export PW_PROJECT_ROOT="$(_python_abspath "$(dirname "$PROJ_SETUP_SCRIPT_PATH")")"
+
+  # You may wish to check if the user is attempting to execute this script
+  # instead of sourcing it. See below for an example of how to handle that
+  # situation.
+
+  # Source Pigweed's bootstrap utility script.
+  # Using '.' instead of 'source' for POSIX compatibility. Since users don't use
+  # dash directly, using 'source' in most documentation so users don't get
+  # confused and try to `./bootstrap.sh`.
+  . "$PW_PROJECT_ROOT/third_party/pigweed/pw_env_setup/util.sh"
+
+  pw_check_root "$PW_ROOT"
+  _PW_ACTUAL_ENVIRONMENT_ROOT="$(pw_get_env_root)"
+  export _PW_ACTUAL_ENVIRONMENT_ROOT
+  SETUP_SH="$_PW_ACTUAL_ENVIRONMENT_ROOT/activate.sh"
+  pw_bootstrap --args...  # See below for details about args.
+  pw_finalize bootstrap "$SETUP_SH"
+
+User-Friendliness
+-----------------
+
+You may wish to allow sourcing ``bootstrap.sh`` from a different directory. In
+that case you'll need the following at the top of ``bootstrap.sh``.
+
+.. code-block:: bash
+
+  _python_abspath () {
+    python -c "import os.path; print(os.path.abspath('$@'))"
+  }
+
+  # Use this code from Pigweed's bootstrap to find the path to this script when
+  # sourced. This should work with common shells. PW_CHECKOUT_ROOT is only used in
+  # presubmit tests with strange setups, and can be omitted if you're not using
+  # Pigweed's automated testing infrastructure.
+  if test -n "$PW_CHECKOUT_ROOT"; then
+    PROJ_SETUP_SCRIPT_PATH="$(_python_abspath "$PW_CHECKOUT_ROOT/bootstrap.sh")"
+    unset PW_CHECKOUT_ROOT
+  # Shell: bash.
+  elif test -n "$BASH"; then
+    PROJ_SETUP_SCRIPT_PATH="$(_python_abspath "$BASH_SOURCE")"
+  # Shell: zsh.
+  elif test -n "$ZSH_NAME"; then
+    PROJ_SETUP_SCRIPT_PATH="$(_python_abspath "${(%):-%N}")"
+  # Shell: dash.
+  elif test ${0##*/} = dash; then
+    PROJ_SETUP_SCRIPT_PATH="$(_python_abspath \
+      "$(lsof -p $$ -Fn0 | tail -1 | sed 's#^[^/]*##;')")"
+  # If everything else fails, try $0. It could work.
+  else
+    PROJ_SETUP_SCRIPT_PATH="$(_python_abspath "$0")"
+  fi
+
+You may also wish to check if the user is attempting to execute ``bootstrap.sh``
+instead of sourcing it. Executing ``bootstrap.sh`` would download everything
+required for the environment, but cannot modify the environment of the parent
+process. To check for this add the following.
+
+.. code-block:: bash
+
+  # Check if this file is being executed or sourced.
+  _pw_sourced=0
+  # If not running in Pigweed's automated testing infrastructure the
+  # SWARMING_BOT_ID check is unnecessary.
+  if [ -n "$SWARMING_BOT_ID" ]; then
+    # If set we're running on swarming and don't need this check.
+    _pw_sourced=1
+  elif [ -n "$ZSH_EVAL_CONTEXT" ]; then
+    case $ZSH_EVAL_CONTEXT in *:file) _pw_sourced=1;; esac
+  elif [ -n "$KSH_VERSION" ]; then
+    [ "$(cd $(dirname -- $0) && pwd -P)/$(basename -- $0)" != \
+      "$(cd $(dirname -- ${.sh.file}) && pwd -P)/$(basename -- ${.sh.file})" ] \
+      && _pw_sourced=1
+  elif [ -n "$BASH_VERSION" ]; then
+    (return 0 2>/dev/null) && _pw_sourced=1
+  else  # All other shells: examine $0 for known shell binary filenames
+    # Detects `sh` and `dash`; add additional shell filenames as needed.
+    case ${0##*/} in sh|dash) _pw_sourced=1;; esac
+  fi
+
+  _pw_eval_sourced "$_pw_sourced"
+
+Downstream Projects Using Different Packages
+********************************************
+
+Projects depending on Pigweed but using additional or different packages should
+copy the Pigweed `sample project`_'s ``bootstrap.sh`` and update the call to
+``pw_bootstrap``. Search for "downstream" for other places that may require
+changes, like setting the ``PW_ROOT`` and ``PW_PROJECT_ROOT`` environment
+variables. Relevant arguments to ``pw_bootstrap`` are listed here.
+
+.. _sample project: https://pigweed.googlesource.com/pigweed/sample_project/+/master
+
+``--use-pigweed-defaults``
+  Use Pigweed default values in addition to the other switches.
+
+``--cipd-package-file path/to/packages.json``
+  CIPD package file. JSON file consisting of a list of dictionaries with "path"
+  and "tags" keys, where "tags" is a list of strings.
+
+``--virtualenv-requirements path/to/requirements.txt``
+  Pip requirements file. Compiled with pip-compile.
+
+``--virtualenv-gn-target path/to/directory#package-install-target``
+  Target for installing Python packages, and the directory from which it must be
+  run. Example for Pigweed: ``third_party/pigweed#:python.install`` (assuming
+  Pigweed is included in the project at ``third_party/pigweed``). Downstream
+  projects will need to create targets to install their packages and either
+  choose a subset of Pigweed packages or use
+  ``third_party/pigweed#:python.install`` to install all Pigweed packages.
+
+``--cargo-package-file path/to/packages.txt``
+  Rust cargo packages to install. Lines with package name and version separated
+  by a space. Has no effect without ``--enable-cargo``.
+
+``--enable-cargo``
+  Enable cargo package installation.
+
+An example of the changed env_setup.py line is below.
+
+.. code-block:: bash
+
+  pw_bootstrap \
+    --shell-file "$SETUP_SH" \
+    --install-dir "$_PW_ACTUAL_ENVIRONMENT_ROOT" \
+    --use-pigweed-defaults \
+    --cipd-package-file "$PW_PROJECT_ROOT/path/to/cipd.json" \
+    --virtualenv-gn-target "$PW_PROJECT_ROOT#:python.install"
+
+Projects wanting some of the Pigweed environment packages but not all of them
+should not use ``--use-pigweed-defaults`` and must manually add the references
+to Pigweed default packages through the other arguments. The arguments below
+are identical to using ``--use-pigweed-defaults``.
+
+.. code-block:: bash
+
+  --cipd-package-file
+  "$PW_ROOT/pw_env_setup/py/pw_env_setup/cipd_setup/pigweed.json"
+  --cipd-package-file
+  "$PW_ROOT/pw_env_setup/py/pw_env_setup/cipd_setup/luci.json"
+  --virtualenv-requirements
+  "$PW_ROOT/pw_env_setup/py/pw_env_setup/virtualenv_setup/requirements.txt"
+  --virtualenv-gn-target
+  "$PW_ROOT#:python.install"
+  --cargo-package-file
+  "$PW_ROOT/pw_env_setup/py/pw_env_setup/cargo_setup/packages.txt"
+
+Implementation
+**************
+
+The environment is set up by installing CIPD and Python packages in
+``PW_ENVIRONMENT_ROOT`` or ``<checkout>/.environment``, and saving modifications
+to environment variables in setup scripts in those directories. To support
+multiple operating systems this is done in an operating system-agnostic manner
+and then written into operating system-specific files to be sourced now and in
+the future when running ``activate.sh`` instead of ``bootstrap.sh``. In the
+future these could be extended to C shell and PowerShell. A logical mapping of
+high-level commands to system-specific initialization files is shown below.
+
+.. image:: doc_resources/pw_env_setup_output.png
+   :alt: Mapping of high-level commands to system-specific commands.
+   :align: left
diff --git a/pw_env_setup/py/BUILD.gn b/pw_env_setup/py/BUILD.gn
new file mode 100644
index 0000000..2ad8b67
--- /dev/null
+++ b/pw_env_setup/py/BUILD.gn
@@ -0,0 +1,37 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/python.gni")
+
+pw_python_package("py") {
+  setup = [ "setup.py" ]
+  sources = [
+    "pw_env_setup/__init__.py",
+    "pw_env_setup/cargo_setup/__init__.py",
+    "pw_env_setup/cipd_setup/__init__.py",
+    "pw_env_setup/cipd_setup/update.py",
+    "pw_env_setup/cipd_setup/wrapper.py",
+    "pw_env_setup/colors.py",
+    "pw_env_setup/env_setup.py",
+    "pw_env_setup/environment.py",
+    "pw_env_setup/environment_test.py",
+    "pw_env_setup/spinner.py",
+    "pw_env_setup/virtualenv_setup/__init__.py",
+    "pw_env_setup/virtualenv_setup/__main__.py",
+    "pw_env_setup/virtualenv_setup/install.py",
+    "pw_env_setup/windows_env_start.py",
+  ]
+}
diff --git a/pw_env_setup/py/pw_env_setup/cipd_setup/kythe.json b/pw_env_setup/py/pw_env_setup/cipd_setup/kythe.json
new file mode 100644
index 0000000..d1e02c0
--- /dev/null
+++ b/pw_env_setup/py/pw_env_setup/cipd_setup/kythe.json
@@ -0,0 +1,14 @@
+[
+  {
+    "path": "fuchsia/third_party/kythe",
+    "tags": [
+      "version:0.0.46"
+    ]
+  },
+  {
+    "path": "fuchsia/third_party/kythe-libs/${os=linux}-${arch=amd64}",
+    "tags": [
+      "version:2020-08-05"
+    ]
+  }
+]
diff --git a/pw_env_setup/py/pw_env_setup/cipd_setup/pigweed.json b/pw_env_setup/py/pw_env_setup/cipd_setup/pigweed.json
index 29fab9c..846262e 100644
--- a/pw_env_setup/py/pw_env_setup/cipd_setup/pigweed.json
+++ b/pw_env_setup/py/pw_env_setup/cipd_setup/pigweed.json
@@ -1,8 +1,8 @@
 [
   {
-    "path": "gn/gn/${os}-${arch=amd64}",
+    "path": "gn/gn/${os}-${arch=amd64,arm64}",
     "tags": [
-      "git_revision:bca0631828a5564a81628680cc6151bc418224f2"
+      "git_revision:fe9e5db149b0cc78e03511d52c452039dbf5ac1b"
     ]
   },
   {
@@ -44,14 +44,14 @@
   },
   {
     "_comment": "TODO(pwbug/70) Put clang in cipd for Windows.",
-    "path": "fuchsia/third_party/clang/${os=linux,mac}-${arch=amd64}",
+    "path": "fuchsia/third_party/clang/${os=linux,mac}-${arch}",
     "tags": [
-      "git_revision:8e058feae0b0d07cd86257f0aa3154acfa887fe0"
+      "git_revision:8af160b0b8ca8102b9490a287244af75727872f5"
     ]
   },
   {
     "_comment": "When bumping the minor version (e.g., to 3.9.x) also update env_setup/virtualenv/init.py to check for the new version.",
-    "path": "infra/3pp/tools/cpython3/${os}-${arch=amd64}",
+    "path": "infra/3pp/tools/cpython3/${os}-${arch}",
     "tags": [
       "version:3.8.2.chromium.10"
     ]
@@ -91,25 +91,33 @@
   {
     "path": "pigweed/host_tools/${os}-${arch=amd64}",
     "tags": [
-      "git_revision:02a39ed73e06e73d3cce2980c39cafd65e153a59"
+      "git_revision:00e773eafb943b25643d2e32b0d0af2f032426b3"
     ]
   },
   {
     "path": "infra/goma/client/${os}-${arch=amd64}",
     "tags": [
-      "git_revision:528b6ee53e3d0aebe5b0a49b6513896fdf89d531"
+      "git_revision:b3d6d03fbdc1d0cfcdae70db30830a08eece4ae1"
     ]
   },
   {
-    "path": "fuchsia/third_party/qemu/${os=linux,mac}-${arch=amd64}",
+    "path": "fuchsia/third_party/qemu/${os=linux,mac}-${arch}",
     "tags": [
-      "git_revision:487fc6fd6e173571c9842a45dd8071a23f24aaf3-2"
+      "git_revision:841f14e74f5af7886cf49cfcd4fed264911ae58e"
     ]
   },
   {
     "path": "fuchsia/third_party/kythe",
+    "subdir": "kythe",
     "tags": [
-      "version:0.0.45"
+      "version:1.0.1"
+    ]
+  },
+  {
+    "path": "fuchsia/third_party/kythe-libs/${os=linux}-${arch=amd64}",
+    "subdir": "kythe",
+    "tags": [
+      "version:2020-08-05"
     ]
   }
 ]
diff --git a/pw_env_setup/py/pw_env_setup/cipd_setup/update.py b/pw_env_setup/py/pw_env_setup/cipd_setup/update.py
index cbb791e..84c48c8 100755
--- a/pw_env_setup/py/pw_env_setup/cipd_setup/update.py
+++ b/pw_env_setup/py/pw_env_setup/cipd_setup/update.py
@@ -140,6 +140,7 @@
                    '$ParanoidMode CheckPresence\n')
 
         for entry in data:
+            outs.write('@Subdir {}\n'.format(entry.get('subdir', '')))
             outs.write('{} {}\n'.format(entry['path'],
                                         ' '.join(entry['tags'])))
 
@@ -192,6 +193,7 @@
             '-ensure-file', ensure_file,
             '-root', install_dir,
             '-log-level', 'warning',
+            '-cache-dir', cache_dir,
             '-max-threads', '0',  # 0 means use CPU count.
         ]  # yapf: disable
 
diff --git a/pw_env_setup/py/pw_env_setup/cipd_setup/wrapper.py b/pw_env_setup/py/pw_env_setup/cipd_setup/wrapper.py
index 787347a..572f4ab 100755
--- a/pw_env_setup/py/pw_env_setup/cipd_setup/wrapper.py
+++ b/pw_env_setup/py/pw_env_setup/cipd_setup/wrapper.py
@@ -28,16 +28,17 @@
 import ssl
 import subprocess
 import sys
+import base64
 
 try:
-    import httplib
+    import httplib  # type: ignore
 except ImportError:
-    import http.client as httplib  # type: ignore
+    import http.client as httplib  # type: ignore[no-redef]
 
 try:
-    import urlparse  # Python 2.
+    import urlparse  # type: ignore
 except ImportError:
-    import urllib.parse as urlparse  # type: ignore
+    import urllib.parse as urlparse  # type: ignore[no-redef]
 
 try:
     SCRIPT_DIR = os.path.dirname(__file__)
@@ -145,6 +146,29 @@
                                                    DIGESTS_FILE))
 
 
+def https_connect_with_proxy(target_url):
+    """Create HTTPSConnection with proxy support."""
+
+    proxy_env = os.environ.get('HTTPS_PROXY') or os.environ.get('https_proxy')
+    if proxy_env in (None, ''):
+        conn = httplib.HTTPSConnection(target_url)
+        return conn
+
+    url = urlparse.urlparse(proxy_env)
+    conn = httplib.HTTPSConnection(url.hostname, url.port)
+    headers = {}
+    if url.username and url.password:
+        auth = '%s:%s' % (url.username, url.password)
+        py_version = sys.version_info.major
+        if py_version >= 3:
+            headers['Proxy-Authorization'] = 'Basic ' + str(
+                base64.b64encode(auth.encode()).decode())
+        else:
+            headers['Proxy-Authorization'] = 'Basic ' + base64.b64encode(auth)
+    conn.set_tunnel(target_url, 443, headers)
+    return conn
+
+
 def client_bytes():
     """Pull down the CIPD client and return it as a bytes object.
 
@@ -157,7 +181,7 @@
         version = ins.read().strip()
 
     try:
-        conn = httplib.HTTPSConnection(CIPD_HOST)
+        conn = https_connect_with_proxy(CIPD_HOST)
     except AttributeError:
         print('=' * 70)
         print('''
@@ -212,7 +236,7 @@
             location = res.getheader('location')
             url = urlparse.urlparse(location)
             if url.netloc != conn.host:
-                conn = httplib.HTTPSConnection(url.netloc)
+                conn = https_connect_with_proxy(url.netloc)
             path = '{}?{}'.format(url.path, url.query)
 
         # Some kind of error in this response.
diff --git a/pw_env_setup/py/pw_env_setup/env_setup.py b/pw_env_setup/py/pw_env_setup/env_setup.py
index e6c70b6..a03fdf4 100755
--- a/pw_env_setup/py/pw_env_setup/env_setup.py
+++ b/pw_env_setup/py/pw_env_setup/env_setup.py
@@ -73,6 +73,7 @@
 from pw_env_setup import environment
 from pw_env_setup import spinner
 from pw_env_setup import virtualenv_setup
+from pw_env_setup import windows_env_start
 
 
 # TODO(pwbug/67, pwbug/68) switch to shutil.which().
@@ -117,7 +118,7 @@
 
 
 class _Result:
-    class Status:  # pylint: disable=too-few-public-methods
+    class Status:
         DONE = 'done'
         SKIPPED = 'skipped'
         FAILED = 'failed'
@@ -136,8 +137,7 @@
         return self._messages
 
 
-def _get_env(varname):
-    globs = os.environ.get(varname, '').split(os.pathsep)
+def _process_globs(globs):
     unique_globs = []
     for pat in globs:
         if pat and pat not in unique_globs:
@@ -150,12 +150,11 @@
             matches = glob.glob(pat)
             if not matches:
                 warnings.append(
-                    'warning: pattern "{}" in {} matched 0 files'.format(
-                        pat, varname))
+                    'warning: pattern "{}" matched 0 files'.format(pat))
             files.extend(matches)
 
-    if not files:
-        warnings.append('warning: variable {} matched 0 files'.format(varname))
+    if globs and not files:
+        warnings.append('warning: matched 0 total files')
 
     return files, warnings
 
@@ -169,12 +168,16 @@
 
 # TODO(mohrr) remove disable=useless-object-inheritance once in Python 3.
 # pylint: disable=useless-object-inheritance
+# pylint: disable=too-many-instance-attributes
+# pylint: disable=too-many-arguments
 class EnvSetup(object):
     """Run environment setup for Pigweed."""
     def __init__(self, pw_root, cipd_cache_dir, shell_file, quiet, install_dir,
-                 *args, **kwargs):
-        super(EnvSetup, self).__init__(*args, **kwargs)
+                 use_pigweed_defaults, cipd_package_file, virtualenv_root,
+                 virtualenv_requirements, virtualenv_gn_target,
+                 cargo_package_file, enable_cargo, json_file, project_root):
         self._env = environment.Environment()
+        self._project_root = project_root
         self._pw_root = pw_root
         self._setup_root = os.path.join(pw_root, 'pw_env_setup', 'py',
                                         'pw_env_setup')
@@ -183,6 +186,8 @@
         self._is_windows = os.name == 'nt'
         self._quiet = quiet
         self._install_dir = install_dir
+        self._virtualenv_root = (virtualenv_root
+                                 or os.path.join(install_dir, 'pigweed-venv'))
 
         if os.path.isfile(shell_file):
             os.unlink(shell_file)
@@ -190,9 +195,45 @@
         if isinstance(self._pw_root, bytes) and bytes != str:
             self._pw_root = self._pw_root.decode()
 
-        # No need to set PW_ROOT or _PW_ACTUAL_ENVIRONMENT_ROOT, that will be
-        # done by bootstrap.sh and bootstrap.bat for both bootstrap and
-        # activate.
+        self._cipd_package_file = []
+        self._virtualenv_requirements = []
+        self._virtualenv_gn_targets = []
+        self._cargo_package_file = []
+        self._enable_cargo = enable_cargo
+
+        self._json_file = json_file
+
+        setup_root = os.path.join(pw_root, 'pw_env_setup', 'py',
+                                  'pw_env_setup')
+
+        # TODO(pwbug/67, pwbug/68) Investigate pulling these files into an
+        # oxidized env setup executable instead of referring to them in the
+        # source tree. Note that this could be error-prone because users expect
+        # changes to the files in the source tree to affect bootstrap.
+        if use_pigweed_defaults:
+            # If updating this section make sure to update
+            # $PW_ROOT/pw_env_setup/docs.rst as well.
+            self._cipd_package_file.append(
+                os.path.join(setup_root, 'cipd_setup', 'pigweed.json'))
+            self._cipd_package_file.append(
+                os.path.join(setup_root, 'cipd_setup', 'luci.json'))
+            self._virtualenv_requirements.append(
+                os.path.join(setup_root, 'virtualenv_setup',
+                             'requirements.txt'))
+            self._virtualenv_gn_targets.append(
+                virtualenv_setup.GnTarget(
+                    '{}#:python.install'.format(pw_root)))
+            self._cargo_package_file.append(
+                os.path.join(setup_root, 'cargo_setup', 'packages.txt'))
+
+        self._cipd_package_file.extend(cipd_package_file)
+        self._virtualenv_requirements.extend(virtualenv_requirements)
+        self._virtualenv_gn_targets.extend(virtualenv_gn_target)
+        self._cargo_package_file.extend(cargo_package_file)
+
+        self._env.set('PW_PROJECT_ROOT', project_root)
+        self._env.set('PW_ROOT', pw_root)
+        self._env.set('_PW_ACTUAL_ENVIRONMENT_ROOT', install_dir)
         self._env.add_replacement('_PW_ACTUAL_ENVIRONMENT_ROOT', install_dir)
         self._env.add_replacement('PW_ROOT', pw_root)
 
@@ -208,7 +249,10 @@
     def setup(self):
         """Runs each of the env_setup steps."""
 
-        enable_colors()
+        if os.name == 'nt':
+            windows_env_start.print_banner(bootstrap=True, no_shell_file=False)
+        else:
+            enable_colors()
 
         steps = [
             ('CIPD package manager', self.cipd),
@@ -217,9 +261,12 @@
         ]
 
         # TODO(pwbug/63): Add a Windows version of cargo to CIPD.
-        if not self._is_windows and os.environ.get('PW_CARGO_SETUP', ''):
+        if not self._is_windows and self._enable_cargo:
             steps.append(("Rust cargo", self.cargo))
 
+        if self._is_windows:
+            steps.append(("Windows scripts", self.win_scripts))
+
         self._log(
             Color.bold('Downloading and installing packages into local '
                        'source directory:\n'))
@@ -272,7 +319,7 @@
         self._log('')
         self._env.echo('')
 
-        self._env.hash()
+        self._env.finalize()
 
         self._env.echo(Color.bold('Sanity checking the environment:'))
         self._env.echo()
@@ -290,6 +337,12 @@
         with open(self._shell_file, 'w') as outs:
             self._env.write(outs)
 
+        deactivate = os.path.join(
+            self._install_dir,
+            'deactivate{}'.format(os.path.splitext(self._shell_file)[1]))
+        with open(deactivate, 'w') as outs:
+            self._env.write_deactivate(outs)
+
         config = {
             # Skipping sysname and nodename in os.uname(). nodename could change
             # based on the current network. sysname won't change, but is
@@ -303,6 +356,10 @@
             outs.write(
                 json.dumps(config, indent=4, separators=(',', ': ')) + '\n')
 
+        if self._json_file is not None:
+            with open(self._json_file, 'w') as outs:
+                self._env.json(outs)
+
         return 0
 
     def cipd(self):
@@ -310,7 +367,7 @@
 
         cipd_client = cipd_wrapper.init(install_dir, silent=True)
 
-        package_files, glob_warnings = _get_env('PW_CIPD_PACKAGE_FILES')
+        package_files, glob_warnings = _process_globs(self._cipd_package_file)
         result = result_func(glob_warnings)
 
         if not package_files:
@@ -328,13 +385,9 @@
     def virtualenv(self):
         """Setup virtualenv."""
 
-        venv_path = os.path.join(self._install_dir, 'python3-env')
-
-        requirements, req_glob_warnings = _get_env(
-            'PW_VIRTUALENV_REQUIREMENTS')
-        setup_py_roots, setup_glob_warnings = _get_env(
-            'PW_VIRTUALENV_SETUP_PY_ROOTS')
-        result = result_func(req_glob_warnings + setup_glob_warnings)
+        requirements, req_glob_warnings = _process_globs(
+            self._virtualenv_requirements)
+        result = result_func(req_glob_warnings)
 
         orig_python3 = _which('python3')
         with self._env():
@@ -352,14 +405,17 @@
                 shutil.copyfile(new_python3, python3_copy)
             new_python3 = python3_copy
 
-        if not requirements and not setup_py_roots:
+        if not requirements and not self._virtualenv_gn_targets:
             return result(_Result.Status.SKIPPED)
 
-        if not virtualenv_setup.install(venv_path=venv_path,
-                                        requirements=requirements,
-                                        setup_py_roots=setup_py_roots,
-                                        python=new_python3,
-                                        env=self._env):
+        if not virtualenv_setup.install(
+                project_root=self._project_root,
+                venv_path=self._virtualenv_root,
+                requirements=requirements,
+                gn_targets=self._virtualenv_gn_targets,
+                python=new_python3,
+                env=self._env,
+        ):
             return result(_Result.Status.FAILED)
 
         return result(_Result.Status.DONE)
@@ -372,18 +428,17 @@
         self._env.prepend('PATH', os.path.join(host_dir, 'host_tools'))
         return _Result(_Result.Status.DONE)
 
-    def cargo(self):
-        if not os.environ.get('PW_CARGO_SETUP', ''):
-            return _Result(
-                _Result.Status.SKIPPED,
-                '    Note: Re-run bootstrap with PW_CARGO_SETUP=1 set '
-                'in your environment',
-                '          to enable Rust. (Rust is usually not needed.)',
-            )
+    def win_scripts(self):
+        # These scripts act as a compatibility layer for windows.
+        env_setup_dir = os.path.join(self._pw_root, 'pw_env_setup')
+        self._env.prepend('PATH', os.path.join(env_setup_dir,
+                                               'windows_scripts'))
+        return _Result(_Result.Status.DONE)
 
+    def cargo(self):
         install_dir = os.path.join(self._install_dir, 'cargo')
 
-        package_files, glob_warnings = _get_env('PW_CARGO_PACKAGE_FILES')
+        package_files, glob_warnings = _process_globs(self._cargo_package_file)
         result = result_func(glob_warnings)
 
         if not package_files:
@@ -410,12 +465,21 @@
                     stderr=outs).strip()
         except subprocess.CalledProcessError:
             pw_root = None
+
     parser.add_argument(
         '--pw-root',
         default=pw_root,
         required=not pw_root,
     )
 
+    project_root = os.environ.get('PW_PROJECT_ROOT', None) or pw_root
+
+    parser.add_argument(
+        '--project-root',
+        default=project_root,
+        required=not project_root,
+    )
+
     parser.add_argument(
         '--cipd-cache-dir',
         default=os.environ.get('CIPD_CACHE_DIR',
@@ -441,11 +505,88 @@
         required=True,
     )
 
-    return parser.parse_args(argv)
+    parser.add_argument(
+        '--use-pigweed-defaults',
+        help='Use Pigweed default values in addition to the given environment '
+        'variables.',
+        action='store_true',
+    )
+
+    parser.add_argument(
+        '--cipd-package-file',
+        help='CIPD package file. JSON file consisting of a list of dicts with '
+        '"path" and "tags" keys, where "tags" a list of str.',
+        default=[],
+        action='append',
+    )
+
+    parser.add_argument(
+        '--virtualenv-requirements',
+        help='Pip requirements file. Compiled with pip-compile.',
+        default=[],
+        action='append',
+    )
+
+    parser.add_argument(
+        '--virtualenv-gn-target',
+        help=('GN targets that build and install Python packages. Format: '
+              "path/to/gn_root#target"),
+        default=[],
+        action='append',
+        type=virtualenv_setup.GnTarget,
+    )
+
+    parser.add_argument(
+        '--virtualenv-root',
+        help=('Root of virtualenv directory. Default: '
+              '<install_dir>/pigweed-venv'),
+        default=None,
+    )
+
+    parser.add_argument(
+        '--cargo-package-file',
+        help='Rust cargo packages to install. Lines with package name and '
+        'version separated by a space.',
+        default=[],
+        action='append',
+    )
+
+    parser.add_argument(
+        '--enable-cargo',
+        help='Enable cargo installation.',
+        action='store_true',
+    )
+
+    parser.add_argument(
+        '--json-file',
+        help='Dump environment variable operations to a JSON file.',
+        default=None,
+    )
+
+    args = parser.parse_args(argv)
+
+    one_required = (
+        'use_pigweed_defaults',
+        'cipd_package_file',
+        'virtualenv_requirements',
+        'virtualenv_gn_target',
+        'cargo_package_file',
+    )
+
+    if not any(getattr(args, x) for x in one_required):
+        parser.error('At least one of ({}) is required'.format(', '.join(
+            '"--{}"'.format(x.replace('_', '-')) for x in one_required)))
+
+    return args
 
 
 def main():
-    return EnvSetup(**vars(parse())).setup()
+    try:
+        return EnvSetup(**vars(parse())).setup()
+    except subprocess.CalledProcessError as err:
+        print()
+        print(err.output)
+        raise
 
 
 if __name__ == '__main__':
diff --git a/pw_env_setup/py/pw_env_setup/environment.py b/pw_env_setup/py/pw_env_setup/environment.py
index 57b3fca..3e4c9b2 100644
--- a/pw_env_setup/py/pw_env_setup/environment.py
+++ b/pw_env_setup/py/pw_env_setup/environment.py
@@ -14,9 +14,22 @@
 """Stores the environment changes necessary for Pigweed."""
 
 import contextlib
+import json
 import os
 import re
 
+# The order here is important. On Python 2 we want StringIO.StringIO and not
+# io.StringIO. On Python 3 there is no StringIO module so we want io.StringIO.
+# Not using six because six is not a standard package we can expect to have
+# installed in the system Python.
+try:
+    from StringIO import StringIO  # type: ignore
+except ImportError:
+    from io import StringIO
+
+# Disable super() warnings since this file must be Python 2 compatible.
+# pylint: disable=super-with-arguments
+
 # goto label written to the end of Windows batch files for exiting a script.
 _SCRIPT_END_LABEL = '_pw_end'
 
@@ -46,12 +59,20 @@
 
 
 class _Action(object):  # pylint: disable=useless-object-inheritance
-    def unapply(self, env, orig_env):  # pylint: disable=no-self-use
-        del env, orig_env  # Only used in _VariableAction and subclasses.
+    def unapply(self, env, orig_env):
+        pass
+
+    def json(self, data):
+        pass
+
+    def write_deactivate(self,
+                         outs,
+                         windows=(os.name == 'nt'),
+                         replacements=()):
+        pass
 
 
 class _VariableAction(_Action):
-    # pylint: disable=redefined-builtin,too-few-public-methods
     # pylint: disable=keyword-arg-before-vararg
     def __init__(self, name, value, allow_empty_values=False, *args, **kwargs):
         super(_VariableAction, self).__init__(*args, **kwargs)
@@ -64,7 +85,7 @@
     def _check(self):
         try:
             # In python2, unicode is a distinct type.
-            valid_types = (str, unicode)  # pylint: disable=undefined-variable
+            valid_types = (str, unicode)
         except NameError:
             valid_types = (str, )
 
@@ -118,9 +139,23 @@
             outs.write('{name}="{value}"\nexport {name}\n'.format(
                 name=self.name, value=value))
 
+    def write_deactivate(self,
+                         outs,
+                         windows=(os.name == 'nt'),
+                         replacements=()):
+        del replacements  # Unused.
+
+        if windows:
+            outs.write('set {name}=\n'.format(name=self.name))
+        else:
+            outs.write('unset {name}\n'.format(name=self.name))
+
     def apply(self, env):
         env[self.name] = self.value
 
+    def json(self, data):
+        data['set'][self.name] = self.value
+
 
 class Clear(_VariableAction):
     """Remove a variable from the environment."""
@@ -140,6 +175,24 @@
         if self.name in env:
             del env[self.name]
 
+    def json(self, data):
+        data['set'][self.name] = None
+
+
+def _initialize_path_like_variable(data, name):
+    default = {'append': [], 'prepend': [], 'remove': []}
+    data['modify'].setdefault(name, default)
+
+
+def _remove_value_from_path(variable, value, pathsep):
+    return ('{variable}="$(echo "${variable}"'
+            ' | sed "s|{pathsep}{value}{pathsep}|{pathsep}|g;"'
+            ' | sed "s|^{value}{pathsep}||g;"'
+            ' | sed "s|{pathsep}{value}$||g;"'
+            ')"\nexport {variable}\n'.format(variable=variable,
+                                             value=value,
+                                             pathsep=pathsep))
+
 
 class Remove(_VariableAction):
     """Remove a value from a PATH-like variable."""
@@ -163,15 +216,10 @@
             #              name=self.name, value=value, pathsep=self._pathsep))
 
         else:
-            outs.write('# Remove \n#   {value}\n# from\n#   {name}\n# before '
-                       'adding it back.\n'
-                       '{name}="$(echo "${name}"'
-                       ' | sed "s|{pathsep}{value}{pathsep}|{pathsep}|g;"'
-                       ' | sed "s|^{value}{pathsep}||g;"'
-                       ' | sed "s|{pathsep}{value}$||g;"'
-                       ')"\nexport {name}\n'.format(name=self.name,
-                                                    value=value,
-                                                    pathsep=self._pathsep))
+            outs.write('# Remove \n#   {value}\n# from\n#   {name}\n# before '
+                       'adding it back.\n'.format(value=value, name=self.name))
+            outs.write(_remove_value_from_path(self.name, value,
+                                               self._pathsep))
 
     def apply(self, env):
         env[self.name] = env[self.name].replace(
@@ -179,6 +227,14 @@
         env[self.name] = env[self.name].replace(
             '{}{}'.format(self._pathsep, self.value), '')
 
+    def json(self, data):
+        _initialize_path_like_variable(data, self.name)
+        data['modify'][self.name]['remove'].append(self.value)
+        if self.value in data['modify'][self.name]['append']:
+            data['modify'][self.name]['append'].remove(self.value)
+        if self.value in data['modify'][self.name]['prepend']:
+            data['modify'][self.name]['prepend'].remove(self.value)
+
 
 class BadVariableValue(ValueError):
     pass
@@ -209,6 +265,18 @@
             outs.write('{name}="{value}"\nexport {name}\n'.format(
                 name=self.name, value=value))
 
+    def write_deactivate(self,
+                         outs,
+                         windows=(os.name == 'nt'),
+                         replacements=()):
+        value = self.value
+        for var, replacement in replacements:
+            if var != self.name:
+                value = value.replace(replacement, _var_form(var, windows))
+
+        outs.write(
+            _remove_value_from_path(self.name, value, self._join.pathsep))
+
     def apply(self, env):
         env[self.name] = self._join(self.value, env.get(self.name, ''))
 
@@ -216,6 +284,12 @@
         super(Prepend, self)._check()
         _append_prepend_check(self)
 
+    def json(self, data):
+        _initialize_path_like_variable(data, self.name)
+        data['modify'][self.name]['prepend'].append(self.value)
+        if self.value in data['modify'][self.name]['remove']:
+            data['modify'][self.name]['remove'].remove(self.value)
+
 
 class Append(_VariableAction):
     """Append a value to a PATH-like variable. (Uncommon, see Prepend.)"""
@@ -237,6 +311,18 @@
             outs.write('{name}="{value}"\nexport {name}\n'.format(
                 name=self.name, value=value))
 
+    def write_deactivate(self,
+                         outs,
+                         windows=(os.name == 'nt'),
+                         replacements=()):
+        value = self.value
+        for var, replacement in replacements:
+            if var != self.name:
+                value = value.replace(replacement, _var_form(var, windows))
+
+        outs.write(
+            _remove_value_from_path(self.name, value, self._join.pathsep))
+
     def apply(self, env):
         env[self.name] = self._join(env.get(self.name, ''), self.value)
 
@@ -244,6 +330,12 @@
         super(Append, self)._check()
         _append_prepend_check(self)
 
+    def json(self, data):
+        _initialize_path_like_variable(data, self.name)
+        data['modify'][self.name]['append'].append(self.value)
+        if self.value in data['modify'][self.name]['remove']:
+            data['modify'][self.name]['remove'].remove(self.value)
+
 
 class BadEchoValue(ValueError):
     pass
@@ -280,8 +372,8 @@
                 outs.write('  echo -n "{}"\n'.format(self.value))
             outs.write('fi\n')
 
-    def apply(self, env):  # pylint: disable=no-self-use
-        del env  # Unused.
+    def apply(self, env):
+        pass
 
 
 class Comment(_Action):
@@ -296,8 +388,8 @@
         for line in self.value.splitlines():
             outs.write('{} {}\n'.format(comment_char, line))
 
-    def apply(self, env):  # pylint: disable=no-self-use
-        del env  # Unused.
+    def apply(self, env):
+        pass
 
 
 class Command(_Action):
@@ -323,8 +415,8 @@
             # Assume failing command produced relevant output.
             outs.write('if [ "$?" -ne 0 ]; then\n  return 1\nfi\n')
 
-    def apply(self, env):  # pylint: disable=no-self-use
-        del env  # Unused.
+    def apply(self, env):
+        pass
 
 
 class BlankLine(_Action):
@@ -337,8 +429,29 @@
         del replacements, windows  # Unused.
         outs.write('\n')
 
-    def apply(self, env):  # pylint: disable=no-self-use
-        del env  # Unused.
+    def apply(self, env):
+        pass
+
+
+class Function(_Action):
+    def __init__(self, name, body, *args, **kwargs):
+        super(Function, self).__init__(*args, **kwargs)
+        self._name = name
+        self._body = body
+
+    def write(self, outs, windows=(os.name == 'nt'), replacements=()):
+        del replacements  # Unused.
+        if windows:
+            return
+
+        outs.write("""
+{name}() {{
+{body}
+}}
+        """.strip().format(name=self._name, body=self._body))
+
+    def apply(self, env):
+        pass
 
 
 class Hash(_Action):
@@ -361,8 +474,18 @@
 fi
 ''')
 
-    def apply(self, env):  # pylint: disable=no-self-use
-        del env  # Unused.
+    def apply(self, env):
+        pass
+
+
+class Join(object):  # pylint: disable=useless-object-inheritance
+    def __init__(self, pathsep=os.pathsep):
+        self.pathsep = pathsep
+
+    def __call__(self, *args):
+        if len(args) == 1 and isinstance(args[0], (list, tuple)):
+            args = args[0]
+        return self.pathsep.join(args)
 
 
 # TODO(mohrr) remove disable=useless-object-inheritance once in Python 3.
@@ -383,11 +506,8 @@
         self._windows = windows
         self._allcaps = allcaps
         self._replacements = []
-
-    def _join(self, *args):
-        if len(args) == 1 and isinstance(args[0], (list, tuple)):
-            args = args[0]
-        return self._pathsep.join(args)
+        self._join = Join(pathsep)
+        self._finalized = False
 
     def add_replacement(self, variable, value=None):
         self._replacements.append((variable, value))
@@ -407,31 +527,34 @@
 
     def set(self, name, value):
         """Set a variable."""
+        assert not self._finalized
         name = self.normalize_key(name)
         self._actions.append(Set(name, value))
         self._blankline()
 
     def clear(self, name):
         """Remove a variable."""
+        assert not self._finalized
         name = self.normalize_key(name)
         self._actions.append(Clear(name))
         self._blankline()
 
     def _remove(self, name, value):
         """Remove a value from a variable."""
-
+        assert not self._finalized
         name = self.normalize_key(name)
         if self.get(name, None):
             self._actions.append(Remove(name, value, self._pathsep))
 
     def remove(self, name, value):
         """Remove a value from a PATH-like variable."""
+        assert not self._finalized
         self._remove(name, value)
         self._blankline()
 
     def append(self, name, value):
         """Add a value to a PATH-like variable. Rarely used, see prepend()."""
-
+        assert not self._finalized
         name = self.normalize_key(name)
         if self.get(name, None):
             self._remove(name, value)
@@ -442,7 +565,7 @@
 
     def prepend(self, name, value):
         """Add a value to the beginning of a PATH-like variable."""
-
+        assert not self._finalized
         name = self.normalize_key(name)
         if self.get(name, None):
             self._remove(name, value)
@@ -453,30 +576,46 @@
 
     def echo(self, value='', newline=True):
         """Echo a value to the terminal."""
-
+        # echo() deliberately ignores self._finalized.
         self._actions.append(Echo(value, newline))
         if value:
             self._blankline()
 
     def comment(self, comment):
         """Add a comment to the init script."""
+        # comment() deliberately ignores self._finalized.
         self._actions.append(Comment(comment))
         self._blankline()
 
     def command(self, command, exit_on_error=True):
         """Run a command."""
-
+        # command() deliberately ignores self._finalized.
         self._actions.append(Command(command, exit_on_error=exit_on_error))
         self._blankline()
 
+    def function(self, name, body):
+        """Define a function."""
+        assert not self._finalized
+        self._actions.append(Function(name, body))
+        self._blankline()
+
     def _blankline(self):
         self._actions.append(BlankLine())
 
-    def hash(self):
-        """If required by the shell rehash the PATH variable."""
+    def finalize(self):
+        """Run cleanup at the end of environment setup."""
+        assert not self._finalized
+        self._finalized = True
         self._actions.append(Hash())
         self._blankline()
 
+        if not self._windows:
+            buf = StringIO()
+            for action in self._actions:
+                action.write_deactivate(buf, windows=self._windows)
+            self._actions.append(Function('_pw_deactivate', buf.getvalue()))
+            self._blankline()
+
     def write(self, outs):
         """Writes a shell init script to outs."""
         if self._windows:
@@ -495,6 +634,27 @@
         if self._windows:
             outs.write(':{}\n'.format(_SCRIPT_END_LABEL))
 
+    def json(self, outs):
+        data = {
+            'modify': {},
+            'set': {},
+        }
+
+        for action in self._actions:
+            action.json(data)
+
+        json.dump(data, outs, indent=4, separators=(',', ': '))
+        outs.write('\n')
+
+    def write_deactivate(self, outs):
+        if self._windows:
+            outs.write('@echo off\n')
+
+        for action in reversed(self._actions):
+            action.write_deactivate(outs,
+                                    windows=self._windows,
+                                    replacements=())
+
     @contextlib.contextmanager
     def __call__(self, export=True):
         """Set environment as if this was written to a file and sourced.
diff --git a/pw_env_setup/py/pw_env_setup/environment_test.py b/pw_env_setup/py/pw_env_setup/environment_test.py
index a7d7c3a..d062d55 100644
--- a/pw_env_setup/py/pw_env_setup/environment_test.py
+++ b/pw_env_setup/py/pw_env_setup/environment_test.py
@@ -33,6 +33,8 @@
 
 from pw_env_setup import environment
 
+# pylint: disable=super-with-arguments
+
 
 class WrittenEnvFailure(Exception):
     pass
diff --git a/pw_env_setup/py/pw_env_setup/py.typed b/pw_env_setup/py/pw_env_setup/py.typed
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pw_env_setup/py/pw_env_setup/py.typed
diff --git a/pw_env_setup/py/pw_env_setup/spinner.py b/pw_env_setup/py/pw_env_setup/spinner.py
index fd6ea9c..5060395 100644
--- a/pw_env_setup/py/pw_env_setup/spinner.py
+++ b/pw_env_setup/py/pw_env_setup/spinner.py
@@ -21,8 +21,7 @@
 
 class Spinner(object):  # pylint: disable=useless-object-inheritance
     """Spinner!"""
-    def __init__(self, *args, **kwargs):
-        super(Spinner, self).__init__(*args, **kwargs)
+    def __init__(self):
         self._done = None
         self._thread = None
 
diff --git a/pw_env_setup/py/pw_env_setup/virtualenv_setup/__main__.py b/pw_env_setup/py/pw_env_setup/virtualenv_setup/__main__.py
index 0bb2d32..29ff21a 100644
--- a/pw_env_setup/py/pw_env_setup/virtualenv_setup/__main__.py
+++ b/pw_env_setup/py/pw_env_setup/virtualenv_setup/__main__.py
@@ -19,11 +19,20 @@
 
 # TODO(pwbug/67) switch back to 'from pw_env_setup import virtualenv_setup'.
 # from pw_env_setup import virtualenv_setup
-import install as virtualenv_setup  # pylint: disable=import-error
+# pylint: disable=import-error
+import install as virtualenv_setup  # type: ignore
+# pylint: enable=import-error
 
 
 def _main():
     parser = argparse.ArgumentParser(description=__doc__)
+
+    project_root = os.environ.get('PW_PROJECT_ROOT', None)
+
+    parser.add_argument('--project-root',
+                        default=project_root,
+                        required=not project_root,
+                        help='Path to overall project root.')
     parser.add_argument('--venv_path',
                         required=True,
                         help='Path at which to create the venv')
@@ -32,11 +41,12 @@
                         default=[],
                         action='append',
                         help='requirements.txt files to install')
-    parser.add_argument('-s',
-                        '--setup-py-roots',
+    parser.add_argument('--gn-target',
+                        dest='gn_targets',
                         default=[],
                         action='append',
-                        help='places to search for setup.py files')
+                        type=virtualenv_setup.GnTarget,
+                        help='GN targets that install packages')
     parser.add_argument('--quick-setup',
                         dest='full_envsetup',
                         action='store_false',
diff --git a/pw_env_setup/py/pw_env_setup/virtualenv_setup/install.py b/pw_env_setup/py/pw_env_setup/virtualenv_setup/install.py
index f0e78c0..6c0e497 100644
--- a/pw_env_setup/py/pw_env_setup/virtualenv_setup/install.py
+++ b/pw_env_setup/py/pw_env_setup/virtualenv_setup/install.py
@@ -16,12 +16,35 @@
 from __future__ import print_function
 
 import glob
+import hashlib
 import os
+import re
 import subprocess
 import sys
 import tempfile
 
 
+class GnTarget(object):  # pylint: disable=useless-object-inheritance
+    def __init__(self, val):
+        self.directory, self.target = val.split('#', 1)
+        # hash() doesn't necessarily give the same value in new runs of Python,
+        # so compute a unique id for this object that's consistent from run to
+        # run.
+        try:
+            val = val.encode()
+        except AttributeError:
+            pass
+        self._unique_id = hashlib.md5(val).hexdigest()
+
+    @property
+    def name(self):
+        """A reasonably stable and unique name for each pair."""
+        result = '{}-{}'.format(
+            os.path.basename(os.path.normpath(self.directory)),
+            self._unique_id)
+        return re.sub(r'[:/#_]+', '_', result)
+
+
 def git_stdout(*args, **kwargs):
     """Run git, passing args as git params and kwargs to subprocess."""
     return subprocess.check_output(['git'] + list(args), **kwargs).strip()
@@ -75,12 +98,16 @@
             raise
 
 
-def _find_files_by_name(roots, name):
+def _find_files_by_name(roots, name, allow_nesting=False):
     matches = []
     for root in roots:
         for dirpart, dirs, files in os.walk(root):
             if name in files:
                 matches.append(os.path.join(dirpart, name))
+                # If this directory is a match don't recurse inside it looking
+                # for more matches.
+                if not allow_nesting:
+                    dirs[:] = []
 
             # Filter directories starting with . to avoid searching unnecessary
             # paths and finding files that should be hidden.
@@ -89,11 +116,12 @@
 
 
 def install(
+        project_root,
         venv_path,
         full_envsetup=True,
         requirements=(),
+        gn_targets=(),
         python=sys.executable,
-        setup_py_roots=(),
         env=None,
 ):
     """Creates a venv and installs all packages in this Git repo."""
@@ -107,6 +135,16 @@
         print('=' * 60, file=sys.stderr)
         return False
 
+    # The bin/ directory is called Scripts/ on Windows. Don't ask.
+    venv_bin = os.path.join(venv_path, 'Scripts' if os.name == 'nt' else 'bin')
+
+    # Delete activation scripts. Typically they're created read-only and venv
+    # will complain when trying to write over them fails.
+    if os.path.isdir(venv_bin):
+        for entry in os.listdir(venv_bin):
+            if entry.lower().startswith('activate'):
+                os.unlink(os.path.join(venv_bin, entry))
+
     pyvenv_cfg = os.path.join(venv_path, 'pyvenv.cfg')
     if full_envsetup or not os.path.exists(pyvenv_cfg):
         # On Mac sometimes the CIPD Python has __PYVENV_LAUNCHER__ set to
@@ -117,11 +155,9 @@
         if '__PYVENV_LAUNCHER__' in envcopy:
             del envcopy['__PYVENV_LAUNCHER__']
 
-        cmd = (python, '-m', 'venv', '--clear', venv_path)
+        cmd = (python, '-m', 'venv', '--upgrade', venv_path)
         _check_call(cmd, env=envcopy)
 
-    # The bin/ directory is called Scripts/ on Windows. Don't ask.
-    venv_bin = os.path.join(venv_path, 'Scripts' if os.name == 'nt' else 'bin')
     venv_python = os.path.join(venv_bin, 'python')
 
     pw_root = os.environ.get('PW_ROOT')
@@ -130,8 +166,6 @@
     if not pw_root:
         raise GitRepoNotFound()
 
-    setup_py_files = _find_files_by_name(setup_py_roots, 'setup.py')
-
     # Sometimes we get an error saying "Egg-link ... does not match
     # installed location". This gets around that. The egg-link files
     # all come from 'pw'-prefixed packages we installed with --editable.
@@ -146,29 +180,51 @@
 
     pip_install('--upgrade', 'pip')
 
-    def package(pkg_path):
-        if isinstance(pkg_path, bytes) and bytes != str:
-            pkg_path = pkg_path.decode()
-        return os.path.join(pw_root, os.path.dirname(pkg_path))
-
     if requirements:
         requirement_args = tuple('--requirement={}'.format(req)
                                  for req in requirements)
         pip_install('--log', os.path.join(venv_path, 'pip-requirements.log'),
                     *requirement_args)
 
-    if setup_py_files:
-        # Run through sorted so pw_cli (on which other packages depend) comes
-        # early in the list.
-        # TODO(mohrr) come up with a way better than just using sorted().
-        package_args = tuple('--editable={}'.format(package(path))
-                             for path in sorted(setup_py_files))
-        pip_install('--log', os.path.join(venv_path, 'pip-packages.log'),
-                    *package_args)
+    def install_packages(gn_target):
+        build = os.path.join(venv_path, gn_target.name)
 
-    if env:
-        env.set('VIRTUAL_ENV', venv_path)
-        env.prepend('PATH', venv_bin)
-        env.clear('PYTHONHOME')
+        gn_log = 'gn-gen-{}.log'.format(gn_target.name)
+        gn_log_path = os.path.join(venv_path, gn_log)
+        try:
+            with open(gn_log_path, 'w') as outs:
+                subprocess.check_call(('gn', 'gen', build),
+                                      cwd=os.path.join(project_root,
+                                                       gn_target.directory),
+                                      stdout=outs,
+                                      stderr=outs)
+        except subprocess.CalledProcessError as err:
+            with open(gn_log_path, 'r') as ins:
+                raise subprocess.CalledProcessError(err.returncode, err.cmd,
+                                                    ins.read())
+
+        ninja_log = 'ninja-{}.log'.format(gn_target.name)
+        ninja_log_path = os.path.join(venv_path, ninja_log)
+        try:
+            with open(ninja_log_path, 'w') as outs:
+                ninja_cmd = ['ninja', '-C', build]
+                ninja_cmd.append(gn_target.target)
+                subprocess.check_call(ninja_cmd, stdout=outs, stderr=outs)
+        except subprocess.CalledProcessError as err:
+            with open(ninja_log_path, 'r') as ins:
+                raise subprocess.CalledProcessError(err.returncode, err.cmd,
+                                                    ins.read())
+
+    if gn_targets:
+        if env:
+            env.set('VIRTUAL_ENV', venv_path)
+            env.prepend('PATH', venv_bin)
+            env.clear('PYTHONHOME')
+            with env():
+                for gn_target in gn_targets:
+                    install_packages(gn_target)
+        else:
+            for gn_target in gn_targets:
+                install_packages(gn_target)
 
     return True
diff --git a/pw_env_setup/py/pw_env_setup/windows_env_start.py b/pw_env_setup/py/pw_env_setup/windows_env_start.py
index 46ce452..62770a8 100644
--- a/pw_env_setup/py/pw_env_setup/windows_env_start.py
+++ b/pw_env_setup/py/pw_env_setup/windows_env_start.py
@@ -26,7 +26,7 @@
 import os
 import sys
 
-from colors import Color, enable_colors
+from colors import Color, enable_colors  # type: ignore
 
 _PIGWEED_BANNER = u'''
  ▒█████▄   █▓  ▄███▒  ▒█    ▒█ ░▓████▒ ░▓████▒ ▒▓████▄
@@ -37,22 +37,14 @@
 '''
 
 
-def main():
-    """Script entry point."""
-    if os.name != 'nt':
-        return 1
-
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--bootstrap', action='store_true')
-    parser.add_argument('--no-shell-file', action='store_true')
-    args = parser.parse_args()
-
+def print_banner(bootstrap, no_shell_file):
+    """Print the Pigweed or project-specific banner"""
     enable_colors()
 
     print(Color.green('\n  WELCOME TO...'))
     print(Color.magenta(_PIGWEED_BANNER))
 
-    if args.bootstrap:
+    if bootstrap:
         print(
             Color.green('\n  BOOTSTRAP! Bootstrap may take a few minutes; '
                         'please be patient'))
@@ -65,7 +57,7 @@
                 '\n  ACTIVATOR! This sets your console environment variables.\n'
             ))
 
-        if args.no_shell_file:
+        if no_shell_file:
             print(Color.bold_red('Error!\n'))
             print(
                 Color.red('  Your Pigweed environment does not seem to be'
@@ -75,5 +67,20 @@
     return 0
 
 
+def parse():
+    """Parse command-line arguments."""
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--bootstrap', action='store_true')
+    parser.add_argument('--no-shell-file', action='store_true')
+    return parser.parse_args()
+
+
+def main():
+    """Script entry point."""
+    if os.name != 'nt':
+        return 1
+    return print_banner(**vars(parse()))
+
+
 if __name__ == '__main__':
     sys.exit(main())
diff --git a/pw_env_setup/py/setup.py b/pw_env_setup/py/setup.py
index a0beace..b50b414 100644
--- a/pw_env_setup/py/setup.py
+++ b/pw_env_setup/py/setup.py
@@ -11,9 +11,9 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations under
 # the License.
-"""env_setup module definition for PyOxidizer."""
+"""pw_env_setup package definition."""
 
-import setuptools
+import setuptools  # type: ignore
 
 setuptools.setup(
     name='pw_env_setup',
@@ -27,6 +27,7 @@
     },
     package_data={
         'pw_env_setup': [
+            'py.typed',
             'cargo_setup/packages.txt',
             'cipd_setup/luci.json',
             'cipd_setup/pigweed.json',
@@ -34,4 +35,5 @@
             'virtualenv_setup/requirements.txt',
         ],
     },
+    zip_safe=False,
 )
diff --git a/pw_env_setup/util.sh b/pw_env_setup/util.sh
new file mode 100644
index 0000000..2a1168f
--- /dev/null
+++ b/pw_env_setup/util.sh
@@ -0,0 +1,269 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+_pw_abspath () {
+  python -c "import os.path; print(os.path.abspath('$@'))"
+}
+
+# Just in case PATH isn't already exported.
+export PATH
+
+# Note: Colors are unfortunately duplicated in several places; and removing the
+# duplication is not easy. Their locations are:
+#
+#   - bootstrap.sh
+#   - pw_cli/color.py
+#   - pw_env_setup/py/pw_env_setup/colors.py
+#
+# So please keep them matching when modifying them.
+pw_none() {
+  echo -e "$*"
+}
+
+pw_red() {
+  echo -e "\033[0;31m$*\033[0m"
+}
+
+pw_bold_red() {
+  echo -e "\033[1;31m$*\033[0m"
+}
+
+pw_yellow() {
+  echo -e "\033[0;33m$*\033[0m"
+}
+
+pw_bold_yellow() {
+  echo -e "\033[1;33m$*\033[0m"
+}
+
+pw_green() {
+  echo -e "\033[0;32m$*\033[0m"
+}
+
+pw_bold_green() {
+  echo -e "\033[1;32m$*\033[0m"
+}
+
+pw_blue() {
+  echo -e "\033[1;34m$*\033[0m"
+}
+
+pw_cyan() {
+  echo -e "\033[1;36m$*\033[0m"
+}
+
+pw_magenta() {
+  echo -e "\033[0;35m$*\033[0m"
+}
+
+pw_bold_white() {
+  echo -e "\033[1;37m$*\033[0m"
+}
+
+pw_eval_sourced() {
+  if [ "$1" -eq 0 ]; then
+    _PW_NAME=$(basename "$PW_SETUP_SCRIPT_PATH" .sh)
+    pw_bold_red "Error: Attempting to $_PW_NAME in a subshell"
+    pw_red "  Since $_PW_NAME.sh modifies your shell's environment variables,"
+    pw_red "  it must be sourced rather than executed. In particular, "
+    pw_red "  'bash $_PW_NAME.sh' will not work since the modified "
+    pw_red "  environment will get destroyed at the end of the script. "
+    pw_red "  Instead, source the script's contents in your shell:"
+    pw_red ""
+    pw_red "    \$ source $_PW_NAME.sh"
+    exit 1
+  fi
+}
+
+pw_check_root() {
+  _PW_ROOT="$1"
+  if [[ "$_PW_ROOT" = *" "* ]]; then
+    pw_bold_red "Error: The Pigweed path contains spaces\n"
+    pw_red "  The path '$_PW_ROOT' contains spaces. "
+    pw_red "  Pigweed's Python environment currently requires Pigweed to be "
+    pw_red "  at a path without spaces. Please checkout Pigweed in a "
+    pw_red "  directory without spaces and retry running bootstrap."
+    return
+  fi
+}
+
+pw_get_env_root() {
+  # PW_ENVIRONMENT_ROOT allows developers to specify where the environment
+  # should be installed. bootstrap.sh scripts should not use that variable to
+  # store the result of this function. This separation allows scripts to assume
+  # PW_ENVIRONMENT_ROOT came from the developer and not from a previous
+  # bootstrap possibly from another workspace.
+  if [ -z "$PW_ENVIRONMENT_ROOT" ]; then
+    echo "$PW_ROOT/.environment"
+  else
+    echo "$PW_ENVIRONMENT_ROOT"
+  fi
+}
+
+# Note: This banner is duplicated in three places; which is a lesser evil than
+# the contortions that would be needed to share this snippet across shell,
+# batch, and Python. Locations:
+#
+#   - pw_env_setup/util.sh
+#   - pw_cli/branding.py
+#   - pw_env_setup/py/pw_env_setup/windows_env_start.py
+#
+_PW_BANNER=$(cat <<EOF
+ ▒█████▄   █▓  ▄███▒  ▒█    ▒█ ░▓████▒ ░▓████▒ ▒▓████▄
+  ▒█░  █░ ░█▒ ██▒ ▀█▒ ▒█░ █ ▒█  ▒█   ▀  ▒█   ▀  ▒█  ▀█▌
+  ▒█▄▄▄█░ ░█▒ █▓░ ▄▄░ ▒█░ █ ▒█  ▒███    ▒███    ░█   █▌
+  ▒█▀     ░█░ ▓█   █▓ ░█░ █ ▒█  ▒█   ▄  ▒█   ▄  ░█  ▄█▌
+  ▒█      ░█░ ░▓███▀   ▒█▓▀▓█░ ░▓████▒ ░▓████▒ ▒▓████▀
+EOF
+)
+
+_pw_banner() {
+  if [ -z "$PW_ENVSETUP_QUIET" ] && [ -z "$PW_ENVSETUP_NO_BANNER" ]; then
+    pw_magenta "$_PW_BANNER\n"
+  fi
+}
+
+_PW_BANNER_FUNC="_pw_banner"
+
+_pw_hello() {
+  _PW_TEXT="$1"
+  if [ -n "$PW_BANNER_FUNC" ]; then
+    _PW_BANNER_FUNC="$PW_BANNER_FUNC"
+  fi
+  if [ -z "$PW_ENVSETUP_QUIET" ]; then
+    pw_green "\n  WELCOME TO...\n"
+    "$_PW_BANNER_FUNC"
+    pw_green "$_PW_TEXT"
+  fi
+}
+
+pw_deactivate() {
+  # Assume PW_ROOT and PW_PROJECT_ROOT have already been set and we need to
+  # preserve their values.
+  _NEW_PW_ROOT="$PW_ROOT"
+  _NEW_PW_PROJECT_ROOT="$PW_PROJECT_ROOT"
+
+  # Find deactivate script and run it.
+  _PW_DEACTIVATE_SH="$_PW_ACTUAL_ENVIRONMENT_ROOT/deactivate.sh"
+  if [ -f "$_PW_DEACTIVATE_SH" ]; then
+    . "$_PW_DEACTIVATE_SH"
+  fi
+
+  # If there's a _pw_deactivate function run it. Redirect output to /dev/null
+  # in case _pw_deactivate doesn't exist.
+  if [ -n "$(command -v _pw_deactivate)" ]; then
+    _pw_deactivate &> /dev/null
+  fi
+
+  # Restore.
+  PW_ROOT="$_NEW_PW_ROOT"
+  export PW_ROOT
+  PW_PROJECT_ROOT="$_NEW_PW_PROJECT_ROOT"
+  export PW_PROJECT_ROOT
+}
+
+# The next three functions use the following variables.
+# * PW_BANNER_FUNC: function to print banner
+# * PW_BOOTSTRAP_PYTHON: specific Python interpreter to use for bootstrap
+# * PW_USE_GCS_ENVSETUP: attempt to grab env setup executable from GCS if "true"
+# * PW_ROOT: path to Pigweed root
+# * PW_ENVSETUP_QUIET: limit output if "true"
+#
+# All arguments passed in are passed on to env_setup.py in pw_bootstrap,
+# pw_activate takes no arguments, and pw_finalize takes the name of the script
+# "bootstrap" or "activate" and the path to the setup script written by
+# bootstrap.sh.
+pw_bootstrap() {
+  _pw_hello "  BOOTSTRAP! Bootstrap may take a few minutes; please be patient.\n"
+
+  # Allow forcing a specific version of Python for testing purposes.
+  if [ -n "$PW_BOOTSTRAP_PYTHON" ]; then
+    _PW_PYTHON="$PW_BOOTSTRAP_PYTHON"
+  elif which python &> /dev/null; then
+    _PW_PYTHON=python
+  else
+    pw_bold_red "Error: No system Python present\n"
+    pw_red "  Pigweed's bootstrap process requires a local system Python."
+    pw_red "  Please install Python on your system, add it to your PATH"
+    pw_red "  and re-try running bootstrap."
+    return
+  fi
+
+  if [ -n "$PW_USE_GCS_ENVSETUP" ]; then
+    _PW_ENV_SETUP="$("$PW_ROOT/pw_env_setup/get_pw_env_setup.sh")"
+  fi
+
+  if [ -n "$_PW_ENV_SETUP" ]; then
+    "$_PW_ENV_SETUP" "$@"
+  else
+    "$_PW_PYTHON" "$PW_ROOT/pw_env_setup/py/pw_env_setup/env_setup.py" "$@"
+  fi
+}
+
+pw_activate() {
+  _pw_hello "  ACTIVATOR! This sets your shell environment variables.\n"
+}
+
+pw_finalize() {
+  _PW_NAME="$1"
+  _PW_SETUP_SH="$2"
+  if [ -f "$_PW_SETUP_SH" ]; then
+    . "$_PW_SETUP_SH"
+
+    if [ "$?" -eq 0 ]; then
+      if [ "$_PW_NAME" = "bootstrap" ] && [ -z "$PW_ENVSETUP_QUIET" ]; then
+        echo "To activate this environment in the future, run this in your "
+        echo "terminal:"
+        echo
+        pw_green "  source ./activate.sh\n"
+      fi
+    else
+      pw_red "Error during $_PW_NAME--see messages above."
+    fi
+  else
+    pw_red "Error during $_PW_NAME--see messages above."
+  fi
+}
+
+pw_cleanup() {
+  unset _PW_BANNER
+  unset _PW_BANNER_FUNC
+  unset _PW_ENV_SETUP
+  unset _PW_NAME
+  unset _PW_PYTHON
+  unset _PW_SETUP_SH
+  unset _PW_DEACTIVATE_SH
+  unset _NEW_PW_ROOT
+
+  unset _pw_abspath
+  unset pw_none
+  unset pw_red
+  unset pw_bold_red
+  unset pw_yellow
+  unset pw_bold_yellow
+  unset pw_green
+  unset pw_bold_green
+  unset pw_blue
+  unset pw_cyan
+  unset pw_magenta
+  unset pw_bold_white
+  unset pw_eval_sourced
+  unset pw_check_root
+  unset pw_get_env_root
+  unset _pw_banner
+  unset pw_bootstrap
+  unset pw_activate
+  unset pw_finalize
+  unset _pw_cleanup
+}
diff --git a/pw_env_setup/windows_scripts/py.bat b/pw_env_setup/windows_scripts/py.bat
new file mode 100644
index 0000000..27810f3
--- /dev/null
+++ b/pw_env_setup/windows_scripts/py.bat
@@ -0,0 +1,34 @@
+:<<"::WINDOWS_ONLY"
+@echo off
+:: Copyright 2020 The Pigweed Authors
+::
+:: Licensed under the Apache License, Version 2.0 (the "License"); you may not
+:: use this file except in compliance with the License. You may obtain a copy of
+:: the License at
+::
+::     https://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+:: WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+:: License for the specific language governing permissions and limitations under
+:: the License.
+::WINDOWS_ONLY
+:; echo "ERROR: Attempting to run Windows .bat from a Unix/POSIX shell!"
+:; echo "Instead, run the following command."
+:; echo ""
+:; echo "    source ./bootstrap.sh"
+:; echo ""
+:<<"::WINDOWS_ONLY"
+
+if "%1"=="-3" (
+    for /f "tokens=1,* delims= " %%a in ("%*") do set ARGS=%%b
+) else (
+    echo ERROR: Pigweed's mini py launcher only supports python3.
+    exit /b 1
+)
+
+:: Ignore the `-3` flag, Pigweed's python is always python3.
+call python %ARGS%
+
+::WINDOWS_ONLY
diff --git a/pw_fuzzer/BUILD.gn b/pw_fuzzer/BUILD.gn
index 61271c3..264033b 100644
--- a/pw_fuzzer/BUILD.gn
+++ b/pw_fuzzer/BUILD.gn
@@ -12,13 +12,13 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/target_types.gni")
 import("$dir_pw_docgen/docs.gni")
 import("$dir_pw_fuzzer/fuzzer.gni")
 import("$dir_pw_fuzzer/oss_fuzz.gni")
+
 config("default_config") {
   include_dirs = [ "public" ]
 }
@@ -73,11 +73,10 @@
     "public/pw_fuzzer/asan_interface.h",
     "public/pw_fuzzer/fuzzed_data_provider.h",
   ]
-  sources = public
   public_deps = [ "$dir_pw_log" ]
 }
 
-source_set("run_as_unit_test") {
+pw_source_set("run_as_unit_test") {
   configs = [ ":default_config" ]
   sources = [ "pw_fuzzer_disabled.cc" ]
   deps = [
diff --git a/pw_fuzzer/docs.rst b/pw_fuzzer/docs.rst
index e383566..baf34ce 100644
--- a/pw_fuzzer/docs.rst
+++ b/pw_fuzzer/docs.rst
@@ -1,7 +1,4 @@
-.. _chapter-pw-unit-test:
-.. default-domain:: cpp
-
-.. highlight:: sh
+.. _module-pw_fuzzer:
 
 ---------
 pw_fuzzer
diff --git a/pw_fuzzer/fuzzer.gni b/pw_fuzzer/fuzzer.gni
index fd19308..4a6fbf4 100644
--- a/pw_fuzzer/fuzzer.gni
+++ b/pw_fuzzer/fuzzer.gni
@@ -12,7 +12,6 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_toolchain/host_clang/toolchains.gni")
diff --git a/pw_hdlc_lite/BUILD b/pw_hdlc_lite/BUILD
index a090c10..125064a 100644
--- a/pw_hdlc_lite/BUILD
+++ b/pw_hdlc_lite/BUILD
@@ -23,14 +23,41 @@
 
 pw_cc_library(
     name = "pw_hdlc_lite",
-    hdrs = [
-      "public/pw_hdlc_lite/encoder.h",
-      "public/pw_hdlc_lite/sys_io_stream.h",
-    ],
     srcs = [
-      "encoder.cc",
+        "decoder.cc",
+        "encoder.cc",
+        "pw_hdlc_lite_private/protocol.h",
+        "rpc_packets.cc",
+    ],
+    hdrs = [
+        "public/pw_hdlc_lite/decoder.h",
+        "public/pw_hdlc_lite/encoder.h",
+        "public/pw_hdlc_lite/sys_io_stream.h",
     ],
     includes = ["public"],
+    deps = [
+        "//pw_bytes",
+        "//pw_checksum",
+        "//pw_log",
+        "//pw_result",
+        "//pw_span",
+        "//pw_status",
+        "//pw_stream",
+    ],
+)
+
+pw_cc_library(
+    name = "pw_rpc",
+    srcs = ["rpc_packets.cc"],
+    hdrs = [
+        "public/pw_hdlc_lite/rpc_channel.h",
+        "public/pw_hdlc_lite/rpc_packets.h",
+    ],
+    includes = ["public"],
+    deps = [
+        ":pw_hdlc_lite",
+        "//pw_rpc:server",
+    ],
 )
 
 cc_test(
@@ -42,3 +69,24 @@
         "//pw_unit_test",
     ],
 )
+
+cc_test(
+    name = "decoder_test",
+    srcs = ["decoder_test.cc"],
+    deps = [
+        ":pw_hdlc_lite",
+        "//pw_result",
+        "//pw_stream",
+        "//pw_unit_test",
+    ],
+)
+
+cc_test(
+    name = "rpc_channel_test",
+    srcs = ["rpc_channel_test.cc"],
+    deps = [
+        ":pw_hdlc_lite",
+        "//pw_stream",
+        "//pw_unit_test",
+    ],
+)
diff --git a/pw_hdlc_lite/BUILD.gn b/pw_hdlc_lite/BUILD.gn
index 31135f5..75eb537 100644
--- a/pw_hdlc_lite/BUILD.gn
+++ b/pw_hdlc_lite/BUILD.gn
@@ -12,37 +12,81 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/target_types.gni")
 import("$dir_pw_docgen/docs.gni")
 import("$dir_pw_unit_test/test.gni")
+
 config("default_config") {
   include_dirs = [ "public" ]
 }
 
-pw_source_set("pw_hdlc_lite") {
+group("pw_hdlc_lite") {
+  public_deps = [
+    ":decoder",
+    ":encoder",
+  ]
+}
+
+pw_source_set("decoder") {
+  public_configs = [ ":default_config" ]
+  public = [ "public/pw_hdlc_lite/decoder.h" ]
+  sources = [
+    "decoder.cc",
+    "pw_hdlc_lite_private/protocol.h",
+  ]
+  public_deps = [
+    dir_pw_bytes,
+    dir_pw_result,
+    dir_pw_status,
+  ]
+  deps = [
+    dir_pw_checksum,
+    dir_pw_log,
+  ]
+  friend = [ ":*" ]
+}
+
+pw_source_set("encoder") {
   public_configs = [ ":default_config" ]
   public = [
     "public/pw_hdlc_lite/encoder.h",
     "public/pw_hdlc_lite/sys_io_stream.h",
   ]
-  sources = [ "encoder.cc" ]
+  sources = [
+    "encoder.cc",
+    "pw_hdlc_lite_private/protocol.h",
+  ]
   public_deps = [
-    dir_pw_assert,
     dir_pw_bytes,
-    dir_pw_preprocessor,
-    dir_pw_span,
     dir_pw_status,
     dir_pw_stream,
     dir_pw_sys_io,
   ]
   deps = [ dir_pw_checksum ]
+  friend = [ ":*" ]
+}
+
+pw_source_set("pw_rpc") {
+  public_configs = [ ":default_config" ]
+  public = [
+    "public/pw_hdlc_lite/rpc_channel.h",
+    "public/pw_hdlc_lite/rpc_packets.h",
+  ]
+  sources = [ "rpc_packets.cc" ]
+  public_deps = [
+    ":pw_hdlc_lite",
+    "$dir_pw_rpc:server",
+  ]
 }
 
 pw_test_group("tests") {
-  tests = [ ":encoder_test" ]
+  tests = [
+    ":encoder_test",
+    ":decoder_test",
+    ":rpc_channel_test",
+  ]
   group_deps = [
     "$dir_pw_preprocessor:tests",
     "$dir_pw_span:tests",
@@ -52,13 +96,40 @@
 }
 
 pw_test("encoder_test") {
-  deps = [
-    ":pw_hdlc_lite",
-    "$dir_pw_stream",
-  ]
+  deps = [ ":pw_hdlc_lite" ]
   sources = [ "encoder_test.cc" ]
 }
 
+action("generate_decoder_test") {
+  outputs = [ "$target_gen_dir/generated_decoder_test.cc" ]
+  script = "py/decode_test.py"
+  args = [ "--generate-cc-test" ] + rebase_path(outputs)
+  deps = [ "$dir_pw_build/py" ]
+}
+
+pw_test("decoder_test") {
+  deps = [
+    ":generate_decoder_test",
+    ":pw_hdlc_lite",
+  ]
+  sources = [ "decoder_test.cc" ] + get_target_outputs(":generate_decoder_test")
+}
+
+pw_test("rpc_channel_test") {
+  deps = [
+    ":pw_hdlc_lite",
+    ":pw_rpc",
+  ]
+  sources = [ "rpc_channel_test.cc" ]
+}
+
 pw_doc_group("docs") {
-  sources = [ "docs.rst" ]
+  sources = [
+    "docs.rst",
+    "rpc_example/docs.rst",
+  ]
+  inputs = [
+    "py/pw_hdlc_lite/decode.py",
+    "py/pw_hdlc_lite/encode.py",
+  ]
 }
diff --git a/pw_protobuf_compiler/nanopb.gni b/pw_hdlc_lite/CMakeLists.txt
similarity index 68%
copy from pw_protobuf_compiler/nanopb.gni
copy to pw_hdlc_lite/CMakeLists.txt
index 3c3be32..44c0ba8 100644
--- a/pw_protobuf_compiler/nanopb.gni
+++ b/pw_hdlc_lite/CMakeLists.txt
@@ -12,8 +12,19 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-declare_args() {
-  # If compiling protos for nanopb, this variable is set to the path to the
-  # nanopb installation.
-  dir_pw_third_party_nanopb = ""
-}
+include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+
+pw_auto_add_simple_module(pw_hdlc_lite
+  PUBLIC_DEPS
+    pw_bytes
+    pw_result
+    pw_rpc.common
+    pw_status
+    pw_stream
+    pw_sys_io
+  PRIVATE_DEPS
+    pw_checksum
+    pw_log
+)
+
+add_subdirectory(rpc_example)
diff --git a/pw_hdlc_lite/decoder.cc b/pw_hdlc_lite/decoder.cc
new file mode 100644
index 0000000..fc96824
--- /dev/null
+++ b/pw_hdlc_lite/decoder.cc
@@ -0,0 +1,138 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_hdlc_lite/decoder.h"
+
+#include "pw_assert/assert.h"
+#include "pw_bytes/endian.h"
+#include "pw_checksum/crc32.h"
+#include "pw_hdlc_lite_private/protocol.h"
+#include "pw_log/log.h"
+
+using std::byte;
+
+namespace pw::hdlc_lite {
+namespace {
+
+constexpr byte kUnescapeConstant = byte{0x20};
+
+}  // namespace
+
+Result<Frame> Decoder::Process(const byte new_byte) {
+  switch (state_) {
+    case State::kInterFrame: {
+      if (new_byte == kFlag) {
+        state_ = State::kFrame;
+
+        // Report an error if non-flag bytes were read between frames.
+        if (current_frame_size_ != 0u) {
+          current_frame_size_ = 0;
+          return Status::DataLoss();
+        }
+      } else {
+        // Count bytes to track how many are discarded.
+        current_frame_size_ += 1;
+      }
+      return Status::Unavailable();  // Report error when starting a new frame.
+    }
+    case State::kFrame: {
+      if (new_byte == kFlag) {
+        const Status status = CheckFrame();
+
+        state_ = State::kFrame;
+        const size_t completed_frame_size = current_frame_size_;
+        current_frame_size_ = 0;
+
+        if (status.ok()) {
+          return Frame(buffer_.first(completed_frame_size));
+        }
+        return status;
+      }
+
+      if (new_byte == kEscape) {
+        state_ = State::kFrameEscape;
+      } else {
+        AppendByte(new_byte);
+      }
+      return Status::Unavailable();
+    }
+    case State::kFrameEscape: {
+      // The flag character cannot be escaped; return an error.
+      if (new_byte == kFlag) {
+        state_ = State::kFrame;
+        current_frame_size_ = 0;
+        return Status::DataLoss();
+      }
+
+      if (new_byte == kEscape) {
+        // Two escape characters in a row is illegal -- invalidate this frame.
+        // The frame is reported abandoned when the next flag byte appears.
+        state_ = State::kInterFrame;
+
+        // Count the escape byte so that the inter-frame state detects an error.
+        current_frame_size_ += 1;
+      } else {
+        state_ = State::kFrame;
+        AppendByte(new_byte ^ kUnescapeConstant);
+      }
+      return Status::Unavailable();
+    }
+  }
+  PW_CRASH("Bad decoder state");
+}
+
+void Decoder::AppendByte(byte new_byte) {
+  if (current_frame_size_ < max_size()) {
+    buffer_[current_frame_size_] = new_byte;
+  }
+
+  // Always increase size: if it is larger than the buffer, overflow occurred.
+  current_frame_size_ += 1;
+}
+
+Status Decoder::CheckFrame() const {
+  // Empty frames are not an error; repeated flag characters are okay.
+  if (current_frame_size_ == 0u) {
+    return Status::Unavailable();
+  }
+
+  if (current_frame_size_ < Frame::kMinSizeBytes) {
+    PW_LOG_ERROR("Received %lu-byte frame; frame must be at least 6 bytes",
+                 static_cast<unsigned long>(current_frame_size_));
+    return Status::DataLoss();
+  }
+
+  if (current_frame_size_ > max_size()) {
+    PW_LOG_ERROR("Frame size [%lu] exceeds the maximum buffer size [%lu]",
+                 static_cast<unsigned long>(current_frame_size_),
+                 static_cast<unsigned long>(max_size()));
+    return Status::ResourceExhausted();
+  }
+
+  if (!VerifyFrameCheckSequence()) {
+    PW_LOG_ERROR("Frame check sequence verification failed");
+    return Status::DataLoss();
+  }
+
+  return Status::Ok();
+}
+
+bool Decoder::VerifyFrameCheckSequence() const {
+  uint32_t fcs = bytes::ReadInOrder<uint32_t>(
+      std::endian::little, buffer_.data() + current_frame_size_ - sizeof(fcs));
+  return fcs == checksum::Crc32::Calculate(
+                    buffer_.first(current_frame_size_ - sizeof(fcs)));
+}
+
+}  // namespace pw::hdlc_lite
diff --git a/pw_hdlc_lite/decoder_test.cc b/pw_hdlc_lite/decoder_test.cc
new file mode 100644
index 0000000..b8400e2
--- /dev/null
+++ b/pw_hdlc_lite/decoder_test.cc
@@ -0,0 +1,128 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_hdlc_lite/decoder.h"
+
+#include <array>
+#include <cstddef>
+
+#include "gtest/gtest.h"
+#include "pw_bytes/array.h"
+#include "pw_hdlc_lite_private/protocol.h"
+
+namespace pw::hdlc_lite {
+namespace {
+
+using std::byte;
+
+TEST(Frame, Fields) {
+  static constexpr auto kFrameData = bytes::String("1234\xa3\xe0\xe3\x9b");
+  constexpr Frame frame(kFrameData);
+
+  static_assert(frame.address() == unsigned{'1'});
+  static_assert(frame.control() == byte{'2'});
+
+  static_assert(frame.data().size() == 2u);
+  static_assert(frame.data()[0] == byte{'3'});
+  static_assert(frame.data()[1] == byte{'4'});
+}
+
+TEST(Decoder, Clear) {
+  DecoderBuffer<8> decoder;
+
+  // Process a partial packet
+  decoder.Process(bytes::String("~1234abcd"),
+                  [](const Result<Frame>&) { FAIL(); });
+
+  decoder.clear();
+  Status status = Status::Unknown();
+
+  decoder.Process(
+      bytes::String("~1234\xa3\xe0\xe3\x9b~"),
+      [&status](const Result<Frame>& result) { status = result.status(); });
+
+  EXPECT_EQ(Status::Ok(), status);
+}
+
+TEST(Decoder, ExactFit) {
+  DecoderBuffer<8> decoder;
+
+  for (byte b : bytes::String("~1234\xa3\xe0\xe3\x9b")) {
+    EXPECT_EQ(Status::Unavailable(), decoder.Process(b).status());
+  }
+  auto result = decoder.Process(kFlag);
+  ASSERT_EQ(Status::Ok(), result.status());
+  ASSERT_EQ(result.value().data().size(), 2u);
+  ASSERT_EQ(result.value().data()[0], byte{'3'});
+  ASSERT_EQ(result.value().data()[1], byte{'4'});
+}
+
+TEST(Decoder, MinimumSizedBuffer) {
+  DecoderBuffer<6> decoder;
+
+  for (byte b : bytes::String("~12\xcd\x44\x53\x4f")) {
+    EXPECT_EQ(Status::Unavailable(), decoder.Process(b).status());
+  }
+
+  auto result = decoder.Process(kFlag);
+  ASSERT_EQ(Status::Ok(), result.status());
+  EXPECT_EQ(result.value().data().size(), 0u);
+}
+
+TEST(Decoder, TooLargeForBuffer_ReportsResourceExhausted) {
+  DecoderBuffer<8> decoder;
+
+  for (byte b : bytes::String("~123456789")) {
+    EXPECT_EQ(Status::Unavailable(), decoder.Process(b).status());
+  }
+  EXPECT_EQ(Status::ResourceExhausted(), decoder.Process(kFlag).status());
+
+  for (byte b : bytes::String("~123456789012345678901234567890")) {
+    EXPECT_EQ(Status::Unavailable(), decoder.Process(b).status());
+  }
+  EXPECT_EQ(Status::ResourceExhausted(), decoder.Process(kFlag).status());
+}
+
+TEST(Decoder, TooLargeForBuffer_StaysWithinBufferBoundaries) {
+  std::array<byte, 16> buffer = bytes::Initialized<16>('?');
+
+  Decoder decoder(std::span(buffer.data(), 8));
+
+  for (byte b : bytes::String("~1234567890123456789012345678901234567890")) {
+    EXPECT_EQ(Status::Unavailable(), decoder.Process(b).status());
+  }
+
+  for (size_t i = 8; i < buffer.size(); ++i) {
+    ASSERT_EQ(byte{'?'}, buffer[i]);
+  }
+
+  EXPECT_EQ(Status::ResourceExhausted(), decoder.Process(kFlag).status());
+}
+
+TEST(Decoder, TooLargeForBuffer_DecodesNextFrame) {
+  DecoderBuffer<8> decoder;
+
+  for (byte b : bytes::String("~123456789012345678901234567890")) {
+    EXPECT_EQ(Status::Unavailable(), decoder.Process(b).status());
+  }
+  EXPECT_EQ(Status::ResourceExhausted(), decoder.Process(kFlag).status());
+
+  for (byte b : bytes::String("1234\xa3\xe0\xe3\x9b")) {
+    EXPECT_EQ(Status::Unavailable(), decoder.Process(b).status());
+  }
+  EXPECT_EQ(Status::Ok(), decoder.Process(kFlag).status());
+}
+
+}  // namespace
+}  // namespace pw::hdlc_lite
diff --git a/pw_hdlc_lite/docs.rst b/pw_hdlc_lite/docs.rst
index 1314330..e42d917 100644
--- a/pw_hdlc_lite/docs.rst
+++ b/pw_hdlc_lite/docs.rst
@@ -1,36 +1,234 @@
-.. _chapter-pw-hdlc:
-
-.. default-domain:: cpp
-
-.. highlight:: sh
+.. _module-pw_hdlc_lite:
 
 ------------
 pw_hdlc_lite
 ------------
-pw_hdlc_lite is a module that enables serial communication between devices
-using the HDLC-Lite protocol.
+`High-Level Data Link Control (HDLC)
+<https://en.wikipedia.org/wiki/High-Level_Data_Link_Control>`_ is a data link
+layer protocol intended for serial communication between devices. HDLC is
+standardized as `ISO/IEC 13239:2002 <https://www.iso.org/standard/37010.html>`_.
+
+The ``pw_hdlc_lite`` module provides a simple, robust frame-oriented
+transport that uses a subset of the HDLC protocol. ``pw_hdlc_lite`` supports
+sending between embedded devices or the host. It can be used with
+:ref:`module-pw_rpc` to enable remote procedure calls (RPCs) on embedded
+devices.
+
+**Why use the pw_hdlc_lite module?**
+
+  * Enables the transmission of RPCs and other data between devices over serial.
+  * Detects corruption and data loss.
+  * Light-weight, simple, and easy to use.
+  * Supports streaming to transport without buffering, since the length is not
+    encoded.
+
+.. admonition:: Try it out!
+
+  For an example of how to use HDLC with :ref:`module-pw_rpc`, see the
+  :ref:`module-pw_hdlc_lite-rpc-example`.
+
+.. toctree::
+  :maxdepth: 1
+  :hidden:
+
+  rpc_example/docs
+
+Protocol Description
+====================
+
+Frames
+------
+The HDLC implementation in ``pw_hdlc_lite`` supports only HDLC information
+frames. These frames are encoded as follows:
+
+.. code-block:: text
+
+    _________________________________________
+    | | | |                          |    | |...
+    | | | |                          |    | |... [More frames]
+    |_|_|_|__________________________|____|_|...
+     F A C       Payload              FCS  F
+
+     F = flag byte (0x7e, the ~ character)
+     A = address field
+     C = control field
+     FCS = frame check sequence (CRC-32)
+
+
+Encoding and sending data
+-------------------------
+This module first writes an initial frame delimiter byte (0x7E) to indicate the
+beginning of the frame. Before sending any of the payload data through serial,
+the special bytes are escaped:
+
+            +-------------------------+-----------------------+
+            | Unescaped Special Bytes | Escaped Special Bytes |
+            +=========================+=======================+
+            |           7E            |        7D 5E          |
+            +-------------------------+-----------------------+
+            |           7D            |        7D 5D          |
+            +-------------------------+-----------------------+
+
+The bytes of the payload are escaped and written in a single pass. The
+frame check sequence is calculated, escaped, and written after. After this, a
+final frame delimiter byte (0x7E) is written to mark the end of the frame.
+
+Decoding received bytes
+-----------------------
+Frames may be received in multiple parts, so we need to store the received data
+in a buffer until the ending frame delimiter (0x7E) is read. When the
+``pw_hdlc_lite`` decoder receives data, it unescapes it and adds it to a buffer.
+When the frame is complete, it calculates and verifies the frame check sequence
+and does the following:
+
+* If correctly verified, the decoder returns the decoded frame.
+* If the checksum verification fails, the frame is discarded and an error is
+  reported.
+
+API Usage
+=========
+There are two primary functions of the ``pw_hdlc_lite`` module:
+
+  * **Encoding** data by constructing a frame with the escaped payload bytes and
+    frame check sequence.
+  * **Decoding** data by unescaping the received bytes, verifying the frame
+    check sequence, and returning successfully decoded frames.
+
+Encoder
+-------
+The Encoder API provides a single function that encodes data as an HDLC
+information frame.
+
+C++
+^^^
+.. cpp:namespace:: pw
+
+.. cpp:function:: Status hdlc_lite::WriteInformationFrame(uint8_t address, ConstByteSpan data, stream::Writer& writer)
+
+  Writes a span of data to a :ref:`pw::stream::Writer <module-pw_stream>` and
+  returns the status. This implementation uses the :ref:`module-pw_checksum`
+  module to compute the CRC-32 frame check sequence.
+
+.. code-block:: cpp
+
+  #include "pw_hdlc_lite/encoder.h"
+  #include "pw_hdlc_lite/sys_io_stream.h"
+
+  int main() {
+    pw::stream::SysIoWriter serial_writer;
+    Status status = WriteInformationFrame(123 /* address */,
+                                          data,
+                                          serial_writer);
+    if (!status.ok()) {
+      PW_LOG_INFO("Writing frame failed! %s", status.str());
+    }
+  }
+
+Python
+^^^^^^
+.. automodule:: pw_hdlc_lite.encode
+  :members:
+
+.. code-block:: python
+
+  import serial
+  from pw_hdlc_lite import encode
+
+  ser = serial.Serial()
+  ser.write(encode.information_frame(b'your data here!'))
+
+Decoder
+-------
+The decoder class unescapes received bytes and adds them to a buffer. Complete,
+valid HDLC frames are yielded as they are received.
+
+C++
+^^^
+.. cpp:class:: pw::hdlc_lite::Decoder
+
+  .. cpp:function:: pw::Result<Frame> Process(std::byte b)
+
+    Parses a single byte of an HDLC stream. Returns a Result with the complete
+    frame if the byte completes a frame. The status is the following:
+
+      - OK - A frame was successfully decoded. The Result contains the Frame,
+        which is invalidated by the next Process call.
+      - UNAVAILABLE - No frame is available.
+      - RESOURCE_EXHAUSTED - A frame completed, but it was too large to fit in
+        the decoder's buffer.
+      - DATA_LOSS - A frame completed, but it was invalid. The frame was
+        incomplete or the frame check sequence verification failed.
+
+  .. cpp:function:: void Process(pw::ConstByteSpan data, F&& callback, Args&&... args)
+
+    Processes a span of data and calls the provided callback with each frame or
+    error.
+
+This example demonstrates reading individual bytes from ``pw::sys_io`` and
+decoding HDLC frames:
+
+.. code-block:: cpp
+
+  #include "pw_hdlc_lite/decoder.h"
+  #include "pw_sys_io/sys_io.h"
+
+  int main() {
+    std::byte data;
+    while (true) {
+      if (!pw::sys_io::ReadByte(&data).ok()) {
+        // Log serial reading error
+      }
+      Result<Frame> decoded_frame = decoder.Process(data);
+
+      if (decoded_frame.ok()) {
+        // Handle the decoded frame
+      }
+    }
+  }
+
+Python
+^^^^^^
+.. autoclass:: pw_hdlc_lite.decode.FrameDecoder
+  :members:
+
+Below is an example using the decoder class to decode data read from serial:
+
+.. code-block:: python
+
+  import serial
+  from pw_hdlc_lite import decode
+
+  ser = serial.Serial()
+  decoder = decode.FrameDecoder()
+
+  while True:
+      for frame in decoder.process_valid_frames(ser.read()):
+          # Handle the decoded frame
+
+Additional features
+===================
+
+pw::stream::SysIoWriter
+------------------------
+The ``SysIoWriter`` C++ class implements the ``Writer`` interface with
+``pw::sys_io``. This Writer may be used by the C++ encoder to send HDLC frames
+over serial.
+
+HdlcRpcClient
+-------------
+.. autoclass:: pw_hdlc_lite.rpc.HdlcRpcClient
+  :members:
+
+Roadmap
+=======
+- **Expanded protocol support** - ``pw_hdlc_lite`` currently only supports
+  information frames with a single address byte and control byte. Support for
+  different frame types and extended address or control fields may be added in
+  the future.
+
+- **Higher performance** - We plan to improve the overall performance of the
+  decoder and encoder implementations by using SIMD/NEON.
 
 Compatibility
 =============
 C++17
-
-Dependencies
-============
-* ``pw_preprocessor``
-* ``pw_status``
-* ``pw_span``
-* ``pw_sys_io``
-* ``pw_stream``
-
-Features
-========
-
-pw::stream::SerialWriter
-------------------------
-The ``SerialWriter`` class implements the ``Writer`` interface by using sys_io
-to write data over a communication channel.
-
-
-Future work
-^^^^^^^^^^^
-- Adding the code for the Encoder and Decoder.
diff --git a/pw_hdlc_lite/encoder.cc b/pw_hdlc_lite/encoder.cc
index 2106f3b..4a785ee 100644
--- a/pw_hdlc_lite/encoder.cc
+++ b/pw_hdlc_lite/encoder.cc
@@ -20,79 +20,101 @@
 #include <cstring>
 #include <span>
 
-#include "pw_checksum/ccitt_crc16.h"
+#include "pw_bytes/endian.h"
+#include "pw_checksum/crc32.h"
+#include "pw_hdlc_lite_private/protocol.h"
 
 using std::byte;
 
 namespace pw::hdlc_lite {
 namespace {
 
-constexpr byte kHdlcFrameDelimiter = byte{0x7E};
-constexpr byte kHdlcEscape = byte{0x7D};
-constexpr std::array<byte, 2> kEscapedFrameDelimiterArray = {byte{0x7D},
-                                                             byte{0x5E}};
-constexpr std::array<byte, 2> kEscapedEscapeFlagArray = {byte{0x7D},
-                                                         byte{0x5D}};
+// Indicates this is an information frame with sequence numbers set to 0.
+constexpr byte kUnusedControl = byte{0};
 
-Status WriteFrameDelimiter(stream::Writer& writer) {
-  return writer.Write(kHdlcFrameDelimiter);
-}
-
-Status EscapeAndWriteByte(const byte b, stream::Writer& writer) {
-  if (b == kHdlcFrameDelimiter) {
-    return writer.Write(kEscapedFrameDelimiterArray);
-  } else if (b == kHdlcEscape) {
-    return writer.Write(kEscapedEscapeFlagArray);
+Status EscapeAndWrite(const byte b, stream::Writer& writer) {
+  if (b == kFlag) {
+    return writer.Write(kEscapedFlag);
+  }
+  if (b == kEscape) {
+    return writer.Write(kEscapedEscape);
   }
   return writer.Write(b);
 }
 
-Status WriteCrc(uint16_t crc, stream::Writer& writer) {
-  if (Status status = EscapeAndWriteByte(byte(crc & 0x00FF), writer)) {
-    return status;
-  }
-  return EscapeAndWriteByte(byte((crc & 0xFF00) >> 8), writer);
-}
+// Encodes and writes HDLC frames.
+class Encoder {
+ public:
+  constexpr Encoder(stream::Writer& output) : writer_(output) {}
 
-bool NeedsEscaping(byte b) {
-  return (b == kHdlcFrameDelimiter || b == kHdlcEscape);
-}
+  // Writes the header for an I-frame. After successfully calling
+  // StartInformationFrame, WriteData may be called any number of times.
+  Status StartInformationFrame(uint8_t address);
 
-}  // namespace
+  // Writes data for an ongoing frame. Must only be called after a successful
+  // StartInformationFrame call, and prior to a FinishFrame() call.
+  Status WriteData(ConstByteSpan data);
 
-Status EncodeAndWritePayload(ConstByteSpan payload, stream::Writer& writer) {
-  uint16_t crc = 0xFFFF;
+  // Finishes a frame. Writes the frame check sequence and a terminating flag.
+  Status FinishFrame();
 
-  if (Status status = WriteFrameDelimiter(writer); !status.ok()) {
+ private:
+  stream::Writer& writer_;
+  checksum::Crc32 fcs_;
+};
+
+Status Encoder::StartInformationFrame(uint8_t address) {
+  fcs_.clear();
+  if (Status status = writer_.Write(kFlag); !status.ok()) {
     return status;
   }
 
-  auto begin = payload.begin();
+  const byte address_and_control[] = {std::byte{address}, kUnusedControl};
+  return WriteData(address_and_control);
+}
+
+Status Encoder::WriteData(ConstByteSpan data) {
+  auto begin = data.begin();
   while (true) {
-    auto end = std::find_if(begin, payload.end(), NeedsEscaping);
+    auto end = std::find_if(begin, data.end(), NeedsEscaping);
 
-    if (Status status = writer.Write(std::span(begin, end)); !status.ok()) {
+    if (Status status = writer_.Write(std::span(begin, end)); !status.ok()) {
       return status;
     }
-    crc = checksum::CcittCrc16(std::span(begin, end), crc);
-
-    if (end == payload.end()) {
-      break;
+    if (end == data.end()) {
+      fcs_.Update(data);
+      return Status::Ok();
     }
-    crc = checksum::CcittCrc16(*end, crc);
-    if (Status status = EscapeAndWriteByte(*end, writer); !status.ok()) {
+    if (Status status = EscapeAndWrite(*end, writer_); !status.ok()) {
       return status;
     }
     begin = end + 1;
   }
+}
 
-  if (Status status = WriteCrc(crc, writer); !status.ok()) {
+Status Encoder::FinishFrame() {
+  if (Status status =
+          WriteData(bytes::CopyInOrder(std::endian::little, fcs_.value()));
+      !status.ok()) {
     return status;
   }
-  if (Status status = WriteFrameDelimiter(writer); !status.ok()) {
+  return writer_.Write(kFlag);
+}
+
+}  // namespace
+
+Status WriteInformationFrame(uint8_t address,
+                             ConstByteSpan payload,
+                             stream::Writer& writer) {
+  Encoder encoder(writer);
+
+  if (Status status = encoder.StartInformationFrame(address); !status.ok()) {
     return status;
   }
-  return Status::OK;
+  if (Status status = encoder.WriteData(payload); !status.ok()) {
+    return status;
+  }
+  return encoder.FinishFrame();
 }
 
 }  // namespace pw::hdlc_lite
diff --git a/pw_hdlc_lite/encoder_test.cc b/pw_hdlc_lite/encoder_test.cc
index 3b1d64e..72a66ce 100644
--- a/pw_hdlc_lite/encoder_test.cc
+++ b/pw_hdlc_lite/encoder_test.cc
@@ -19,173 +19,151 @@
 #include <cstddef>
 
 #include "gtest/gtest.h"
+#include "pw_bytes/array.h"
+#include "pw_hdlc_lite_private/protocol.h"
 #include "pw_stream/memory_stream.h"
 
 using std::byte;
 
-template <typename... Args>
-constexpr std::array<byte, sizeof...(Args)> MakeBytes(Args... args) noexcept {
-  return {static_cast<byte>(args)...};
-}
-
 namespace pw::hdlc_lite {
 namespace {
-// Size of the in-memory buffer to use for this test.
-constexpr size_t kSinkBufferSize = 15;
 
-TEST(Encoder, FrameFormatTest_1BytePayload) {
-  std::array<byte, kSinkBufferSize> memory_buffer;
-  stream::MemoryWriter memory_writer(memory_buffer);
+constexpr uint8_t kAddress = 0x7B;  // 123
+constexpr byte kControl = byte{0};
 
-  constexpr std::array<byte, 1> test_array = MakeBytes(0x41);
-  constexpr std::array<byte, 5> expected_array =
-      MakeBytes(0x7E, 0x41, 0x15, 0xB9, 0x7E);
+class WriteInfoFrame : public ::testing::Test {
+ protected:
+  WriteInfoFrame() : writer_(buffer_) {}
 
-  EXPECT_TRUE(EncodeAndWritePayload(test_array, memory_writer).ok());
-  EXPECT_EQ(memory_writer.bytes_written(), 5u);
-  EXPECT_EQ(std::memcmp(memory_writer.data(),
-                        expected_array.data(),
-                        memory_writer.bytes_written()),
-            0);
+  stream::MemoryWriter writer_;
+  std::array<byte, 32> buffer_;
+};
+
+#define EXPECT_ENCODER_WROTE(...)                                           \
+  do {                                                                      \
+    constexpr auto expected_data = (__VA_ARGS__);                           \
+    EXPECT_EQ(writer_.bytes_written(), expected_data.size());               \
+    EXPECT_EQ(                                                              \
+        std::memcmp(                                                        \
+            writer_.data(), expected_data.data(), writer_.bytes_written()), \
+        0);                                                                 \
+  } while (0)
+
+TEST_F(WriteInfoFrame, EmptyPayload) {
+  ASSERT_EQ(Status::Ok(),
+            WriteInformationFrame(kAddress, std::span<byte>(), writer_));
+  EXPECT_ENCODER_WROTE(
+      bytes::Concat(kFlag, kAddress, kControl, uint32_t{0x8D12B2C2}, kFlag));
 }
 
-TEST(Encoder, FrameFormatTest_EmptyPayload) {
-  std::array<byte, kSinkBufferSize> memory_buffer;
-  stream::MemoryWriter memory_writer(memory_buffer);
-
-  constexpr std::array<byte, 4> expected_array =
-      MakeBytes(0x7E, 0xFF, 0xFF, 0x7E);
-
-  EXPECT_TRUE(EncodeAndWritePayload(std::span<byte>(), memory_writer).ok());
-  EXPECT_EQ(memory_writer.bytes_written(), 4u);
-  EXPECT_EQ(std::memcmp(memory_writer.data(),
-                        expected_array.data(),
-                        memory_writer.bytes_written()),
-            0);
+TEST_F(WriteInfoFrame, OneBytePayload) {
+  ASSERT_EQ(Status::Ok(),
+            WriteInformationFrame(kAddress, bytes::String("A"), writer_));
+  EXPECT_ENCODER_WROTE(bytes::Concat(
+      kFlag, kAddress, kControl, 'A', uint32_t{0xA63E2FA5}, kFlag));
 }
 
-TEST(Encoder, FrameFormatTest_9BytePayload) {
-  std::array<byte, kSinkBufferSize> memory_buffer;
-  stream::MemoryWriter memory_writer(memory_buffer);
-
-  constexpr std::array<byte, 9> test_array =
-      MakeBytes(0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39);
-  constexpr std::array<byte, 13> expected_array = MakeBytes(0x7E,
-                                                            0x31,
-                                                            0x32,
-                                                            0x33,
-                                                            0x34,
-                                                            0x35,
-                                                            0x36,
-                                                            0x37,
-                                                            0x38,
-                                                            0x39,
-                                                            0xB1,
-                                                            0x29,
-                                                            0x7E);
-
-  EXPECT_TRUE(EncodeAndWritePayload(test_array, memory_writer).ok());
-  EXPECT_EQ(memory_writer.bytes_written(), 13u);
-  EXPECT_EQ(std::memcmp(memory_writer.data(),
-                        expected_array.data(),
-                        memory_writer.bytes_written()),
-            0);
+TEST_F(WriteInfoFrame, OneBytePayload_Escape0x7d) {
+  ASSERT_EQ(Status::Ok(),
+            WriteInformationFrame(kAddress, bytes::Array<0x7d>(), writer_));
+  EXPECT_ENCODER_WROTE(bytes::Concat(kFlag,
+                                     kAddress,
+                                     kControl,
+                                     kEscape,
+                                     byte{0x7d} ^ byte{0x20},
+                                     uint32_t{0x89515322},
+                                     kFlag));
 }
 
-TEST(Encoder, EncodingMultiplePayloads) {
-  std::array<byte, kSinkBufferSize> memory_buffer;
-  stream::MemoryWriter memory_writer(memory_buffer);
-
-  constexpr std::array<byte, 1> test_array = MakeBytes(0x41);
-  constexpr std::array<byte, 5> expected_array_1 =
-      MakeBytes(0x7E, 0x41, 0x15, 0xB9, 0x7E);
-  constexpr std::array<byte, 10> expected_array_2 =
-      MakeBytes(0x7E, 0x41, 0x15, 0xB9, 0x7E, 0x7E, 0x41, 0x15, 0xB9, 0x7E);
-
-  EXPECT_TRUE(EncodeAndWritePayload(test_array, memory_writer).ok());
-  EXPECT_EQ(memory_writer.bytes_written(), 5u);
-  EXPECT_EQ(std::memcmp(memory_writer.data(),
-                        expected_array_1.data(),
-                        memory_writer.bytes_written()),
-            0);
-
-  EXPECT_TRUE(EncodeAndWritePayload(test_array, memory_writer).ok());
-  EXPECT_EQ(memory_writer.bytes_written(), 10u);
-  EXPECT_EQ(std::memcmp(memory_writer.data(),
-                        expected_array_2.data(),
-                        memory_writer.bytes_written()),
-            0);
+TEST_F(WriteInfoFrame, OneBytePayload_Escape0x7E) {
+  ASSERT_EQ(Status::Ok(),
+            WriteInformationFrame(kAddress, bytes::Array<0x7e>(), writer_));
+  EXPECT_ENCODER_WROTE(bytes::Concat(kFlag,
+                                     kAddress,
+                                     kControl,
+                                     kEscape,
+                                     byte{0x7e} ^ byte{0x20},
+                                     uint32_t{0x10580298},
+                                     kFlag));
 }
 
-TEST(Encoder, EscapingTest_0x7D) {
-  std::array<byte, kSinkBufferSize> memory_buffer;
-  stream::MemoryWriter memory_writer(memory_buffer);
-
-  constexpr std::array<byte, 1> test_array = MakeBytes(0x7D);
-  constexpr std::array<byte, 6> expected_array =
-      MakeBytes(0x7E, 0x7D, 0x5D, 0xCA, 0x4E, 0x7E);
-
-  EXPECT_TRUE(EncodeAndWritePayload(test_array, memory_writer).ok());
-  EXPECT_EQ(memory_writer.bytes_written(), 6u);
-  EXPECT_EQ(std::memcmp(memory_writer.data(),
-                        expected_array.data(),
-                        memory_writer.bytes_written()),
-            0);
+TEST_F(WriteInfoFrame, AddressNeedsEscaping) {
+  ASSERT_EQ(Status::Ok(),
+            WriteInformationFrame(0x7d, bytes::String("A"), writer_));
+  EXPECT_ENCODER_WROTE(bytes::Concat(
+      kFlag, kEscape, byte{0x5d}, kControl, 'A', uint32_t{0xA2B35317}, kFlag));
 }
 
-TEST(Encoder, EscapingTest_0x7E) {
-  std::array<byte, kSinkBufferSize> memory_buffer;
-  stream::MemoryWriter memory_writer(memory_buffer);
+TEST_F(WriteInfoFrame, Crc32NeedsEscaping) {
+  ASSERT_EQ(Status::Ok(),
+            WriteInformationFrame(kAddress, bytes::String("abcdefg"), writer_));
 
-  constexpr std::array<byte, 1> test_array = MakeBytes(0x7E);
-  constexpr std::array<byte, 7> expected_array =
-      MakeBytes(0x7E, 0x7D, 0x5E, 0xA9, 0x7D, 0x5E, 0x7E);
-
-  EXPECT_TRUE(EncodeAndWritePayload(test_array, memory_writer).ok());
-  EXPECT_EQ(memory_writer.bytes_written(), 7u);
-  EXPECT_EQ(std::memcmp(memory_writer.data(),
-                        expected_array.data(),
-                        memory_writer.bytes_written()),
-            0);
+  // The CRC-32 is 0x38B9FC7E, so the 0x7E must be escaped.
+  constexpr auto expected_crc32 = bytes::Array<0x7d, 0x5e, 0xfc, 0xb9, 0x38>();
+  EXPECT_ENCODER_WROTE(bytes::Concat(kFlag,
+                                     kAddress,
+                                     kControl,
+                                     bytes::String("abcdefg"),
+                                     expected_crc32,
+                                     kFlag));
 }
 
-TEST(Encoder, EscapingTest_Mix) {
-  std::array<byte, kSinkBufferSize> memory_buffer;
-  stream::MemoryWriter memory_writer(memory_buffer);
-
-  constexpr std::array<byte, 7> test_array =
-      MakeBytes(0x7E, 0x7B, 0x61, 0x62, 0x63, 0x7D, 0x7E);
-  constexpr std::array<byte, 14> expected_array = MakeBytes(0x7E,
-                                                            0x7D,
-                                                            0x5E,
-                                                            0x7B,
-                                                            0x61,
-                                                            0x62,
-                                                            0x63,
-                                                            0x7D,
-                                                            0x5D,
-                                                            0x7D,
-                                                            0x5E,
-                                                            0x49,
-                                                            0xE5,
-                                                            0x7E);
-
-  EXPECT_TRUE(EncodeAndWritePayload(test_array, memory_writer).ok());
-  EXPECT_EQ(memory_writer.bytes_written(), 14u);
-  EXPECT_EQ(std::memcmp(memory_writer.data(),
-                        expected_array.data(),
-                        memory_writer.bytes_written()),
-            0);
+TEST_F(WriteInfoFrame, MultiplePayloads) {
+  ASSERT_EQ(Status::Ok(),
+            WriteInformationFrame(kAddress, bytes::String("ABC"), writer_));
+  ASSERT_EQ(Status::Ok(),
+            WriteInformationFrame(kAddress, bytes::String("DEF"), writer_));
+  EXPECT_ENCODER_WROTE(bytes::Concat(kFlag,
+                                     kAddress,
+                                     kControl,
+                                     bytes::String("ABC"),
+                                     uint32_t{0x14E2FC99},
+                                     kFlag,
+                                     kFlag,
+                                     kAddress,
+                                     kControl,
+                                     bytes::String("DEF"),
+                                     uint32_t{0x2D025C3A},
+                                     kFlag));
 }
 
-TEST(Encoder, WriterErrorTest) {
-  std::array<byte, kSinkBufferSize> memory_buffer;
-  stream::MemoryWriter memory_writer(memory_buffer);
+TEST_F(WriteInfoFrame, PayloadWithNoEscapes) {
+  ASSERT_EQ(Status::Ok(),
+            WriteInformationFrame(
+                kAddress, bytes::String("123456789012345678901234"), writer_));
 
-  constexpr std::array<byte, 12> test_array = MakeBytes(
-      0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x40, 0x41);
+  // Fill the memory writer's buffer.
+  ASSERT_EQ(writer_.bytes_written(), buffer_.size());
 
-  EXPECT_FALSE(EncodeAndWritePayload(test_array, memory_writer).ok());
+  EXPECT_ENCODER_WROTE(bytes::Concat(kFlag,
+                                     kAddress,
+                                     kControl,
+                                     bytes::String("123456789012345678901234"),
+                                     uint32_t{0x50AA35EC},
+                                     kFlag));
+}
+
+TEST_F(WriteInfoFrame, PayloadWithMultipleEscapes) {
+  ASSERT_EQ(Status::Ok(),
+            WriteInformationFrame(
+                kAddress,
+                bytes::Array<0x7E, 0x7B, 0x61, 0x62, 0x63, 0x7D, 0x7E>(),
+                writer_));
+  EXPECT_ENCODER_WROTE(bytes::Concat(
+      kFlag,
+      kAddress,
+      kControl,
+      bytes::
+          Array<0x7D, 0x5E, 0x7B, 0x61, 0x62, 0x63, 0x7D, 0x5D, 0x7D, 0x5E>(),
+      uint32_t{0x1B8D505E},
+      kFlag));
+}
+
+TEST_F(WriteInfoFrame, WriterError) {
+  constexpr auto data = bytes::Initialized<sizeof(buffer_)>(0x7e);
+
+  EXPECT_EQ(Status::ResourceExhausted(),
+            WriteInformationFrame(kAddress, data, writer_));
 }
 
 }  // namespace
diff --git a/pw_hdlc_lite/public/pw_hdlc_lite/decoder.h b/pw_hdlc_lite/public/pw_hdlc_lite/decoder.h
new file mode 100644
index 0000000..d15cbe7
--- /dev/null
+++ b/pw_hdlc_lite/public/pw_hdlc_lite/decoder.h
@@ -0,0 +1,154 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <algorithm>
+#include <array>
+#include <cstddef>
+#include <cstring>
+#include <functional>  // std::invoke
+
+#include "pw_bytes/span.h"
+#include "pw_result/result.h"
+#include "pw_status/status.h"
+
+namespace pw::hdlc_lite {
+
+// Represents the contents of an HDLC frame -- the unescaped data between two
+// flag bytes. Instances of Frame are only created when a full, valid frame has
+// been read.
+//
+// For now, the Frame class assumes single-byte address and control fields and a
+// 32-bit frame check sequence (FCS).
+class Frame {
+ private:
+  static constexpr size_t kAddressSize = 1;
+  static constexpr size_t kControlSize = 1;
+  static constexpr size_t kFcsSize = sizeof(uint32_t);
+
+ public:
+  // The minimum size of a frame, excluding control bytes (flag or escape).
+  static constexpr size_t kMinSizeBytes =
+      kAddressSize + kControlSize + kFcsSize;
+
+  // Creates a Frame with the specified data. The data MUST be valid frame data
+  // with a verified frame check sequence.
+  explicit constexpr Frame(ConstByteSpan data) : frame_(data) {
+    // TODO(pwbug/246): Use PW_DASSERT when available.
+    // PW_DASSERT(data.size() >= kMinSizeBytes);
+  }
+
+  constexpr unsigned address() const {
+    return std::to_integer<unsigned>(frame_[0]);
+  }
+
+  constexpr std::byte control() const { return frame_[kAddressSize]; }
+
+  constexpr ConstByteSpan data() const {
+    return frame_.subspan(kAddressSize + kControlSize,
+                          frame_.size() - kMinSizeBytes);
+  }
+
+ private:
+  ConstByteSpan frame_;
+};
+
+// The Decoder class facilitates decoding of data frames using the HDLC-Lite
+// protocol, by returning packets as they are decoded and storing incomplete
+// data frames in a buffer.
+//
+// The Decoder class does not own the buffer it writes to. It can be used to
+// write bytes to any buffer. The DecoderBuffer template class, defined below,
+// allocates a buffer.
+class Decoder {
+ public:
+  constexpr Decoder(ByteSpan buffer)
+      : buffer_(buffer), current_frame_size_(0), state_(State::kInterFrame) {}
+
+  Decoder(const Decoder&) = delete;
+  Decoder& operator=(const Decoder&) = delete;
+
+  // Parses a single byte of an HDLC stream. Returns a Result with the complete
+  // frame if the byte completes a frame. The status is the following:
+  //
+  //     OK - A frame was successfully decoded. The Result contains the Frame,
+  //         which is invalidated by the next Process call.
+  //     UNAVAILABLE - No frame is available.
+  //     RESOURCE_EXHAUSTED - A frame completed, but it was too large to fit in
+  //         the decoder's buffer.
+  //     DATA_LOSS - A frame completed, but it was invalid. The frame was
+  //         incomplete or the frame check sequence verification failed.
+  //
+  Result<Frame> Process(std::byte b);
+
+  // Processes a span of data and calls the provided callback with each frame or
+  // error.
+  template <typename F, typename... Args>
+  void Process(ConstByteSpan data, F&& callback, Args&&... args) {
+    for (std::byte b : data) {
+      auto result = Process(b);
+      if (result.status() != Status::Unavailable()) {
+        std::invoke(
+            std::forward<F>(callback), std::forward<Args>(args)..., result);
+      }
+    }
+  }
+
+  // Returns the maximum size of the Decoder's frame buffer.
+  size_t max_size() const { return buffer_.size(); }
+
+  // Clears and resets the decoder.
+  void clear() {
+    current_frame_size_ = 0;
+    state_ = State::kInterFrame;
+  };
+
+ private:
+  // State enum class is used to make the Decoder a finite state machine.
+  enum class State {
+    kInterFrame,
+    kFrame,
+    kFrameEscape,
+  };
+
+  void AppendByte(std::byte new_byte);
+
+  Status CheckFrame() const;
+
+  bool VerifyFrameCheckSequence() const;
+
+  const ByteSpan buffer_;
+
+  size_t current_frame_size_;
+
+  State state_;
+};
+
+// DecoderBuffers declare a buffer along with a Decoder.
+template <size_t size_bytes>
+class DecoderBuffer : public Decoder {
+ public:
+  DecoderBuffer() : Decoder(frame_buffer_) {}
+
+  // Returns the maximum number of bytes that can be stored in the frame
+  // buffer.
+  static constexpr size_t max_size() { return size_bytes; }
+
+ private:
+  static_assert(size_bytes >= Frame::kMinSizeBytes);
+
+  std::array<std::byte, size_bytes> frame_buffer_;
+};
+
+}  // namespace pw::hdlc_lite
diff --git a/pw_hdlc_lite/public/pw_hdlc_lite/encoder.h b/pw_hdlc_lite/public/pw_hdlc_lite/encoder.h
index d06c26b..ba2388c 100644
--- a/pw_hdlc_lite/public/pw_hdlc_lite/encoder.h
+++ b/pw_hdlc_lite/public/pw_hdlc_lite/encoder.h
@@ -19,11 +19,18 @@
 
 namespace pw::hdlc_lite {
 
-// Function used to encode 0-kMaxPayloadSize bytes and write it to our
-// pw::stream::writer. This function is safe to call multiple times in
-// succession since it automatically writes a delimiter byte at the
-// beginning and the end. This enables successive encoding of multiple
-// data frames.
-Status EncodeAndWritePayload(ConstByteSpan payload, stream::Writer& writer);
+// Writes an HDLC information frame (I-frame) to the provided writer. The frame
+// contains the following:
+//
+//   - HDLC flag byte (0x7e)
+//   - Address
+//   - Control byte (fixed at 0; sequence numbers are not used currently).
+//   - Data (0 or more bytes)
+//   - Frame check sequence (CRC-32)
+//   - HDLC flag byte (0x7e)
+//
+Status WriteInformationFrame(uint8_t address,
+                             ConstByteSpan data,
+                             stream::Writer& writer);
 
 }  // namespace pw::hdlc_lite
diff --git a/pw_hdlc_lite/public/pw_hdlc_lite/rpc_channel.h b/pw_hdlc_lite/public/pw_hdlc_lite/rpc_channel.h
new file mode 100644
index 0000000..4f0e401
--- /dev/null
+++ b/pw_hdlc_lite/public/pw_hdlc_lite/rpc_channel.h
@@ -0,0 +1,76 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <array>
+#include <span>
+
+#include "pw_hdlc_lite/encoder.h"
+#include "pw_rpc/channel.h"
+#include "pw_stream/stream.h"
+
+namespace pw::hdlc_lite {
+
+// Custom HDLC ChannelOutput class to write and read data through serial using
+// the HDLC-Lite protocol.
+class RpcChannelOutput : public rpc::ChannelOutput {
+ public:
+  // The RpcChannelOutput class does not own the buffer it uses to store the
+  // protobuf bytes. This buffer is specified at the time of creation along with
+  // a writer object which will be used to write and send the bytes.
+  constexpr RpcChannelOutput(stream::Writer& writer,
+                             std::span<std::byte> buffer,
+                             uint8_t address,
+                             const char* channel_name)
+      : ChannelOutput(channel_name),
+        writer_(writer),
+        buffer_(buffer),
+        address_(address) {}
+
+  std::span<std::byte> AcquireBuffer() override { return buffer_; }
+
+  Status SendAndReleaseBuffer(size_t size) override {
+    return hdlc_lite::WriteInformationFrame(
+        address_, buffer_.first(size), writer_);
+  }
+
+ private:
+  stream::Writer& writer_;
+  const std::span<std::byte> buffer_;
+  const uint8_t address_;
+};
+
+// RpcChannelOutput with its own buffer.
+template <size_t buffer_size>
+class RpcChannelOutputBuffer : public rpc::ChannelOutput {
+ public:
+  constexpr RpcChannelOutputBuffer(stream::Writer& writer,
+                                   uint8_t address,
+                                   const char* channel_name)
+      : ChannelOutput(channel_name), writer_(writer), address_(address) {}
+
+  std::span<std::byte> AcquireBuffer() override { return buffer_; }
+
+  Status SendAndReleaseBuffer(size_t size) override {
+    return hdlc_lite::WriteInformationFrame(
+        address_, std::span(buffer_.data(), size), writer_);
+  }
+
+ private:
+  stream::Writer& writer_;
+  std::array<std::byte, buffer_size> buffer_;
+  const uint8_t address_;
+};
+
+}  // namespace pw::hdlc_lite
diff --git a/pw_hdlc_lite/public/pw_hdlc_lite/rpc_packets.h b/pw_hdlc_lite/public/pw_hdlc_lite/rpc_packets.h
new file mode 100644
index 0000000..de2a191
--- /dev/null
+++ b/pw_hdlc_lite/public/pw_hdlc_lite/rpc_packets.h
@@ -0,0 +1,34 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <cstdint>
+
+#include "pw_hdlc_lite/decoder.h"
+#include "pw_rpc/channel.h"
+#include "pw_rpc/server.h"
+#include "pw_status/status.h"
+
+namespace pw::hdlc_lite {
+
+inline constexpr uint8_t kDefaultRpcAddress = 'R';
+
+// Reads HDLC frames with sys_io::ReadByte, using decode_buffer to store frames.
+// HDLC frames sent to rpc_address are passed to the RPC server.
+Status ReadAndProcessPackets(rpc::Server& server,
+                             rpc::ChannelOutput& output,
+                             std::span<std::byte> decode_buffer,
+                             unsigned rpc_address = kDefaultRpcAddress);
+
+}  // namespace pw::hdlc_lite
diff --git a/pw_hdlc_lite/public/pw_hdlc_lite/sys_io_stream.h b/pw_hdlc_lite/public/pw_hdlc_lite/sys_io_stream.h
index 32ddbb6..219d8eb 100644
--- a/pw_hdlc_lite/public/pw_hdlc_lite/sys_io_stream.h
+++ b/pw_hdlc_lite/public/pw_hdlc_lite/sys_io_stream.h
@@ -15,6 +15,7 @@
 
 #include <array>
 #include <cstddef>
+#include <limits>
 #include <span>
 
 #include "pw_stream/stream.h"
@@ -22,18 +23,16 @@
 
 namespace pw::stream {
 
-class SerialWriter : public Writer {
+class SysIoWriter : public Writer {
  public:
-  size_t bytes_written() const { return bytes_written_; }
-
- private:
-  // Implementation for writing data to this stream.
-  Status DoWrite(std::span<const std::byte> data) override {
-    bytes_written_ += data.size_bytes();
-    return pw::sys_io::WriteBytes(data).status();
+  size_t ConservativeWriteLimit() const override {
+    return std::numeric_limits<size_t>::max();
   }
 
-  size_t bytes_written_ = 0;
+ private:
+  Status DoWrite(std::span<const std::byte> data) override {
+    return pw::sys_io::WriteBytes(data).status();
+  }
 };
 
 }  // namespace pw::stream
diff --git a/pw_hdlc_lite/pw_hdlc_lite_private/protocol.h b/pw_hdlc_lite/pw_hdlc_lite_private/protocol.h
new file mode 100644
index 0000000..25159dbc
--- /dev/null
+++ b/pw_hdlc_lite/pw_hdlc_lite_private/protocol.h
@@ -0,0 +1,33 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <array>
+#include <cstddef>
+
+namespace pw::hdlc_lite {
+
+inline constexpr std::byte kFlag = std::byte{0x7E};
+inline constexpr std::byte kEscape = std::byte{0x7D};
+
+inline constexpr std::array<std::byte, 2> kEscapedFlag = {kEscape,
+                                                          std::byte{0x5E}};
+inline constexpr std::array<std::byte, 2> kEscapedEscape = {kEscape,
+                                                            std::byte{0x5D}};
+
+constexpr bool NeedsEscaping(std::byte b) {
+  return (b == kFlag || b == kEscape);
+}
+
+}  // namespace pw::hdlc_lite
diff --git a/pw_hdlc_lite/py/BUILD.gn b/pw_hdlc_lite/py/BUILD.gn
new file mode 100644
index 0000000..1b18011
--- /dev/null
+++ b/pw_hdlc_lite/py/BUILD.gn
@@ -0,0 +1,37 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/python.gni")
+
+pw_python_package("py") {
+  setup = [ "setup.py" ]
+  sources = [
+    "pw_hdlc_lite/__init__.py",
+    "pw_hdlc_lite/decode.py",
+    "pw_hdlc_lite/encode.py",
+    "pw_hdlc_lite/protocol.py",
+    "pw_hdlc_lite/rpc.py",
+    "pw_hdlc_lite/rpc_console.py",
+  ]
+  tests = [
+    "decode_test.py",
+    "encode_test.py",
+  ]
+  python_deps = [
+    "$dir_pw_protobuf_compiler/py",
+    "$dir_pw_rpc/py",
+  ]
+}
diff --git a/pw_hdlc_lite/py/decode_test.py b/pw_hdlc_lite/py/decode_test.py
new file mode 100755
index 0000000..7924632
--- /dev/null
+++ b/pw_hdlc_lite/py/decode_test.py
@@ -0,0 +1,273 @@
+#!/usr/bin/env python3
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Contains the Python decoder tests and generates C++ decoder tests."""
+
+from typing import Iterator, List, NamedTuple, Tuple
+import unittest
+
+from pw_build.generated_tests import Context, PyTest, TestGenerator, GroupOrTest
+from pw_build.generated_tests import parse_test_generation_args
+from pw_hdlc_lite.decode import Frame, FrameDecoder, FrameStatus, NO_ADDRESS
+from pw_hdlc_lite.protocol import frame_check_sequence as fcs
+
+
+def _encode(address: int, control: int, data: bytes) -> bytes:
+    frame = bytearray([address, control]) + data
+    frame += fcs(frame)
+    frame = frame.replace(b'\x7d', b'\x7d\x5d')
+    frame = frame.replace(b'\x7e', b'\x7d\x5e')
+    return b''.join([b'\x7e', frame, b'\x7e'])
+
+
+class Expected(NamedTuple):
+    address: int
+    control: bytes
+    data: bytes
+    status: FrameStatus = FrameStatus.OK
+
+    def __eq__(self, other) -> bool:
+        """Define == so an Expected and a Frame can be compared."""
+        return (self.address == other.address and self.control == other.control
+                and self.data == other.data and self.status is other.status)
+
+
+_PARTIAL = fcs(b'\x0ACmsg\x5e')
+_ESCAPED_FLAG_TEST_CASE = (
+    b'\x7e\x0ACmsg\x7d\x7e' + _PARTIAL + b'\x7e',
+    [
+        Expected(0xA, b'C', b'', FrameStatus.INCOMPLETE),
+        Expected(_PARTIAL[0], _PARTIAL[1:2], b'', FrameStatus.INCOMPLETE),
+    ],
+)
+
+TEST_CASES: Tuple[GroupOrTest[Tuple[bytes, List[Expected]]], ...] = (
+    'Empty payload',
+    (_encode(0, 0, b''), [Expected(0, b'\0', b'')]),
+    (_encode(55, 0x99, b''), [Expected(55, b'\x99', b'')]),
+    (_encode(55, 0x99, b'') * 3, [Expected(55, b'\x99', b'')] * 3),
+    'Simple one-byte payload',
+    (_encode(0, 0, b'\0'), [Expected(0, b'\0', b'\0')]),
+    (_encode(123, 0, b'A'), [Expected(123, b'\0', b'A')]),
+    'Simple multi-byte payload',
+    (_encode(0, 0, b'Hello, world!'), [Expected(0, b'\0', b'Hello, world!')]),
+    (_encode(123, 0, b'\0\0\1\0\0'), [Expected(123, b'\0', b'\0\0\1\0\0')]),
+    'Escaped one-byte payload',
+    (_encode(1, 2, b'\x7e'), [Expected(1, b'\2', b'\x7e')]),
+    (_encode(1, 2, b'\x7d'), [Expected(1, b'\2', b'\x7d')]),
+    (_encode(1, 2, b'\x7e') + _encode(1, 2, b'\x7d'),
+     [Expected(1, b'\2', b'\x7e'),
+      Expected(1, b'\2', b'\x7d')]),
+    'Escaped address',
+    (_encode(0x7e, 0, b'A'), [Expected(0x7e, b'\0', b'A')]),
+    (_encode(0x7d, 0, b'B'), [Expected(0x7d, b'\0', b'B')]),
+    'Escaped control',
+    (_encode(0, 0x7e, b'C'), [Expected(0, b'\x7e', b'C')]),
+    (_encode(0, 0x7d, b'D'), [Expected(0, b'\x7d', b'D')]),
+    'Escaped address and control',
+    (_encode(0x7e, 0x7d, b'E'), [Expected(0x7e, b'\x7d', b'E')]),
+    (_encode(0x7d, 0x7e, b'F'), [Expected(0x7d, b'\x7e', b'F')]),
+    (_encode(0x7e, 0x7e, b'\x7e'), [Expected(0x7e, b'\x7e', b'\x7e')]),
+    'Multiple frames separated by single flag',
+    (_encode(0, 0, b'A')[:-1] + _encode(1, 2, b'123'),
+     [Expected(0, b'\0', b'A'),
+      Expected(1, b'\2', b'123')]),
+    (_encode(0xff, 0, b'Yo')[:-1] * 3 + b'\x7e',
+     [Expected(0xff, b'\0', b'Yo')] * 3),
+    'Ignore empty frames',
+    (b'\x7e\x7e', []),
+    (b'\x7e' * 10, []),
+    (b'\x7e\x7e' + _encode(1, 2, b'3') + b'\x7e' * 5,
+     [Expected(1, b'\2', b'3')]),
+    (b'\x7e' * 10 + _encode(1, 2, b':O') + b'\x7e' * 3 + _encode(3, 4, b':P'),
+     [Expected(1, b'\2', b':O'),
+      Expected(3, b'\4', b':P')]),
+    'Cannot escape flag',
+    (b'\x7e\xAA\x7d\x7e\xab\x00Hello' + fcs(b'\xab\0Hello') + b'\x7e', [
+        Expected(0xAA, b'', b'', FrameStatus.INCOMPLETE),
+        Expected(0xab, b'\0', b'Hello'),
+    ]),
+    _ESCAPED_FLAG_TEST_CASE,
+    'Frame too short',
+    (b'\x7e1\x7e', [Expected(ord('1'), b'', b'', FrameStatus.INCOMPLETE)]),
+    (b'\x7e12\x7e', [Expected(ord('1'), b'2', b'', FrameStatus.INCOMPLETE)]),
+    (b'\x7e12345\x7e', [Expected(ord('1'), b'2', b'',
+                                 FrameStatus.INCOMPLETE)]),
+    'Incorrect frame check sequence',
+    (b'\x7e123456\x7e',
+     [Expected(ord('1'), b'2', b'', FrameStatus.FCS_MISMATCH)]),
+    (b'\x7e\1\2msg\xff\xff\xff\xff\x7e',
+     [Expected(0x1, b'\2', b'msg', FrameStatus.FCS_MISMATCH)]),
+    (_encode(0xA, 0xB, b'???')[:-2] + _encode(1, 2, b'def'), [
+        Expected(0xA, b'\x0B', b'??', FrameStatus.FCS_MISMATCH),
+        Expected(1, b'\2', b'def'),
+    ]),
+    'Invalid escape in address',
+    (b'\x7e\x7d\x7d\0' + fcs(b'\x5d\0') + b'\x7e',
+     [Expected(0,
+               fcs(b'\x5d\0')[0:1], b'', FrameStatus.INVALID_ESCAPE)]),
+    'Invalid escape in control',
+    (b'\x7e\0\x7d\x7d' + fcs(b'\0\x5d') + b'\x7e',
+     [Expected(0,
+               fcs(b'\0\x5d')[0:1], b'', FrameStatus.INVALID_ESCAPE)]),
+    'Invalid escape in data',
+    (b'\x7e\0\1\x7d\x7d' + fcs(b'\0\1\x5d') + b'\x7e',
+     [Expected(0, b'\1', b'', FrameStatus.INVALID_ESCAPE)]),
+    'Frame ends with escape',
+    (b'\x7e\x7d\x7e', [Expected(NO_ADDRESS, b'', b'',
+                                FrameStatus.INCOMPLETE)]),
+    (b'\x7e\1\x7d\x7e', [Expected(1, b'', b'', FrameStatus.INCOMPLETE)]),
+    (b'\x7e\1\2abc\x7d\x7e', [Expected(1, b'\2', b'',
+                                       FrameStatus.INCOMPLETE)]),
+    (b'\x7e\1\2abcd\x7d\x7e',
+     [Expected(1, b'\2', b'', FrameStatus.INCOMPLETE)]),
+    (b'\x7e\1\2abcd1234\x7d\x7e',
+     [Expected(1, b'\2', b'abcd', FrameStatus.INCOMPLETE)]),
+    'Inter-frame data is only escapes',
+    (b'\x7e\x7d\x7e\x7d\x7e', [
+        Expected(NO_ADDRESS, b'', b'', FrameStatus.INCOMPLETE),
+        Expected(NO_ADDRESS, b'', b'', FrameStatus.INCOMPLETE),
+    ]),
+    (b'\x7e\x7d\x7d\x7e\x7d\x7d\x7e', [
+        Expected(NO_ADDRESS, b'', b'', FrameStatus.INVALID_ESCAPE),
+        Expected(NO_ADDRESS, b'', b'', FrameStatus.INVALID_ESCAPE),
+    ]),
+    'Data before first flag',
+    (b'\0\1' + fcs(b'\0\1'), []),
+    (b'\0\1' + fcs(b'\0\1') + b'\x7e',
+     [Expected(0, b'\1', b'', FrameStatus.INCOMPLETE)]),
+    'No frames emitted until flag',
+    (_encode(1, 2, b'3')[:-1], []),
+    (b'\x7e' + _encode(1, 2, b'3')[1:-1] * 2, []),
+)  # yapf: disable
+# Formatting for the above tuple is very slow, so disable yapf.
+
+_TESTS = TestGenerator(TEST_CASES)
+
+
+def _expected(frames: List[Frame]) -> Iterator[str]:
+    for i, frame in enumerate(frames, 1):
+        if frame.ok():
+            yield f'      Frame(kDecodedFrame{i:02}),'
+        else:
+            yield f'      Status::DATA_LOSS,  // Frame {i}'
+
+
+_CPP_HEADER = """\
+#include "pw_hdlc_lite/decoder.h"
+
+#include <array>
+#include <cstddef>
+#include <variant>
+
+#include "gtest/gtest.h"
+#include "pw_bytes/array.h"
+
+namespace pw::hdlc_lite {
+namespace {
+"""
+
+_CPP_FOOTER = """\
+}  // namespace
+}  // namespace pw::hdlc_lite"""
+
+
+def _cpp_test(ctx: Context) -> Iterator[str]:
+    """Generates a C++ test for the provided test data."""
+    data, _ = ctx.test_case
+    frames = list(FrameDecoder().process(data))
+    data_bytes = ''.join(rf'\x{byte:02x}' for byte in data)
+
+    yield f'TEST(Decoder, {ctx.cc_name()}) {{'
+    yield f'  static constexpr auto kData = bytes::String("{data_bytes}");\n'
+
+    for i, frame in enumerate(frames, 1):
+        if frame.status is FrameStatus.OK:
+            frame_bytes = ''.join(rf'\x{byte:02x}' for byte in frame.raw)
+            yield (f'  static constexpr auto kDecodedFrame{i:02} = '
+                   f'bytes::String("{frame_bytes}");')
+        else:
+            yield f'  // Frame {i}: {frame.status.value}'
+
+    yield ''
+
+    expected = '\n'.join(_expected(frames)) or '      // No frames'
+    decoder_size = max(len(data), 8)  # Make sure large enough for a frame
+
+    yield f"""\
+  DecoderBuffer<{decoder_size}> decoder;
+
+  static constexpr std::array<std::variant<Frame, Status>, {len(frames)}> kExpected = {{
+{expected}
+  }};
+
+  size_t decoded_frames = 0;
+
+  decoder.Process(kData, [&](const Result<Frame>& result) {{
+    ASSERT_LT(decoded_frames++, kExpected.size());
+    auto& expected = kExpected[decoded_frames - 1];
+
+    if (std::holds_alternative<Status>(expected)) {{
+      EXPECT_EQ(Status::DATA_LOSS, result.status());
+    }} else {{
+      ASSERT_EQ(Status::OK, result.status());
+
+      const Frame& decoded_frame = result.value();
+      const Frame& expected_frame = std::get<Frame>(expected);
+      EXPECT_EQ(expected_frame.address(), decoded_frame.address());
+      EXPECT_EQ(expected_frame.control(), decoded_frame.control());
+      ASSERT_EQ(expected_frame.data().size(), decoded_frame.data().size());
+      EXPECT_EQ(std::memcmp(expected_frame.data().data(),
+                            decoded_frame.data().data(),
+                            expected_frame.data().size()),
+                0);
+    }}
+  }});
+
+  EXPECT_EQ(decoded_frames, kExpected.size());
+}}"""
+
+
+def _define_py_test(ctx: Context) -> PyTest:
+    data, expected_frames = ctx.test_case
+
+    def test(self) -> None:
+        # Decode in one call
+        self.assertEqual(expected_frames,
+                         list(FrameDecoder().process(data)),
+                         msg=f'{ctx.group}: {data!r}')
+
+        # Decode byte-by-byte
+        decoder = FrameDecoder()
+        decoded_frames: List[Frame] = []
+        for i in range(len(data)):
+            decoded_frames += decoder.process(data[i:i + 1])
+
+        self.assertEqual(expected_frames,
+                         decoded_frames,
+                         msg=f'{ctx.group} (byte-by-byte): {data!r}')
+
+    return test
+
+
+# Class that tests all cases in TEST_CASES.
+DecoderTest = _TESTS.python_tests('DecoderTest', _define_py_test)
+
+if __name__ == '__main__':
+    args = parse_test_generation_args()
+    if args.generate_cc_test:
+        _TESTS.cc_tests(args.generate_cc_test, _cpp_test, _CPP_HEADER,
+                        _CPP_FOOTER)
+    else:
+        unittest.main()
diff --git a/pw_hdlc_lite/py/encode_test.py b/pw_hdlc_lite/py/encode_test.py
new file mode 100755
index 0000000..b0e68ab
--- /dev/null
+++ b/pw_hdlc_lite/py/encode_test.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python3
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Tests encoding HDLC frames."""
+
+import unittest
+
+from pw_hdlc_lite import encode
+from pw_hdlc_lite import protocol
+from pw_hdlc_lite.protocol import frame_check_sequence as _fcs
+
+FLAG = bytes([protocol.FLAG])
+
+
+def _with_fcs(data: bytes) -> bytes:
+    return data + _fcs(data)
+
+
+class TestEncodeInformationFrame(unittest.TestCase):
+    """Tests Encoding bytes with different arguments using a custom serial."""
+    def test_empty(self):
+        self.assertEqual(encode.information_frame(0, b''),
+                         FLAG + _with_fcs(b'\0\0') + FLAG)
+        self.assertEqual(encode.information_frame(0x1a, b''),
+                         FLAG + _with_fcs(b'\x1a\0') + FLAG)
+
+    def test_1byte(self):
+        self.assertEqual(encode.information_frame(0, b'A'),
+                         FLAG + _with_fcs(b'\0\0A') + FLAG)
+
+    def test_multibyte(self):
+        self.assertEqual(encode.information_frame(0, b'123456789'),
+                         FLAG + _with_fcs(b'\x00\x00123456789') + FLAG)
+
+    def test_escape(self):
+        self.assertEqual(
+            encode.information_frame(0x7e, b'\x7d'),
+            FLAG + b'\x7d\x5e\x00\x7d\x5d' + _fcs(b'\x7e\x00\x7d') + FLAG)
+        self.assertEqual(
+            encode.information_frame(0x7d, b'A\x7e\x7dBC'),
+            FLAG + b'\x7d\x5d\x00A\x7d\x5e\x7d\x5dBC' +
+            _fcs(b'\x7d\x00A\x7e\x7dBC') + FLAG)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/pw_hdlc_lite/py/pw_hdlc_lite/__init__.py b/pw_hdlc_lite/py/pw_hdlc_lite/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pw_hdlc_lite/py/pw_hdlc_lite/__init__.py
diff --git a/pw_hdlc_lite/py/pw_hdlc_lite/decode.py b/pw_hdlc_lite/py/pw_hdlc_lite/decode.py
new file mode 100644
index 0000000..70c63ae
--- /dev/null
+++ b/pw_hdlc_lite/py/pw_hdlc_lite/decode.py
@@ -0,0 +1,202 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Decoder class for decoding bytes using HDLC-Lite protocol"""
+
+import enum
+import logging
+from typing import Iterator, NamedTuple, Optional, Tuple
+import zlib
+
+from pw_hdlc_lite import protocol
+
+_LOG = logging.getLogger('pw_hdlc_lite')
+
+
+class FrameStatus(enum.Enum):
+    """Indicates that an error occurred."""
+    OK = 'OK'
+    FCS_MISMATCH = 'frame check sequence failure'
+    INCOMPLETE = 'incomplete frame'
+    INVALID_ESCAPE = 'invalid escape character'
+
+
+_MIN_FRAME_SIZE = 6  # 1 B address + 1 B control + 4 B CRC-32
+
+NO_ADDRESS = -1
+
+
+class Frame(NamedTuple):
+    """Represents an HDLC frame."""
+
+    # All bytes in the frame (address, control, information, FCS)
+    raw: bytes
+
+    # Whether parsing the frame succeeded.
+    status: FrameStatus = FrameStatus.OK
+
+    @property
+    def address(self) -> int:
+        """The frame's address field (assumes only one byte for now)."""
+        return self.raw[0] if self.raw else NO_ADDRESS
+
+    @property
+    def control(self) -> bytes:
+        """The control byte (assumes only one byte for now)."""
+        return self.raw[1:2] if len(self.raw) >= 2 else b''
+
+    @property
+    def data(self) -> bytes:
+        """The information field in the frame."""
+        return self.raw[2:-4] if len(self.raw) >= _MIN_FRAME_SIZE else b''
+
+    def ok(self) -> bool:
+        """True if this represents a valid frame.
+
+        If false, then parsing failed. The status is set to indicate what type
+        of error occurred, and the data field contains all bytes parsed from the
+        frame (including bytes parsed as address or control bytes).
+        """
+        return self.status is FrameStatus.OK
+
+
+class _BaseFrameState:
+    """Base class for all frame parsing states."""
+    def __init__(self, data: bytearray):
+        self._data = data  # All data seen in the current frame
+        self._escape_next = False
+
+    def handle_flag(self) -> Tuple['_BaseFrameState', Optional[Frame]]:
+        """Handles an HDLC flag character (0x7e).
+
+        The HDLC flag is always interpreted as the start of a new frame.
+
+        Returns:
+            (next state, optional frame or error)
+        """
+        # If there is data or an escape character, the frame is incomplete.
+        if self._escape_next or self._data:
+            return _AddressState(), Frame(bytes(self._data),
+                                          FrameStatus.INCOMPLETE)
+
+        return _AddressState(), None
+
+    def handle_escape(self) -> '_BaseFrameState':
+        """Handles an HDLC escape character (0x7d); returns the next state."""
+        if self._escape_next:
+            # If two escapes occur in a row, the frame is invalid.
+            return _InterframeState(self._data, FrameStatus.INVALID_ESCAPE)
+
+        self._escape_next = True
+        return self
+
+    def handle_byte(self, byte: int) -> '_BaseFrameState':
+        """Handles a byte, which may have been escaped; returns next state."""
+        self._data.append(protocol.escape(byte) if self._escape_next else byte)
+        self._escape_next = False
+        return self
+
+
+class _InterframeState(_BaseFrameState):
+    """Not currently in a frame; any data is discarded."""
+    def __init__(self, data: bytearray, error: FrameStatus):
+        super().__init__(data)
+        self._error = error
+
+    def handle_flag(self) -> Tuple[_BaseFrameState, Optional[Frame]]:
+        # If this state was entered due to an error, report that error before
+        # starting a new frame.
+        if self._error is not FrameStatus.OK:
+            return _AddressState(), Frame(bytes(self._data), self._error)
+
+        return super().handle_flag()
+
+
+class _AddressState(_BaseFrameState):
+    """First field in a frame: the address."""
+    def __init__(self):
+        super().__init__(bytearray())
+
+    def handle_byte(self, byte: int) -> _BaseFrameState:
+        super().handle_byte(byte)
+        # Only handle single-byte addresses for now.
+        return _ControlState(self._data)
+
+
+class _ControlState(_BaseFrameState):
+    """Second field in a frame: control."""
+    def handle_byte(self, byte: int) -> _BaseFrameState:
+        super().handle_byte(byte)
+        # Only handle a single control byte for now.
+        return _DataState(self._data)
+
+
+class _DataState(_BaseFrameState):
+    """The information field in a frame."""
+    def handle_flag(self) -> Tuple[_BaseFrameState, Frame]:
+        return _AddressState(), Frame(bytes(self._data), self._check_frame())
+
+    def _check_frame(self) -> FrameStatus:
+        # If the last character was an escape, assume bytes are missing.
+        if self._escape_next or len(self._data) < _MIN_FRAME_SIZE:
+            return FrameStatus.INCOMPLETE
+
+        frame_crc = int.from_bytes(self._data[-4:], 'little')
+        if zlib.crc32(self._data[:-4]) != frame_crc:
+            return FrameStatus.FCS_MISMATCH
+
+        return FrameStatus.OK
+
+
+class FrameDecoder:
+    """Decodes one or more HDLC frames from a stream of data."""
+    def __init__(self):
+        self._data = bytearray()
+        self._unescape_next_byte_flag = False
+        self._state = _InterframeState(bytearray(), FrameStatus.OK)
+
+    def process(self, data: bytes) -> Iterator[Frame]:
+        """Decodes and yields HDLC frames, including corrupt frames.
+
+        The ok() method on Frame indicates whether it is valid or represents a
+        frame parsing error.
+
+        Yields:
+          Frames, which may be valid (frame.ok()) or corrupt (!frame.ok())
+        """
+        for byte in data:
+            frame = self._process_byte(byte)
+            if frame:
+                yield frame
+
+    def process_valid_frames(self, data: bytes) -> Iterator[Frame]:
+        """Decodes and yields valid HDLC frames, logging any errors."""
+        for frame in self.process(data):
+            if frame.ok():
+                yield frame
+            else:
+                _LOG.warning('Failed to decode frame: %s; discarded %d bytes',
+                             frame.status.value, len(frame.data))
+                _LOG.debug('Discarded data: %s', frame.data)
+
+    def _process_byte(self, byte: int) -> Optional[Frame]:
+        if byte == protocol.FLAG:
+            self._state, frame = self._state.handle_flag()
+            return frame
+
+        if byte == protocol.ESCAPE:
+            self._state = self._state.handle_escape()
+        else:
+            self._state = self._state.handle_byte(byte)
+
+        return None
diff --git a/pw_hdlc_lite/py/pw_hdlc_lite/encode.py b/pw_hdlc_lite/py/pw_hdlc_lite/encode.py
new file mode 100644
index 0000000..9db8a30
--- /dev/null
+++ b/pw_hdlc_lite/py/pw_hdlc_lite/encode.py
@@ -0,0 +1,29 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""The encode module supports encoding HDLC frames."""
+
+from pw_hdlc_lite import protocol
+
+_ESCAPE_BYTE = bytes([protocol.ESCAPE])
+_FLAG_BYTE = bytes([protocol.FLAG])
+_CONTROL = 0  # Currently, hard-coded to 0; no sequence numbers are used
+
+
+def information_frame(address: int, data: bytes) -> bytes:
+    """Encodes an HDLC I-frame with a CRC-32 frame check sequence."""
+    frame = bytearray([address, _CONTROL]) + data
+    frame += protocol.frame_check_sequence(frame)
+    frame = frame.replace(_ESCAPE_BYTE, b'\x7d\x5d')
+    frame = frame.replace(_FLAG_BYTE, b'\x7d\x5e')
+    return b''.join([_FLAG_BYTE, frame, _FLAG_BYTE])
diff --git a/pw_hdlc_lite/py/pw_hdlc_lite/protocol.py b/pw_hdlc_lite/py/pw_hdlc_lite/protocol.py
new file mode 100644
index 0000000..4f9098a
--- /dev/null
+++ b/pw_hdlc_lite/py/pw_hdlc_lite/protocol.py
@@ -0,0 +1,31 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Module for low-level HDLC protocol features."""
+
+import zlib
+
+# Special flag character for delimiting HDLC frames.
+FLAG = 0x7E
+
+# Special character for escaping other special characters in a frame.
+ESCAPE = 0x7D
+
+
+def escape(byte: int) -> int:
+    """Escapes or unescapes a byte, which should have been preceeded by 0x7d."""
+    return byte ^ 0x20
+
+
+def frame_check_sequence(data: bytes) -> bytes:
+    return zlib.crc32(data).to_bytes(4, 'little')
diff --git a/pw_hdlc_lite/py/pw_hdlc_lite/py.typed b/pw_hdlc_lite/py/pw_hdlc_lite/py.typed
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pw_hdlc_lite/py/pw_hdlc_lite/py.typed
diff --git a/pw_hdlc_lite/py/pw_hdlc_lite/rpc.py b/pw_hdlc_lite/py/pw_hdlc_lite/rpc.py
new file mode 100644
index 0000000..57b74b2
--- /dev/null
+++ b/pw_hdlc_lite/py/pw_hdlc_lite/rpc.py
@@ -0,0 +1,138 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Utilities for using HDLC with pw_rpc."""
+
+import logging
+from pathlib import Path
+import sys
+import threading
+import time
+from types import ModuleType
+from typing import Any, BinaryIO, Callable, Iterable, List, NoReturn, Union
+
+from pw_hdlc_lite.decode import FrameDecoder
+from pw_hdlc_lite import encode
+import pw_rpc
+from pw_rpc import callback_client
+from pw_protobuf_compiler import python_protos
+
+_LOG = logging.getLogger(__name__)
+
+STDOUT_ADDRESS = 1
+DEFAULT_ADDRESS = ord('R')
+
+
+def channel_output(writer: Callable[[bytes], Any],
+                   address: int = DEFAULT_ADDRESS,
+                   delay_s: float = 0) -> Callable[[bytes], None]:
+    """Returns a function that can be used as a channel output for pw_rpc."""
+
+    if delay_s:
+
+        def slow_write(data: bytes) -> None:
+            """Slows down writes in case unbuffered serial is in use."""
+            for byte in data:
+                time.sleep(delay_s)
+                writer(bytes([byte]))
+
+        return lambda data: slow_write(encode.information_frame(address, data))
+
+    return lambda data: writer(encode.information_frame(address, data))
+
+
+def read_and_process_data(rpc_client: pw_rpc.Client,
+                          device: BinaryIO,
+                          output: Callable[[bytes], Any],
+                          rpc_address: int = DEFAULT_ADDRESS) -> NoReturn:
+    """Reads HDLC frames from the device and passes them to the RPC client."""
+    decoder = FrameDecoder()
+
+    while True:
+        byte = device.read()
+        for frame in decoder.process_valid_frames(byte):
+            if not frame.ok():
+                _LOG.error('Failed to parse frame: %s', frame.status.value)
+                continue
+
+            if frame.address == rpc_address:
+                if not rpc_client.process_packet(frame.data):
+                    _LOG.error('Packet not handled by RPC client: %s', frame)
+            elif frame.address == STDOUT_ADDRESS:
+                output(frame.data)
+            else:
+                _LOG.error('Unhandled frame for address %d: %s', frame.address,
+                           frame.data.decode(errors='replace'))
+
+
+_PathOrModule = Union[str, Path, ModuleType]
+
+
+def write_to_file(data: bytes, output: BinaryIO = sys.stdout.buffer):
+    output.write(data)
+    output.write(b'\n')
+    output.flush()
+
+
+class HdlcRpcClient:
+    """An RPC client configured to run over HDLC."""
+    def __init__(self,
+                 device: BinaryIO,
+                 proto_paths_or_modules: Iterable[_PathOrModule],
+                 output: Callable[[bytes], Any] = write_to_file,
+                 channels: Iterable[pw_rpc.Channel] = None,
+                 client_impl: pw_rpc.client.ClientImpl = None):
+        """Creates an RPC client configured to communicate using HDLC.
+
+        Args:
+          device: serial.Serial (or any BinaryIO class) for reading/writing data
+          proto_paths_or_modules: paths to .proto files or proto modules
+          output: where to write "stdout" output from the device
+        """
+        self.device = device
+
+        proto_modules = []
+        proto_paths: List[Union[Path, str]] = []
+        for proto in proto_paths_or_modules:
+            if isinstance(proto, (Path, str)):
+                proto_paths.append(proto)
+            else:
+                proto_modules.append(proto)
+
+        proto_modules += python_protos.compile_and_import(proto_paths)
+
+        if channels is None:
+            channels = [pw_rpc.Channel(1, channel_output(device.write))]
+
+        if client_impl is None:
+            client_impl = callback_client.Impl()
+
+        self.client = pw_rpc.Client.from_modules(client_impl, channels,
+                                                 proto_modules)
+
+        # Start background thread that reads and processes RPC packets.
+        threading.Thread(target=read_and_process_data,
+                         daemon=True,
+                         args=(self.client, device, output)).start()
+
+    def rpcs(self, channel_id: int = None) -> Any:
+        """Returns object for accessing services on the specified channel.
+
+        This skips some intermediate layers to make it simpler to invoke RPCs
+        from an HdlcRpcClient. If only one channel is in use, the channel ID is
+        not necessary.
+        """
+        if channel_id is None:
+            return next(iter(self.client.channels())).rpcs
+
+        return self.client.channel(channel_id).rpcs
diff --git a/pw_hdlc_lite/py/pw_hdlc_lite/rpc_console.py b/pw_hdlc_lite/py/pw_hdlc_lite/rpc_console.py
new file mode 100644
index 0000000..1e3b929
--- /dev/null
+++ b/pw_hdlc_lite/py/pw_hdlc_lite/rpc_console.py
@@ -0,0 +1,124 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Console for interacting with pw_rpc over HDLC.
+
+To start the console, provide a serial port as the --device argument and paths
+or globs for .proto files that define the RPC services to support:
+
+  python -m pw_hdlc_lite.rpc_console --device /dev/ttyUSB0 sample.proto
+
+This starts an IPython console for communicating with the connected device. A
+few variables are predefined in the interactive console. These include:
+
+    rpcs   - used to invoke RPCs
+    device - the serial device used for communication
+    client - the pw_rpc.Client
+
+An example echo RPC command:
+
+  rpcs.pw.rpc.EchoService.Echo(msg="hello!")
+"""
+
+import argparse
+import glob
+import logging
+from pathlib import Path
+import sys
+from typing import Collection, Iterable, Iterator, BinaryIO
+
+import IPython  # type: ignore
+import serial  # type: ignore
+
+from pw_hdlc_lite.rpc import HdlcRpcClient, write_to_file
+
+_LOG = logging.getLogger(__name__)
+
+
+def _parse_args():
+    """Parses and returns the command line arguments."""
+    parser = argparse.ArgumentParser(description=__doc__)
+    parser.add_argument('-d',
+                        '--device',
+                        required=True,
+                        help='the serial port to use')
+    parser.add_argument('-b',
+                        '--baudrate',
+                        type=int,
+                        default=115200,
+                        help='the baud rate to use')
+    parser.add_argument(
+        '-o',
+        '--output',
+        type=argparse.FileType('wb'),
+        default=sys.stdout.buffer,
+        help=('The file to which to write device output (HDLC channel 1); '
+              'provide - or omit for stdout.'))
+    parser.add_argument('proto_globs',
+                        nargs='+',
+                        help='glob pattern for .proto files')
+    return parser.parse_args()
+
+
+def _expand_globs(globs: Iterable[str]) -> Iterator[Path]:
+    for pattern in globs:
+        for file in glob.glob(pattern, recursive=True):
+            yield Path(file)
+
+
+def _start_ipython_terminal(client: HdlcRpcClient) -> None:
+    """Starts an interactive IPython terminal with preset variables."""
+    local_variables = dict(
+        client=client,
+        channel_client=client.client.channel(1),
+        rpcs=client.client.channel(1).rpcs,
+    )
+
+    print(__doc__)  # Print the banner
+    IPython.terminal.embed.InteractiveShellEmbed().mainloop(
+        local_ns=local_variables, module=argparse.Namespace())
+
+
+def console(device: str, baudrate: int, proto_globs: Collection[str],
+            output: BinaryIO) -> int:
+    """Starts an interactive RPC console for HDLC."""
+    # argparse.FileType doesn't correctly handle '-' for binary files.
+    if output is sys.stdout:
+        output = sys.stdout.buffer
+
+    if not proto_globs:
+        proto_globs = ['**/*.proto']
+
+    protos = list(_expand_globs(proto_globs))
+
+    if not protos:
+        _LOG.critical('No .proto files were found with %s',
+                      ', '.join(proto_globs))
+        _LOG.critical('At least one .proto file is required')
+        return 1
+
+    _LOG.debug('Found %d .proto files with %s', len(protos),
+               ', '.join(proto_globs))
+
+    _start_ipython_terminal(
+        HdlcRpcClient(serial.Serial(device, baudrate), protos,
+                      lambda data: write_to_file(data, output)))
+    return 0
+
+
+def main() -> int:
+    return console(**vars(_parse_args()))
+
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/pw_hdlc_lite/py/setup.py b/pw_hdlc_lite/py/setup.py
new file mode 100644
index 0000000..15266ae
--- /dev/null
+++ b/pw_hdlc_lite/py/setup.py
@@ -0,0 +1,29 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""pw_hdlc_lite"""
+
+import setuptools  # type: ignore
+
+setuptools.setup(
+    name='pw_hdlc_lite',
+    version='0.0.1',
+    author='Pigweed Authors',
+    author_email='pigweed-developers@googlegroups.com',
+    description='Tools for Encoding/Decoding data using the HDLC-Lite protocol',
+    packages=setuptools.find_packages(),
+    package_data={'pw_hdlc_lite': ['py.typed']},
+    zip_safe=False,
+    install_requires=['ipython'],
+    tests_require=['pw_build'],
+)
diff --git a/pw_hdlc_lite/rpc_channel_test.cc b/pw_hdlc_lite/rpc_channel_test.cc
new file mode 100644
index 0000000..7e0d0dd
--- /dev/null
+++ b/pw_hdlc_lite/rpc_channel_test.cc
@@ -0,0 +1,122 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_hdlc_lite/rpc_channel.h"
+
+#include <algorithm>
+#include <array>
+#include <cstddef>
+
+#include "gtest/gtest.h"
+#include "pw_bytes/array.h"
+#include "pw_stream/memory_stream.h"
+
+using std::byte;
+
+namespace pw::hdlc_lite {
+namespace {
+
+constexpr byte kFlag = byte{0x7E};
+constexpr uint8_t kAddress = 0x7b;  // 123
+constexpr byte kControl = byte{0};
+
+// Size of the in-memory buffer to use for this test.
+constexpr size_t kSinkBufferSize = 15;
+
+TEST(RpcChannelOutput, 1BytePayload) {
+  std::array<byte, kSinkBufferSize> channel_output_buffer;
+  stream::MemoryWriterBuffer<kSinkBufferSize> memory_writer;
+
+  RpcChannelOutput output(
+      memory_writer, channel_output_buffer, kAddress, "RpcChannelOutput");
+
+  constexpr byte test_data = byte{'A'};
+  std::memcpy(output.AcquireBuffer().data(), &test_data, sizeof(test_data));
+
+  constexpr auto expected = bytes::Concat(
+      kFlag, kAddress, kControl, 'A', uint32_t{0xA63E2FA5}, kFlag);
+
+  EXPECT_EQ(Status::Ok(), output.SendAndReleaseBuffer(sizeof(test_data)));
+
+  ASSERT_EQ(memory_writer.bytes_written(), expected.size());
+  EXPECT_EQ(
+      std::memcmp(
+          memory_writer.data(), expected.data(), memory_writer.bytes_written()),
+      0);
+}
+
+TEST(RpcChannelOutput, EscapingPayloadTest) {
+  std::array<byte, kSinkBufferSize> channel_output_buffer;
+  stream::MemoryWriterBuffer<kSinkBufferSize> memory_writer;
+
+  RpcChannelOutput output(
+      memory_writer, channel_output_buffer, kAddress, "RpcChannelOutput");
+
+  constexpr auto test_data = bytes::Array<0x7D>();
+  std::memcpy(
+      output.AcquireBuffer().data(), test_data.data(), test_data.size());
+
+  constexpr auto expected = bytes::Concat(kFlag,
+                                          kAddress,
+                                          kControl,
+                                          byte{0x7d},
+                                          byte{0x7d} ^ byte{0x20},
+                                          uint32_t{0x89515322},
+                                          kFlag);
+  EXPECT_EQ(Status::Ok(), output.SendAndReleaseBuffer(test_data.size()));
+
+  ASSERT_EQ(memory_writer.bytes_written(), 10u);
+  EXPECT_EQ(
+      std::memcmp(
+          memory_writer.data(), expected.data(), memory_writer.bytes_written()),
+      0);
+}
+
+TEST(RpcChannelOutputBuffer, 1BytePayload) {
+  stream::MemoryWriterBuffer<kSinkBufferSize> memory_writer;
+
+  RpcChannelOutputBuffer<kSinkBufferSize> output(
+      memory_writer, kAddress, "RpcChannelOutput");
+
+  constexpr byte test_data = byte{'A'};
+  std::memcpy(output.AcquireBuffer().data(), &test_data, sizeof(test_data));
+
+  constexpr auto expected = bytes::Concat(
+      kFlag, kAddress, kControl, 'A', uint32_t{0xA63E2FA5}, kFlag);
+
+  EXPECT_EQ(Status::Ok(), output.SendAndReleaseBuffer(sizeof(test_data)));
+
+  ASSERT_EQ(memory_writer.bytes_written(), expected.size());
+  EXPECT_EQ(
+      std::memcmp(
+          memory_writer.data(), expected.data(), memory_writer.bytes_written()),
+      0);
+}
+
+}  // namespace
+}  // namespace pw::hdlc_lite
diff --git a/pw_hdlc_lite/rpc_example/BUILD b/pw_hdlc_lite/rpc_example/BUILD
new file mode 100644
index 0000000..7e6a30b
--- /dev/null
+++ b/pw_hdlc_lite/rpc_example/BUILD
@@ -0,0 +1,38 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+load(
+    "//pw_build:pigweed.bzl",
+    "pw_cc_library",
+)
+
+pw_cc_library(
+    name = "rpc_example",
+    srcs = [
+        "hdlc_rpc_server.cc",
+        "main.cc",
+    ],
+    hdrs = [
+        "public/pw_hdlc_lite/decoder.h",
+        "public/pw_hdlc_lite/hdlc_channel.h",
+        "public/pw_hdlc_lite/rpc_server_packets.h",
+    ],
+    deps = [
+        "//pw_hdlc_lite",
+        "//pw_hdlc_lite:pw_rpc",
+        "//pw_rpc:server",
+        "//pw_log",
+    ],
+)
+
diff --git a/pw_hdlc_lite/rpc_example/BUILD.gn b/pw_hdlc_lite/rpc_example/BUILD.gn
new file mode 100644
index 0000000..495b168
--- /dev/null
+++ b/pw_hdlc_lite/rpc_example/BUILD.gn
@@ -0,0 +1,42 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/python.gni")
+import("$dir_pw_build/target_types.gni")
+import("$dir_pw_third_party/nanopb/nanopb.gni")
+
+if (dir_pw_third_party_nanopb == "") {
+  group("rpc_example") {
+  }
+} else {
+  pw_executable("rpc_example") {
+    sources = [
+      "hdlc_rpc_server.cc",
+      "main.cc",
+    ]
+    deps = [
+      "$dir_pw_rpc:server",
+      "$dir_pw_rpc/nanopb:echo_service",
+      "..:pw_rpc",
+      dir_pw_hdlc_lite,
+      dir_pw_log,
+    ]
+  }
+}
+
+pw_python_script("example_script") {
+  sources = [ "example_script.py" ]
+}
diff --git a/pw_protobuf_compiler/nanopb.gni b/pw_hdlc_lite/rpc_example/CMakeLists.txt
similarity index 73%
copy from pw_protobuf_compiler/nanopb.gni
copy to pw_hdlc_lite/rpc_example/CMakeLists.txt
index 3c3be32..9c2d32c 100644
--- a/pw_protobuf_compiler/nanopb.gni
+++ b/pw_hdlc_lite/rpc_example/CMakeLists.txt
@@ -12,8 +12,13 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-declare_args() {
-  # If compiling protos for nanopb, this variable is set to the path to the
-  # nanopb installation.
-  dir_pw_third_party_nanopb = ""
-}
+
+add_executable(pw_hdlc_lite.rpc_example hdlc_rpc_server.cc main.cc)
+
+target_link_libraries(pw_hdlc_lite.rpc_example
+  PRIVATE
+    pw_hdlc_lite
+    pw_log
+    pw_rpc.nanopb.echo_service
+    pw_rpc.server
+)
diff --git a/pw_hdlc_lite/rpc_example/docs.rst b/pw_hdlc_lite/rpc_example/docs.rst
new file mode 100644
index 0000000..492ec0e
--- /dev/null
+++ b/pw_hdlc_lite/rpc_example/docs.rst
@@ -0,0 +1,100 @@
+.. _module-pw_hdlc_lite-rpc-example:
+
+=============================
+RPC over HDLC example project
+=============================
+The :ref:`module-pw_hdlc_lite` module includes an example of bringing up a
+:ref:`module-pw_rpc` server that can be used to invoke RPCs. The example code
+is located at ``pw_hdlc_lite/rpc_example``. This section walks through invoking
+RPCs interactively and with a script using the RPC over HDLC example.
+
+These instructions assume the STM32F429i Discovery board, but they work with
+any target with :ref:`pw::sys_io <module-pw_sys_io>` implemented.
+
+---------------------
+Getting started guide
+---------------------
+
+1. Set up your board
+====================
+Connect the board you'll be communicating with. For the Discovery board, connect
+the mini USB port, and note which serial device it appears as (e.g.
+``/dev/ttyACM0``).
+
+2. Build Pigweed
+================
+Activate the Pigweed environment and run the default build.
+
+.. code-block:: sh
+
+  source activate.sh
+  gn gen out
+  ninja -C out
+
+3. Flash the firmware image
+===========================
+After a successful build, the binary for the example will be located at
+``out/<toolchain>/obj/pw_hdlc_lite/rpc_example/bin/rpc_example.elf``.
+
+Flash this image to your board. If you are using the STM32F429i Discovery Board,
+you can flash the image with `OpenOCD <http://openocd.org>`_.
+
+.. code-block:: sh
+
+ openocd -f targets/stm32f429i-disc1/py/stm32f429i_disc1_utils/openocd_stm32f4xx.cfg \
+     -c "program out/stm32f429i_disc1_debug/obj/pw_hdlc_lite/rpc_example/bin/rpc_example.elf"
+
+4. Invoke RPCs from an interactive console
+=============================================
+The RPC console uses `IPython <https://ipython.org>`_ to make a rich interactive
+console for working with pw_rpc. Run the RPC console with the following command,
+replacing ``/dev/ttyACM0`` with the correct serial device for your board.
+
+.. code-block:: text
+
+  $ python -m pw_hdlc_lite.rpc_console --device /dev/ttyACM0
+
+  Console for interacting with pw_rpc over HDLC.
+
+  To start the console, provide a serial port as the --device argument and paths
+  or globs for .proto files that define the RPC services to support:
+
+    python -m pw_hdlc_lite.rpc_console --device /dev/ttyUSB0 sample.proto
+
+  This starts an IPython console for communicating with the connected device. A
+  few variables are predefined in the interactive console. These include:
+
+      rpcs   - used to invoke RPCs
+      device - the serial device used for communication
+      client - the pw_rpc.Client
+
+  An example echo RPC command:
+
+    rpcs.pw.rpc.EchoService.Echo(msg="hello!")
+
+  In [1]:
+
+RPCs may be accessed through the predefined ``rpcs`` variable. RPCs are
+organized by their protocol buffer package and RPC service, as defined in a
+.proto file. The ``Echo`` method is part of the ``EchoService``, which is in
+the ``pw.rpc`` package. To invoke it synchronously, call
+``rpcs.pw.rpc.EchoService.Echo``:
+
+.. code-block:: python
+
+    In [1]: rpcs.pw.rpc.EchoService.Echo(msg="Your message here!")
+    Out[1]: (<Status.OK: 0>, msg: "Your message here!")
+
+5. Invoke RPCs with a script
+============================
+RPCs may also be invoked from Python scripts. Close the RPC console if it is
+running, and execute the example script. Set the --device argument to the
+serial port for your device.
+
+.. code-block:: text
+
+  $ pw_hdlc_lite/rpc_example/example_script.py --device /dev/ttyACM0
+  The status was Status.OK
+  The payload was msg: "Hello"
+
+  The device says: Goodbye!
diff --git a/pw_hdlc_lite/rpc_example/example_script.py b/pw_hdlc_lite/rpc_example/example_script.py
new file mode 100755
index 0000000..57726de
--- /dev/null
+++ b/pw_hdlc_lite/rpc_example/example_script.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Simple example script that uses pw_rpc."""
+
+import argparse
+import os
+from pathlib import Path
+
+import serial  # type: ignore
+
+from pw_hdlc_lite.rpc import HdlcRpcClient
+
+# Point the script to the .proto file with our RPC services.
+PROTO = Path(os.environ['PW_ROOT'], 'pw_rpc/pw_rpc_protos/echo.proto')
+
+
+def script(device: str, baud: int) -> None:
+    # Set up a pw_rpc client that uses HDLC.
+    client = HdlcRpcClient(serial.Serial(device, baud), [PROTO])
+
+    # Make a shortcut to the EchoService.
+    echo_service = client.rpcs().pw.rpc.EchoService
+
+    # Call some RPCs and check the results.
+    status, payload = echo_service.Echo(msg='Hello')
+
+    if status.ok():
+        print('The status was', status)
+        print('The payload was', payload)
+    else:
+        print('Uh oh, this RPC returned', status)
+
+    status, payload = echo_service.Echo(msg='Goodbye!')
+
+    print('The device says:', payload.msg)
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        description=__doc__,
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+    parser.add_argument('--device',
+                        '-d',
+                        default='/dev/ttyACM0',
+                        help='serial device to use')
+    parser.add_argument('--baud',
+                        '-b',
+                        type=int,
+                        default=115200,
+                        help='baud rate for the serial device')
+    script(**vars(parser.parse_args()))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/pw_hdlc_lite/rpc_example/hdlc_rpc_server.cc b/pw_hdlc_lite/rpc_example/hdlc_rpc_server.cc
new file mode 100644
index 0000000..319ee1b
--- /dev/null
+++ b/pw_hdlc_lite/rpc_example/hdlc_rpc_server.cc
@@ -0,0 +1,72 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include <array>
+#include <span>
+#include <string_view>
+
+#include "pw_hdlc_lite/encoder.h"
+#include "pw_hdlc_lite/rpc_channel.h"
+#include "pw_hdlc_lite/rpc_packets.h"
+#include "pw_hdlc_lite/sys_io_stream.h"
+#include "pw_log/log.h"
+#include "pw_rpc/echo_service_nanopb.h"
+#include "pw_rpc/server.h"
+
+namespace hdlc_example {
+namespace {
+
+using std::byte;
+
+constexpr size_t kMaxTransmissionUnit = 256;
+
+// Used to write HDLC data to pw::sys_io.
+pw::stream::SysIoWriter writer;
+
+// Set up the output channel for the pw_rpc server to use.
+pw::hdlc_lite::RpcChannelOutputBuffer<kMaxTransmissionUnit> hdlc_channel_output(
+    writer, pw::hdlc_lite::kDefaultRpcAddress, "HDLC channel");
+
+pw::rpc::Channel channels[] = {
+    pw::rpc::Channel::Create<1>(&hdlc_channel_output)};
+
+// Declare the pw_rpc server with the HDLC channel.
+pw::rpc::Server server(channels);
+
+pw::rpc::EchoService echo_service;
+
+void RegisterServices() { server.RegisterService(echo_service); }
+
+}  // namespace
+
+void Start() {
+  // Send log messages to HDLC address 1. This prevents logs from interfering
+  // with pw_rpc communications.
+  pw::log_basic::SetOutput([](std::string_view log) {
+    pw::hdlc_lite::WriteInformationFrame(
+        1, std::as_bytes(std::span(log)), writer);
+  });
+
+  // Set up the server and start processing data.
+  RegisterServices();
+
+  // Declare a buffer for decoding incoming HDLC frames.
+  std::array<std::byte, kMaxTransmissionUnit> input_buffer;
+
+  PW_LOG_INFO("Starting pw_rpc server");
+  pw::hdlc_lite::ReadAndProcessPackets(
+      server, hdlc_channel_output, input_buffer);
+}
+
+}  // namespace hdlc_example
diff --git a/pw_sys_io_baremetal_lm3s6965evb/early_boot.c b/pw_hdlc_lite/rpc_example/main.cc
similarity index 82%
copy from pw_sys_io_baremetal_lm3s6965evb/early_boot.c
copy to pw_hdlc_lite/rpc_example/main.cc
index 1670b7d..2b6f656 100644
--- a/pw_sys_io_baremetal_lm3s6965evb/early_boot.c
+++ b/pw_hdlc_lite/rpc_example/main.cc
@@ -12,6 +12,13 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
-#include "pw_boot_armv7m/boot.h"
+namespace hdlc_example {
 
-void pw_PreStaticConstructorInit() {}
\ No newline at end of file
+void Start();
+
+}  // namespace hdlc_example
+
+int main() {
+  hdlc_example::Start();
+  return 0;
+}
diff --git a/pw_hdlc_lite/rpc_packets.cc b/pw_hdlc_lite/rpc_packets.cc
new file mode 100644
index 0000000..95e95b3
--- /dev/null
+++ b/pw_hdlc_lite/rpc_packets.cc
@@ -0,0 +1,41 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_hdlc_lite/rpc_packets.h"
+
+#include "pw_status/try.h"
+#include "pw_sys_io/sys_io.h"
+
+namespace pw::hdlc_lite {
+
+Status ReadAndProcessPackets(rpc::Server& server,
+                             rpc::ChannelOutput& output,
+                             std::span<std::byte> decode_buffer,
+                             unsigned rpc_address) {
+  Decoder decoder(decode_buffer);
+
+  while (true) {
+    std::byte data;
+    PW_TRY(sys_io::ReadByte(&data));
+
+    if (auto result = decoder.Process(data); result.ok()) {
+      Frame& frame = result.value();
+      if (frame.address() == rpc_address) {
+        server.ProcessPacket(frame.data(), output);
+      }
+    }
+  }
+}
+
+}  // namespace pw::hdlc_lite
diff --git a/pw_hex_dump/BUILD.gn b/pw_hex_dump/BUILD.gn
index 380dc21..2ad280e 100644
--- a/pw_hex_dump/BUILD.gn
+++ b/pw_hex_dump/BUILD.gn
@@ -12,12 +12,12 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/target_types.gni")
 import("$dir_pw_docgen/docs.gni")
 import("$dir_pw_unit_test/test.gni")
+
 config("default_config") {
   include_dirs = [ "public" ]
 }
diff --git a/pw_hex_dump/docs.rst b/pw_hex_dump/docs.rst
index e198d45..60d5044 100644
--- a/pw_hex_dump/docs.rst
+++ b/pw_hex_dump/docs.rst
@@ -1,8 +1,4 @@
-.. _chapter-pw-hex-dump:
-
-.. default-domain:: cpp
-
-.. highlight:: sh
+.. _module-pw_hex_dump:
 
 -----------
 pw_hex_dump
@@ -111,4 +107,4 @@
 ============
 * pw_bytes
 * pw_span
-* pw_status
\ No newline at end of file
+* pw_status
diff --git a/pw_hex_dump/hex_dump.cc b/pw_hex_dump/hex_dump.cc
index fef1a90..ae979df 100644
--- a/pw_hex_dump/hex_dump.cc
+++ b/pw_hex_dump/hex_dump.cc
@@ -48,11 +48,11 @@
 
 Status DumpAddr(std::span<char> dest, uintptr_t addr) {
   if (dest.data() == nullptr) {
-    return Status::INVALID_ARGUMENT;
+    return Status::InvalidArgument();
   }
   // Include null terminator.
   if (dest.size() < kHexAddrStringSize + 1) {
-    return Status::RESOURCE_EXHAUSTED;
+    return Status::ResourceExhausted();
   }
   dest[0] = '0';
   dest[1] = 'x';
@@ -123,11 +123,11 @@
 
 Status FormattedHexDumper::DumpLine() {
   if (source_data_.empty()) {
-    return Status::RESOURCE_EXHAUSTED;
+    return Status::ResourceExhausted();
   }
 
   if (!ValidateBufferSize().ok() || dest_.data() == nullptr) {
-    return Status::FAILED_PRECONDITION;
+    return Status::FailedPrecondition();
   }
 
   if (dest_[0] == 0 && flags.show_header) {
@@ -212,22 +212,23 @@
 
 Status FormattedHexDumper::SetLineBuffer(std::span<char> dest) {
   if (dest.data() == nullptr || dest.size_bytes() == 0) {
-    return Status::INVALID_ARGUMENT;
+    return Status::InvalidArgument();
   }
   dest_ = dest;
-  return ValidateBufferSize().ok() ? Status::OK : Status::RESOURCE_EXHAUSTED;
+  return ValidateBufferSize().ok() ? Status::Ok() : Status::ResourceExhausted();
 }
 
 Status FormattedHexDumper::BeginDump(ConstByteSpan data) {
   current_offset_ = 0;
   source_data_ = data;
   if (data.data() == nullptr) {
-    return Status::INVALID_ARGUMENT;
+    return Status::InvalidArgument();
   }
   if (dest_.data() != nullptr && dest_.size_bytes() > 0) {
     dest_[0] = 0;
   }
-  return ValidateBufferSize().ok() ? Status::OK : Status::FAILED_PRECONDITION;
+  return ValidateBufferSize().ok() ? Status::Ok()
+                                   : Status::FailedPrecondition();
 }
 
 Status FormattedHexDumper::ValidateBufferSize() {
@@ -250,10 +251,10 @@
   }
 
   if (dest_.size_bytes() < required_size) {
-    return Status::RESOURCE_EXHAUSTED;
+    return Status::ResourceExhausted();
   }
 
-  return Status::OK;
+  return Status::Ok();
 }
 
 }  // namespace pw::dump
diff --git a/pw_hex_dump/hex_dump_test.cc b/pw_hex_dump/hex_dump_test.cc
index 51ad7fa..1b10251 100644
--- a/pw_hex_dump/hex_dump_test.cc
+++ b/pw_hex_dump/hex_dump_test.cc
@@ -100,7 +100,7 @@
 TEST_F(HexDump, DumpAddr_ZeroSizeT) {
   constexpr const char* expected = EXPECTED_SIGNIFICANT_BYTES("00000000");
   size_t zero = 0;
-  EXPECT_EQ(DumpAddr(dest_, zero), Status::OK);
+  EXPECT_EQ(DumpAddr(dest_, zero), Status::Ok());
   EXPECT_STREQ(expected, dest_.data());
 }
 
@@ -156,7 +156,7 @@
   for (size_t i = 0; i < source_data.size(); i += kTestBytesPerLine) {
     EXPECT_TRUE(dumper_.DumpLine().ok());
   }
-  EXPECT_EQ(dumper_.DumpLine(), Status::RESOURCE_EXHAUSTED);
+  EXPECT_EQ(dumper_.DumpLine(), Status::ResourceExhausted());
 }
 
 // This test is provided for convenience of debugging, as it actually logs the
@@ -172,7 +172,7 @@
   while (dumper_.DumpLine().ok()) {
     PW_LOG_INFO("%s", dest_.data());
   }
-  EXPECT_EQ(dumper_.DumpLine(), Status::RESOURCE_EXHAUSTED);
+  EXPECT_EQ(dumper_.DumpLine(), Status::ResourceExhausted());
 }
 
 TEST_F(HexDump, FormattedHexDump_NoSpaces) {
@@ -351,7 +351,7 @@
   default_flags_.bytes_per_line = 13;
   dumper_ = FormattedHexDumper(dest_, default_flags_);
 
-  EXPECT_EQ(dumper_.BeginDump(source_data), Status::FAILED_PRECONDITION);
+  EXPECT_EQ(dumper_.BeginDump(source_data), Status::FailedPrecondition());
   EXPECT_FALSE(dumper_.DumpLine().ok());
   EXPECT_STREQ(expected, dest_.data());
 }
@@ -363,7 +363,7 @@
   default_flags_.group_every = 1;
   dumper_ = FormattedHexDumper(dest_, default_flags_);
 
-  EXPECT_EQ(dumper_.BeginDump(source_data), Status::FAILED_PRECONDITION);
+  EXPECT_EQ(dumper_.BeginDump(source_data), Status::FailedPrecondition());
   EXPECT_FALSE(dumper_.DumpLine().ok());
   EXPECT_STREQ(expected, dest_.data());
 }
@@ -375,7 +375,7 @@
   default_flags_.prefix_mode = FormattedHexDumper::AddressMode::kOffset;
   dumper_ = FormattedHexDumper(dest_, default_flags_);
 
-  EXPECT_EQ(dumper_.BeginDump(source_data), Status::FAILED_PRECONDITION);
+  EXPECT_EQ(dumper_.BeginDump(source_data), Status::FailedPrecondition());
   EXPECT_FALSE(dumper_.DumpLine().ok());
   EXPECT_STREQ(expected, dest_.data());
 }
@@ -383,22 +383,22 @@
 TEST(BadBuffer, ZeroSize) {
   char buffer[1] = {static_cast<char>(0xaf)};
   FormattedHexDumper dumper(std::span<char>(buffer, 0));
-  EXPECT_EQ(dumper.BeginDump(source_data), Status::FAILED_PRECONDITION);
-  EXPECT_EQ(dumper.DumpLine(), Status::FAILED_PRECONDITION);
+  EXPECT_EQ(dumper.BeginDump(source_data), Status::FailedPrecondition());
+  EXPECT_EQ(dumper.DumpLine(), Status::FailedPrecondition());
   EXPECT_EQ(buffer[0], static_cast<char>(0xaf));
 }
 
 TEST(BadBuffer, NullPtrDest) {
   FormattedHexDumper dumper;
-  EXPECT_EQ(dumper.SetLineBuffer(std::span<char>()), Status::INVALID_ARGUMENT);
-  EXPECT_EQ(dumper.BeginDump(source_data), Status::FAILED_PRECONDITION);
-  EXPECT_EQ(dumper.DumpLine(), Status::FAILED_PRECONDITION);
+  EXPECT_EQ(dumper.SetLineBuffer(std::span<char>()), Status::InvalidArgument());
+  EXPECT_EQ(dumper.BeginDump(source_data), Status::FailedPrecondition());
+  EXPECT_EQ(dumper.DumpLine(), Status::FailedPrecondition());
 }
 
 TEST(BadBuffer, NullPtrSrc) {
   char buffer[24] = {static_cast<char>(0)};
   FormattedHexDumper dumper(buffer);
-  EXPECT_EQ(dumper.BeginDump(ByteSpan(nullptr, 64)), Status::INVALID_ARGUMENT);
+  EXPECT_EQ(dumper.BeginDump(ByteSpan(nullptr, 64)), Status::InvalidArgument());
   // Don't actually dump nullptr in this test as it could cause a crash.
 }
 
diff --git a/pw_kvs/BUILD b/pw_kvs/BUILD
index ac6038e..fec76d0 100644
--- a/pw_kvs/BUILD
+++ b/pw_kvs/BUILD
@@ -39,7 +39,6 @@
         "public/pw_kvs/internal/sectors.h",
         "public/pw_kvs/internal/span_traits.h",
         "pw_kvs_private/config.h",
-        "pw_kvs_private/macros.h",
         "sectors.cc",
     ],
     hdrs = [
@@ -138,6 +137,21 @@
 )
 
 pw_cc_library(
+    name = "fake_flash_test_key_value_store",
+    srcs = [
+        "fake_flash_test_key_value_store.cc",
+    ],
+    hdrs = [
+        "public/pw_kvs/test_key_value_store.h",
+    ],
+    deps = [
+        ":crc16",
+        ":pw_kvs",
+        ":fake_flash",
+    ],
+)
+
+pw_cc_library(
     name = "test_utils",
     hdrs = [
         "pw_kvs_private/byte_utils.h",
@@ -322,6 +336,19 @@
 )
 
 pw_cc_test(
+    name = "fake_flash_test_key_value_store_test",
+    srcs = ["test_key_value_store_test.cc"],
+    deps = [
+        ":crc16",
+        ":fake_flash_test_key_value_store",
+        ":pw_kvs",
+        "//pw_log:backend",
+        "//pw_status",
+        "//pw_unit_test",
+    ],
+)
+
+pw_cc_test(
     name = "key_value_store_binary_format_test",
     srcs = [
         "key_value_store_binary_format_test.cc",
diff --git a/pw_kvs/BUILD.gn b/pw_kvs/BUILD.gn
index 9a00f02..e3ef953 100644
--- a/pw_kvs/BUILD.gn
+++ b/pw_kvs/BUILD.gn
@@ -12,18 +12,27 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
+import("$dir_pw_build/module_config.gni")
 import("$dir_pw_build/target_types.gni")
 import("$dir_pw_docgen/docs.gni")
 import("$dir_pw_unit_test/test.gni")
-config("default_config") {
+
+declare_args() {
+  # The build target that overrides the default configuration options for this
+  # module. This should point to a source set that provides defines through a
+  # public config (which may -include a file or add defines directly).
+  pw_kvs_CONFIG = pw_build_DEFAULT_MODULE_CONFIG
+}
+
+config("public_include_path") {
   include_dirs = [ "public" ]
+  visibility = [ ":*" ]
 }
 
 pw_source_set("pw_kvs") {
-  public_configs = [ ":default_config" ]
+  public_configs = [ ":public_include_path" ]
   public = [
     "public/pw_kvs/alignment.h",
     "public/pw_kvs/checksum.h",
@@ -47,8 +56,6 @@
     "public/pw_kvs/internal/key_descriptor.h",
     "public/pw_kvs/internal/sectors.h",
     "public/pw_kvs/internal/span_traits.h",
-    "pw_kvs_private/config.h",
-    "pw_kvs_private/macros.h",
     "sectors.cc",
   ]
   public_deps = [
@@ -58,12 +65,19 @@
     dir_pw_status,
   ]
   deps = [
+    ":config",
     dir_pw_checksum,
     dir_pw_log,
   ]
   friend = [ ":*" ]
 }
 
+pw_source_set("config") {
+  public_deps = [ pw_kvs_CONFIG ]
+  public = [ "pw_kvs_private/config.h" ]
+  visibility = [ ":*" ]
+}
+
 pw_source_set("crc16") {
   public = [ "public/pw_kvs/crc16_checksum.h" ]
   public_deps = [
@@ -72,8 +86,18 @@
   ]
 }
 
+pw_source_set("flash_test_partition") {
+  public = [ "public/pw_kvs/flash_test_partition.h" ]
+  public_deps = [ ":pw_kvs" ]
+}
+
+pw_source_set("test_key_value_store") {
+  public = [ "public/pw_kvs/test_key_value_store.h" ]
+  public_deps = [ ":pw_kvs" ]
+}
+
 pw_source_set("fake_flash") {
-  public_configs = [ ":default_config" ]
+  public_configs = [ ":public_include_path" ]
   public = [ "public/pw_kvs/fake_flash_memory.h" ]
   sources = [ "fake_flash_memory.cc" ]
   public_deps = [
@@ -82,13 +106,17 @@
     dir_pw_span,
     dir_pw_status,
   ]
-  deps = [ dir_pw_log ]
+  deps = [
+    ":config",
+    dir_pw_log,
+  ]
 }
 
 pw_source_set("fake_flash_small_partition") {
-  public_configs = [ ":default_config" ]
+  public_configs = [ ":public_include_path" ]
   public = [ "public/pw_kvs/flash_test_partition.h" ]
   sources = [ "fake_flash_test_partition.cc" ]
+  public_deps = [ ":flash_test_partition" ]
   deps = [
     ":fake_flash",
     dir_pw_kvs,
@@ -96,9 +124,10 @@
 }
 
 pw_source_set("fake_flash_64_aligned_partition") {
-  public_configs = [ ":default_config" ]
+  public_configs = [ ":public_include_path" ]
   public = [ "public/pw_kvs/flash_test_partition.h" ]
   sources = [ "fake_flash_test_partition.cc" ]
+  public_deps = [ ":flash_test_partition" ]
   deps = [
     ":fake_flash",
     dir_pw_kvs,
@@ -107,9 +136,10 @@
 }
 
 pw_source_set("fake_flash_256_aligned_partition") {
-  public_configs = [ ":default_config" ]
+  public_configs = [ ":public_include_path" ]
   public = [ "public/pw_kvs/flash_test_partition.h" ]
   sources = [ "fake_flash_test_partition.cc" ]
+  public_deps = [ ":flash_test_partition" ]
   deps = [
     ":fake_flash",
     dir_pw_kvs,
@@ -117,8 +147,21 @@
   defines = [ "PW_FLASH_TEST_ALIGNMENT=256" ]
 }
 
+pw_source_set("fake_flash_test_key_value_store") {
+  public_configs = [ ":public_include_path" ]
+  sources = [ "fake_flash_test_key_value_store.cc" ]
+  public_deps = [ ":test_key_value_store" ]
+  deps = [
+    ":crc16",
+    ":fake_flash",
+    dir_pw_kvs,
+  ]
+}
+
 pw_source_set("flash_partition_test_100_iterations") {
   deps = [
+    ":config",
+    ":flash_test_partition",
     dir_pw_kvs,
     dir_pw_log,
     dir_pw_unit_test,
@@ -129,6 +172,8 @@
 
 pw_source_set("flash_partition_test_2_iterations") {
   deps = [
+    ":config",
+    ":flash_test_partition",
     dir_pw_kvs,
     dir_pw_log,
     dir_pw_unit_test,
@@ -140,8 +185,9 @@
 pw_source_set("key_value_store_initialized_test") {
   deps = [
     ":crc16",
+    ":flash_test_partition",
     ":pw_kvs",
-    ":test_utils",
+    dir_pw_bytes,
     dir_pw_checksum,
     dir_pw_log,
     dir_pw_unit_test,
@@ -149,14 +195,17 @@
   sources = [ "key_value_store_initialized_test.cc" ]
 }
 
-pw_source_set("test_utils") {
-  public_configs = [ ":default_config" ]
-  public = [ "pw_kvs_private/byte_utils.h" ]
-  visibility = [ ":*" ]
+pw_source_set("test_key_value_store_test") {
+  deps = [
+    ":pw_kvs",
+    ":test_key_value_store",
+    dir_pw_unit_test,
+  ]
+  sources = [ "test_key_value_store_test.cc" ]
 }
 
 pw_source_set("test_partition") {
-  public_configs = [ ":default_config" ]
+  public_configs = [ ":public_include_path" ]
   public = [ "public/pw_kvs/flash_partition_with_stats.h" ]
   sources = [ "flash_partition_with_stats.cc" ]
   visibility = [ ":*" ]
@@ -165,6 +214,7 @@
     dir_pw_log,
     dir_pw_status,
   ]
+  deps = [ ":config" ]
 }
 
 pw_test_group("tests") {
@@ -183,6 +233,7 @@
     ":key_value_store_binary_format_test",
     ":key_value_store_fuzz_test",
     ":key_value_store_map_test",
+    ":fake_flash_test_key_value_store_test",
     ":sectors_test",
     ":key_value_store_wear_test",
   ]
@@ -207,7 +258,7 @@
     ":crc16",
     ":fake_flash",
     ":pw_kvs",
-    ":test_utils",
+    dir_pw_bytes,
   ]
   sources = [ "entry_test.cc" ]
 }
@@ -216,7 +267,7 @@
   deps = [
     ":fake_flash",
     ":pw_kvs",
-    ":test_utils",
+    dir_pw_bytes,
   ]
   sources = [ "entry_cache_test.cc" ]
 }
@@ -253,7 +304,7 @@
     ":crc16",
     ":fake_flash",
     ":pw_kvs",
-    ":test_utils",
+    dir_pw_bytes,
     dir_pw_checksum,
     dir_pw_log,
   ]
@@ -286,7 +337,7 @@
     ":crc16",
     ":fake_flash",
     ":pw_kvs",
-    ":test_utils",
+    dir_pw_bytes,
     dir_pw_log,
   ]
   sources = [ "key_value_store_binary_format_test.cc" ]
@@ -302,6 +353,13 @@
   sources = [ "key_value_store_fuzz_test.cc" ]
 }
 
+pw_test("fake_flash_test_key_value_store_test") {
+  deps = [
+    ":fake_flash_test_key_value_store",
+    ":test_key_value_store_test",
+  ]
+}
+
 pw_test("key_value_store_map_test") {
   deps = [
     ":crc16",
diff --git a/pw_kvs/CMakeLists.txt b/pw_kvs/CMakeLists.txt
index d2f3380..3d48927 100644
--- a/pw_kvs/CMakeLists.txt
+++ b/pw_kvs/CMakeLists.txt
@@ -12,12 +12,15 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+
 pw_auto_add_simple_module(pw_kvs
   PUBLIC_DEPS
     pw_containers
     pw_status
   PRIVATE_DEPS
     pw_assert
+    pw_bytes
     pw_checksum
     pw_log
     pw_string
diff --git a/pw_kvs/alignment.cc b/pw_kvs/alignment.cc
index 198ec14..1192905 100644
--- a/pw_kvs/alignment.cc
+++ b/pw_kvs/alignment.cc
@@ -14,7 +14,7 @@
 
 #include "pw_kvs/alignment.h"
 
-#include "pw_kvs_private/macros.h"
+#include "pw_status/try.h"
 
 namespace pw {
 
@@ -23,7 +23,7 @@
     size_t to_copy = std::min(write_size_ - bytes_in_buffer_, data.size());
 
     std::memcpy(&buffer_[bytes_in_buffer_], data.data(), to_copy);
-    TRY_WITH_SIZE(AddBytesToBuffer(to_copy));
+    PW_TRY_WITH_SIZE(AddBytesToBuffer(to_copy));
     data = data.subspan(to_copy);
   }
 
@@ -58,7 +58,7 @@
     if (!result.ok()) {
       return StatusWithSize(result.status(), bytes_written_);
     }
-    TRY_WITH_SIZE(AddBytesToBuffer(to_read));
+    PW_TRY_WITH_SIZE(AddBytesToBuffer(to_read));
     size -= result.size();
   }
 
diff --git a/pw_kvs/alignment_test.cc b/pw_kvs/alignment_test.cc
index 0f3dc89..536de87 100644
--- a/pw_kvs/alignment_test.cc
+++ b/pw_kvs/alignment_test.cc
@@ -141,26 +141,26 @@
   AlignedWriterBuffer<32> writer(kAlignment, check_against_data);
 
   // Write values smaller than the alignment.
-  EXPECT_EQ(Status::OK, writer.Write(kBytes.subspan(0, 1)).status());
-  EXPECT_EQ(Status::OK, writer.Write(kBytes.subspan(1, 9)).status());
+  EXPECT_EQ(Status::Ok(), writer.Write(kBytes.subspan(0, 1)).status());
+  EXPECT_EQ(Status::Ok(), writer.Write(kBytes.subspan(1, 9)).status());
 
   // Write values larger than the alignment but smaller than the buffer.
-  EXPECT_EQ(Status::OK, writer.Write(kBytes.subspan(10, 11)).status());
+  EXPECT_EQ(Status::Ok(), writer.Write(kBytes.subspan(10, 11)).status());
 
   // Exactly fill the remainder of the buffer.
-  EXPECT_EQ(Status::OK, writer.Write(kBytes.subspan(21, 11)).status());
+  EXPECT_EQ(Status::Ok(), writer.Write(kBytes.subspan(21, 11)).status());
 
   // Fill the buffer more than once.
-  EXPECT_EQ(Status::OK, writer.Write(kBytes.subspan(32, 66)).status());
+  EXPECT_EQ(Status::Ok(), writer.Write(kBytes.subspan(32, 66)).status());
 
   // Write nothing.
-  EXPECT_EQ(Status::OK, writer.Write(kBytes.subspan(98, 0)).status());
+  EXPECT_EQ(Status::Ok(), writer.Write(kBytes.subspan(98, 0)).status());
 
   // Write the remaining data.
-  EXPECT_EQ(Status::OK, writer.Write(kBytes.subspan(98, 2)).status());
+  EXPECT_EQ(Status::Ok(), writer.Write(kBytes.subspan(98, 2)).status());
 
   auto result = writer.Flush();
-  EXPECT_EQ(Status::OK, result.status());
+  EXPECT_EQ(Status::Ok(), result.status());
   EXPECT_EQ(kData.size(), result.size());
 }
 
@@ -202,7 +202,7 @@
         ADD_FAILURE();
         break;
     }
-    return StatusWithSize(Status::UNKNOWN, data.size());
+    return StatusWithSize::Unknown(data.size());
   }
 };
 
@@ -213,7 +213,7 @@
     AlignedWriterBuffer<4> writer(3, output);
     writer.Write(std::as_bytes(std::span("Everything is fine.")));
     output.state = OutputWithErrorInjection::kBreakOnNext;
-    EXPECT_EQ(Status::UNKNOWN,
+    EXPECT_EQ(Status::Unknown(),
               writer.Write(std::as_bytes(std::span("No more writes, okay?")))
                   .status());
     writer.Flush();
@@ -222,7 +222,7 @@
 
 TEST(AlignedWriter, Write_ReturnsTotalBytesWritten) {
   static Status return_status;
-  return_status = Status::OK;
+  return_status = Status::Ok();
 
   OutputToFunction output([](std::span<const byte> data) {
     return StatusWithSize(return_status, data.size());
@@ -232,17 +232,17 @@
 
   StatusWithSize result =
       writer.Write(std::as_bytes(std::span("12345678901"sv)));
-  EXPECT_EQ(Status::OK, result.status());
+  EXPECT_EQ(Status::Ok(), result.status());
   EXPECT_EQ(0u, result.size());  // No writes; haven't filled buffer.
 
   result = writer.Write(std::as_bytes(std::span("2345678901"sv)));
-  EXPECT_EQ(Status::OK, result.status());
+  EXPECT_EQ(Status::Ok(), result.status());
   EXPECT_EQ(20u, result.size());
 
-  return_status = Status::PERMISSION_DENIED;
+  return_status = Status::PermissionDenied();
 
   result = writer.Write(std::as_bytes(std::span("2345678901234567890"sv)));
-  EXPECT_EQ(Status::PERMISSION_DENIED, result.status());
+  EXPECT_EQ(Status::PermissionDenied(), result.status());
   EXPECT_EQ(40u, result.size());
 }
 
@@ -252,17 +252,17 @@
 
   AlignedWriterBuffer<4> writer(2, output);
 
-  EXPECT_EQ(Status::OK,
+  EXPECT_EQ(Status::Ok(),
             writer.Write(std::as_bytes(std::span("12345678901"sv))).status());
 
   StatusWithSize result = writer.Flush();
-  EXPECT_EQ(Status::OK, result.status());
+  EXPECT_EQ(Status::Ok(), result.status());
   EXPECT_EQ(12u, result.size());
 }
 
 TEST(AlignedWriter, Flush_Error_ReturnsTotalBytesWritten) {
   OutputToFunction output([](std::span<const byte> data) {
-    return StatusWithSize(Status::ABORTED, data.size());
+    return StatusWithSize::Aborted(data.size());
   });
 
   AlignedWriterBuffer<20> writer(10, output);
@@ -270,7 +270,7 @@
   EXPECT_EQ(0u, writer.Write(std::as_bytes(std::span("12345678901"sv))).size());
 
   StatusWithSize result = writer.Flush();
-  EXPECT_EQ(Status::ABORTED, result.status());
+  EXPECT_EQ(Status::Aborted(), result.status());
   EXPECT_EQ(20u, result.size());
 }
 
@@ -284,12 +284,12 @@
     EXPECT_LE(index_ + data.size(), kBytes.size());
 
     if (index_ + data.size() > kBytes.size()) {
-      return StatusWithSize::INTERNAL;
+      return StatusWithSize::Internal();
     }
 
     // Check if reading from the index that was programmed to cause an error.
     if (index_ <= break_on_index_ && break_on_index_ <= index_ + data.size()) {
-      return StatusWithSize::ABORTED;
+      return StatusWithSize::Aborted();
     }
 
     std::memcpy(data.data(), kBytes.data(), data.size());
@@ -306,11 +306,11 @@
 
   InputWithErrorInjection input;
   StatusWithSize result = writer.Write(input, kData.size());
-  EXPECT_EQ(Status::OK, result.status());
+  EXPECT_EQ(Status::Ok(), result.status());
   EXPECT_LE(result.size(), kData.size());  // May not have written it all yet.
 
   result = writer.Flush();
-  EXPECT_EQ(Status::OK, result.status());
+  EXPECT_EQ(Status::Ok(), result.status());
   EXPECT_EQ(kData.size(), result.size());
 }
 
@@ -321,7 +321,7 @@
   input.BreakOnIndex(kAlignment + 2);
 
   StatusWithSize result = writer.Write(input, kData.size());
-  EXPECT_EQ(Status::ABORTED, result.status());
+  EXPECT_EQ(Status::Aborted(), result.status());
   EXPECT_LE(result.size(), kAlignment);  // Wrote the first chunk, nothing more.
 }
 
@@ -333,7 +333,7 @@
   output.state = OutputWithErrorInjection::kBreakOnNext;
 
   StatusWithSize result = writer.Write(input, kData.size());
-  EXPECT_EQ(Status::UNKNOWN, result.status());
+  EXPECT_EQ(Status::Unknown(), result.status());
   EXPECT_EQ(3u, result.size());  // Attempted to write 3 bytes.
 }
 
diff --git a/pw_kvs/checksum.cc b/pw_kvs/checksum.cc
index 0a885f7..08f59b8 100644
--- a/pw_kvs/checksum.cc
+++ b/pw_kvs/checksum.cc
@@ -22,12 +22,12 @@
 
 Status ChecksumAlgorithm::Verify(std::span<const byte> checksum) const {
   if (checksum.size() < size_bytes()) {
-    return Status::INVALID_ARGUMENT;
+    return Status::InvalidArgument();
   }
   if (std::memcmp(state_.data(), checksum.data(), size_bytes()) != 0) {
-    return Status::DATA_LOSS;
+    return Status::DataLoss();
   }
-  return Status::OK;
+  return Status::Ok();
 }
 
 }  // namespace pw::kvs
diff --git a/pw_kvs/checksum_test.cc b/pw_kvs/checksum_test.cc
index 0db5ed2..4fffae0 100644
--- a/pw_kvs/checksum_test.cc
+++ b/pw_kvs/checksum_test.cc
@@ -32,19 +32,20 @@
   ChecksumAlgorithm& algo = crc16_algo;
 
   algo.Update(kString.data(), kString.size());
-  EXPECT_EQ(Status::OK, algo.Verify(std::as_bytes(std::span(&kStringCrc, 1))));
+  EXPECT_EQ(Status::Ok(),
+            algo.Verify(std::as_bytes(std::span(&kStringCrc, 1))));
 }
 
 TEST(Checksum, Verify_Failure) {
   ChecksumCrc16 algo;
-  EXPECT_EQ(Status::DATA_LOSS,
+  EXPECT_EQ(Status::DataLoss(),
             algo.Verify(std::as_bytes(std::span(kString.data(), 2))));
 }
 
 TEST(Checksum, Verify_InvalidSize) {
   ChecksumCrc16 algo;
-  EXPECT_EQ(Status::INVALID_ARGUMENT, algo.Verify({}));
-  EXPECT_EQ(Status::INVALID_ARGUMENT,
+  EXPECT_EQ(Status::InvalidArgument(), algo.Verify({}));
+  EXPECT_EQ(Status::InvalidArgument(),
             algo.Verify(std::as_bytes(std::span(kString.substr(0, 1)))));
 }
 
@@ -55,7 +56,7 @@
 
   algo.Update(std::as_bytes(std::span(kString)));
 
-  EXPECT_EQ(Status::OK, algo.Verify(crc));
+  EXPECT_EQ(Status::Ok(), algo.Verify(crc));
 }
 
 TEST(Checksum, Reset) {
@@ -71,20 +72,20 @@
 TEST(IgnoreChecksum, NeverUpdate_VerifyWithoutData) {
   IgnoreChecksum checksum;
 
-  EXPECT_EQ(Status::OK, checksum.Verify({}));
+  EXPECT_EQ(Status::Ok(), checksum.Verify({}));
 }
 
 TEST(IgnoreChecksum, NeverUpdate_VerifyWithData) {
   IgnoreChecksum checksum;
 
-  EXPECT_EQ(Status::OK, checksum.Verify(std::as_bytes(std::span(kString))));
+  EXPECT_EQ(Status::Ok(), checksum.Verify(std::as_bytes(std::span(kString))));
 }
 
 TEST(IgnoreChecksum, AfterUpdate_Verify) {
   IgnoreChecksum checksum;
 
   checksum.Update(std::as_bytes(std::span(kString)));
-  EXPECT_EQ(Status::OK, checksum.Verify({}));
+  EXPECT_EQ(Status::Ok(), checksum.Verify({}));
 }
 
 constexpr size_t kAlignment = 10;
@@ -143,7 +144,7 @@
   EXPECT_EQ(std::string_view(reinterpret_cast<const char*>(state.data()),
                              state.size()),
             kData);
-  EXPECT_EQ(Status::OK, checksum.Verify(kBytes));
+  EXPECT_EQ(Status::Ok(), checksum.Verify(kBytes));
 }
 
 }  // namespace
diff --git a/pw_kvs/docs.rst b/pw_kvs/docs.rst
index eea1073..b958c63 100644
--- a/pw_kvs/docs.rst
+++ b/pw_kvs/docs.rst
@@ -1,15 +1,178 @@
-.. _chapter-pw-kvs:
-
-.. default-domain:: cpp
-
-.. highlight:: cpp
+.. _module-pw_kvs:
 
 ------
 pw_kvs
 ------
+
+.. note::
+  The documentation for this module is currently under construction.
+
 ``pw_kvs`` is Pigweed's Key Value Store (KVS) library. KVS is a flash-backed
 persistent storage system with integrated wear-leveling that serves as a
 relatively lightweight alternative to a file system.
 
-.. note::
-  The documentation for this module is currently incomplete.
+KeyValueStore
+=============
+
+The KVS system stores key and value data pairs. The key value pairs are stored
+in `flash memory`_ as a `key-value entry`_ (KV entry) that consists of a
+header/metadata, the key data, and value data. KV entries are accessed through
+Put, Get, and Delete operations.
+
+Each flash sector is written sequentially in an append-only manner, with each
+following entry write being at a higher address than all of the previous entry
+writes to that sector since erase. Once information (header, metadata, data,
+etc) is written to flash, that information is not modified or cleared until a
+full sector erase occurs as part of garbage collection.
+
+Individual KV entries are contained within a single flash sector (do not cross
+sector boundaries). Flash sectors can contain as many KV entries as fit in the
+sector.
+
+KVS does not store any data/metadata/state in flash beyond the KV entries. All
+KVS system state can be derived from the stored KV entries. Current KVS system
+state is determined at boot from flash-stored KV entries and then maintained in
+ram by the KVS. The KVS is at all times in a valid state on-flash, so there are
+no windows of vulnerability to unexpected power loss or crash. The old entry
+for a key is maintained until the new entry for that key is written and
+verified.
+
+Each `key-value entry`_ has a unique transaction ID that is incremented for
+each KVS update transaction. When determining system state from flash-stored KV
+entries, the valid entry with the highest transaction ID is considered to be
+the “current” entry of the key. All stored entries of the same key with lower
+transaction ID are considered old or “stale”.
+
+Updates/rewrites of a key that has been previously stored is done as a new KV
+entry with an updated transaction ID and the new value for the key. The KVS
+internal state is updated to reflect the new entry. The previously stored KV
+entries for that key are not modified or removed from flash storage, until
+garbage collection reclaims the “stale” entries.
+
+`Garbage collection`_ is done by copying any currently valid KV entries in the
+sector to be garbage collected to a different sector and then erasing the
+sector.
+
+Flash Memory
+-------------
+
+The flash storage used by KVS is comprised of two layers, FlashMemory and
+FlashPartition.
+
+FlashMemory is the lower level that manages the raw read/write/erase of the
+flash memory device.
+
+FlashPartition is a portion of a FlashMemory. A FlashMemory may have multiple
+FlashPartitions that represent different parts of the FlashMemory - such as
+partitions for KVS, OTA, snapshots/crashlogs, etc. Each FlashPartition has its
+own separate logical address space starting from zero to size of the partition.
+FlashPartition logical address does not always map directly to FlashMemory
+addresses due to partition encryption, sector headers, etc.
+
+Writes to flash must have a start address that is a multiple of the flash
+write alignment. Write size must also be a multiple of flash write alignment.
+Write alignment varies by flash device and partition type. FlashPartitions may
+have a different alignment than the FlashMemory they are part of, so long as
+the partition's alignment is a multiple of the alignment for the FlashMemory.
+Reads from flash do not have any address or size alignment requirement - reads
+always have a minimum alignment of 1.
+
+Flash sectors are the minimum erase size for both FlashMemory and
+FlashPartition. FlashPartitions may have a different logical sector size than
+the FlashMemory they are part of. Partition logical sectors may be smaller due
+to partition overhead (encryption, wear tracking, etc) or larger due to
+combining raw sectors into larger logical sectors.
+
+Storage Allocation
+------------------
+
+KVS requires more storage space than the size of the key-value data stored.
+This is due to the always free sector required for garbage collection and the
+"write and garbage collect later" approach KVS uses.
+
+KVS works poorly with stored data being more than 75% of the available
+storage. It works best with stored data being less than 50% of the available
+storage. Applications that prefer/need to do garbage collection at scheduled
+times or that write very heavily can benefit from additional flash store
+space.
+
+The flash storage used by KVS is multiplied by `redundancy`_ used. A redundancy
+of 2 will use twice the storage.
+
+Key-Value Entry
+---------------
+
+Each key-value (KV) entry consists of a header/metadata, the key data, and
+value data. Individual KV entries are contained within a single flash sector
+(do not cross sector boundaries). Because of this the maximum KV entry size is
+the partition sector size.
+
+KV entries are appended as needed to sectors, with append operations spread
+over time. Each individual KV entry is written completely as a single
+high-level operation. KV entries are appended to a sector as long as space is
+available for a given KV entry. Multiple sectors can be active for writing at
+any time.
+
+When a key is rewritten (writing a new KV entry of an existing key), the KV
+entry is stored at a new location that may or may not be located in the same
+sector as the previous entry for that key. The new entry uses a transaction
+ID greater than the previous transaction ID. The previous KV entry for that key
+remains unaltered “on-disk” but is considered “stale”. It is garbage collected
+at some future time.
+
+Redundancy
+----------
+
+KVS supports storing redundant copies of KV entries. For a given redundancy
+level (N), N total copies of each KV entry are stored. Redundant copies are
+always stored in different sectors. This protects against corruption or even
+full sector loss in N-1 sectors without data loss.
+
+Redundancy increases flash usage proportional to the redundancy level. The RAM
+usage for KVS internal state has a small increase with redundancy.
+
+Garbage Collection
+------------------
+
+Storage space occupied by stale KV entries is reclaimed and made available
+for reuse through a garbage collection process. The base garbage collection
+operation is done to reclaim one sector at a time.
+
+KVS always keeps at least one sector free at all times to ensure the ability to
+garbage collect. This free sector is used to copy valid entries from the sector
+to be garbage collected before erasing the sector to be garbage collected. The
+always free sector is rotated as part of the KVS wear leveling.
+
+Full Maintenance does garbage collection of all sectors except those that have
+current valid KV entries.
+
+Heavy Maintenance does garbage collection of all sectors. Use strong caution
+when doing Heavy Maintenance as it can, compared to Full Maintenance, result
+in a significant amount of moving valid entries.
+
+Garbage collection can be performed by request of higher level software or
+automatically as needed to make space available to write new entries.
+
+Flash wear management
+---------------------
+
+Wear leveling is accomplished by cycling selection of the next sector to write
+to. This cycling spreads flash wear across all free sectors so that no one
+sector is prematurely worn out.
+
+Wear leveling through cycling selection of next sector to write
+
+* Location of new writes/rewrites of key-values will prefer sectors already
+  in-use (partially filled), with new (blank) sectors used when no in-use
+  sectors have large enough available space for the new write
+* New (blank) sectors selected cycle sequentially between available free
+  sectors
+* Search for the first available sector, starting from current write sector + 1
+  and wrap around to start at the end of partition.
+* This spreads the erase/write cycles for heavily written/rewritten key-values
+  across all free sectors, reducing wear on any single sector
+* Erase count is not considered as part of the wear leveling decision making
+  process
+* Sectors with already written key-values that are not modified will remain in
+  the original sector and not participate in wear-leveling, so long as the
+  key-values in the sector remain unchanged
diff --git a/pw_kvs/entry.cc b/pw_kvs/entry.cc
index cbc07d7..658c7df 100644
--- a/pw_kvs/entry.cc
+++ b/pw_kvs/entry.cc
@@ -13,6 +13,7 @@
 // the License.
 
 #define PW_LOG_MODULE_NAME "KVS"
+#define PW_LOG_LEVEL PW_KVS_LOG_LEVEL
 
 #include "pw_kvs/internal/entry.h"
 
@@ -20,8 +21,8 @@
 #include <cstring>
 
 #include "pw_kvs_private/config.h"
-#include "pw_kvs_private/macros.h"
 #include "pw_log/log.h"
+#include "pw_status/try.h"
 
 namespace pw::kvs::internal {
 
@@ -40,13 +41,13 @@
                    const internal::EntryFormats& formats,
                    Entry* entry) {
   EntryHeader header;
-  TRY(partition.Read(address, sizeof(header), &header));
+  PW_TRY(partition.Read(address, sizeof(header), &header));
 
   if (partition.AppearsErased(std::as_bytes(std::span(&header.magic, 1)))) {
-    return Status::NOT_FOUND;
+    return Status::NotFound();
   }
   if (header.key_length_bytes > kMaxKeyLength) {
-    return Status::DATA_LOSS;
+    return Status::DataLoss();
   }
 
   const EntryFormat* format = formats.Find(header.magic);
@@ -54,11 +55,11 @@
     PW_LOG_ERROR("Found corrupt magic: %" PRIx32 " at address %u",
                  header.magic,
                  unsigned(address));
-    return Status::DATA_LOSS;
+    return Status::DataLoss();
   }
 
   *entry = Entry(&partition, address, *format, header);
-  return Status::OK;
+  return Status::Ok();
 }
 
 Status Entry::ReadKey(FlashPartition& partition,
@@ -66,7 +67,7 @@
                       size_t key_length,
                       char* key) {
   if (key_length == 0u || key_length > kMaxKeyLength) {
-    return Status::DATA_LOSS;
+    return Status::DataLoss();
   }
 
   return partition.Read(address + sizeof(EntryHeader), key_length, key)
@@ -134,18 +135,18 @@
 
   // Use this object's header rather than the header in flash of flash, since
   // this Entry may have been updated.
-  TRY_WITH_SIZE(writer.Write(&header_, sizeof(header_)));
+  PW_TRY_WITH_SIZE(writer.Write(&header_, sizeof(header_)));
 
   // Write only the key and value from the original entry.
   FlashPartition::Input input(partition(), address() + sizeof(EntryHeader));
-  TRY_WITH_SIZE(writer.Write(input, key_length() + value_size()));
+  PW_TRY_WITH_SIZE(writer.Write(input, key_length() + value_size()));
   return writer.Flush();
 }
 
 StatusWithSize Entry::ReadValue(std::span<byte> buffer,
                                 size_t offset_bytes) const {
   if (offset_bytes > value_size()) {
-    return StatusWithSize::OUT_OF_RANGE;
+    return StatusWithSize::OutOfRange();
   }
 
   const size_t remaining_bytes = value_size() - offset_bytes;
@@ -154,17 +155,17 @@
   StatusWithSize result = partition().Read(
       address_ + sizeof(EntryHeader) + key_length() + offset_bytes,
       buffer.subspan(0, read_size));
-  TRY_WITH_SIZE(result);
+  PW_TRY_WITH_SIZE(result);
 
   if (read_size != remaining_bytes) {
-    return StatusWithSize(Status::RESOURCE_EXHAUSTED, read_size);
+    return StatusWithSize::ResourceExhausted(read_size);
   }
   return StatusWithSize(read_size);
 }
 
 Status Entry::ValueMatches(std::span<const std::byte> value) const {
   if (value_size() != value.size_bytes()) {
-    return Status::NOT_FOUND;
+    return Status::NotFound();
   }
 
   Address address = address_ + sizeof(EntryHeader) + key_length();
@@ -174,23 +175,23 @@
   std::array<std::byte, 2 * kMinAlignmentBytes> buffer;
   while (address < end) {
     const size_t read_size = std::min(size_t(end - address), buffer.size());
-    TRY(partition_->Read(address, std::span(buffer).first(read_size)));
+    PW_TRY(partition_->Read(address, std::span(buffer).first(read_size)));
 
     if (std::memcmp(buffer.data(), value_ptr, read_size) != 0) {
-      return Status::NOT_FOUND;
+      return Status::NotFound();
     }
 
     address += read_size;
     value_ptr += read_size;
   }
 
-  return Status::OK;
+  return Status::Ok();
 }
 
 Status Entry::VerifyChecksum(string_view key,
                              std::span<const byte> value) const {
   if (checksum_algo_ == nullptr) {
-    return header_.checksum == 0 ? Status::OK : Status::DATA_LOSS;
+    return header_.checksum == 0 ? Status::Ok() : Status::DataLoss();
   }
   CalculateChecksum(key, value);
   return checksum_algo_->Verify(checksum_bytes());
@@ -210,17 +211,17 @@
   Address read_address = address_;
 
   // Read the first chunk, which includes the header, and compare the checksum.
-  TRY(partition().Read(read_address, read_size, buffer));
+  PW_TRY(partition().Read(read_address, read_size, buffer));
 
   if (header_to_verify.checksum != header_.checksum) {
     PW_LOG_ERROR("Expected checksum 0x%08" PRIx32 ", found 0x%08" PRIx32,
                  header_.checksum,
                  header_to_verify.checksum);
-    return Status::DATA_LOSS;
+    return Status::DataLoss();
   }
 
   if (checksum_algo_ == nullptr) {
-    return header_.checksum == 0 ? Status::OK : Status::DATA_LOSS;
+    return header_.checksum == 0 ? Status::Ok() : Status::DataLoss();
   }
 
   // The checksum is calculated as if the header's checksum field were 0.
@@ -240,7 +241,7 @@
     // Read the next chunk into the buffer.
     read_address += read_size;
     read_size = std::min(sizeof(buffer), bytes_to_read);
-    TRY(partition().Read(read_address, read_size, buffer));
+    PW_TRY(partition().Read(read_address, read_size, buffer));
   }
 
   checksum_algo_->Finish();
@@ -281,7 +282,7 @@
   header_.checksum = 0;
 
   if (checksum_algo_ == nullptr) {
-    return Status::OK;
+    return Status::Ok();
   }
 
   checksum_algo_->Reset();
@@ -295,7 +296,7 @@
   std::array<std::byte, 2 * kMinAlignmentBytes> buffer;
   while (address < end) {
     const size_t read_size = std::min(size_t(end - address), buffer.size());
-    TRY(partition_->Read(address, std::span(buffer).first(read_size)));
+    PW_TRY(partition_->Read(address, std::span(buffer).first(read_size)));
 
     checksum_algo_->Update(buffer.data(), read_size);
     address += read_size;
@@ -307,7 +308,7 @@
   std::memcpy(&header_.checksum,
               checksum.data(),
               std::min(checksum.size(), sizeof(header_.checksum)));
-  return Status::OK;
+  return Status::Ok();
 }
 
 void Entry::AddPaddingBytesToChecksum() const {
diff --git a/pw_kvs/entry_cache.cc b/pw_kvs/entry_cache.cc
index 116f27f..9fa93a2 100644
--- a/pw_kvs/entry_cache.cc
+++ b/pw_kvs/entry_cache.cc
@@ -13,6 +13,7 @@
 // the License.
 
 #define PW_LOG_MODULE_NAME "KVS"
+#define PW_LOG_LEVEL PW_KVS_LOG_LEVEL
 
 #include "pw_kvs/internal/entry_cache.h"
 
@@ -21,7 +22,7 @@
 #include "pw_kvs/flash_memory.h"
 #include "pw_kvs/internal/entry.h"
 #include "pw_kvs/internal/hash.h"
-#include "pw_kvs_private/macros.h"
+#include "pw_kvs_private/config.h"
 #include "pw_log/log.h"
 
 namespace pw::kvs::internal {
@@ -104,18 +105,18 @@
 
       if (!key_found) {
         PW_LOG_ERROR("No valid entries for key. Data has been lost!");
-        return StatusWithSize(Status::DATA_LOSS, error_val);
+        return StatusWithSize::DataLoss(error_val);
       } else if (key == read_key) {
         PW_LOG_DEBUG("Found match for key hash 0x%08" PRIx32, hash);
         *metadata = EntryMetadata(descriptors_[i], addresses(i));
-        return StatusWithSize(Status::OK, error_val);
+        return StatusWithSize::Ok(error_val);
       } else {
         PW_LOG_WARN("Found key hash collision for 0x%08" PRIx32, hash);
-        return StatusWithSize(Status::ALREADY_EXISTS, error_val);
+        return StatusWithSize::AlreadyExists(error_val);
       }
     }
   }
-  return StatusWithSize::NOT_FOUND;
+  return StatusWithSize::NotFound();
 }
 
 EntryMetadata EntryCache::AddNew(const KeyDescriptor& descriptor,
@@ -140,17 +141,17 @@
   // Write a new entry if there is room.
   if (index == -1) {
     if (full()) {
-      return Status::RESOURCE_EXHAUSTED;
+      return Status::ResourceExhausted();
     }
     AddNew(descriptor, address);
-    return Status::OK;
+    return Status::Ok();
   }
 
   // Existing entry is old; replace the existing entry with the new one.
   if (descriptor.transaction_id > descriptors_[index].transaction_id) {
     descriptors_[index] = descriptor;
     ResetAddresses(index, address);
-    return Status::OK;
+    return Status::Ok();
   }
 
   // If the entries have a duplicate transaction ID, add the new (redundant)
@@ -161,7 +162,7 @@
                    " with transaction ID %" PRIu32 " has non-matching hash",
                    descriptor.key_hash,
                    descriptor.transaction_id);
-      return Status::DATA_LOSS;
+      return Status::DataLoss();
     }
 
     // Verify that this entry is not in the same sector as an existing copy of
@@ -170,7 +171,7 @@
       if (existing_address / sector_size_bytes == address / sector_size_bytes) {
         PW_LOG_DEBUG("Multiple Redundant entries in same sector %u",
                      unsigned(address / sector_size_bytes));
-        return Status::DATA_LOSS;
+        return Status::DataLoss();
       }
     }
 
@@ -178,7 +179,7 @@
   } else {
     PW_LOG_DEBUG("Found stale entry when appending; ignoring");
   }
-  return Status::OK;
+  return Status::Ok();
 }
 
 size_t EntryCache::present_entries() const {
diff --git a/pw_kvs/entry_cache_test.cc b/pw_kvs/entry_cache_test.cc
index 60fa736..7d6a25f 100644
--- a/pw_kvs/entry_cache_test.cc
+++ b/pw_kvs/entry_cache_test.cc
@@ -15,11 +15,11 @@
 #include "pw_kvs/internal/entry_cache.h"
 
 #include "gtest/gtest.h"
+#include "pw_bytes/array.h"
 #include "pw_kvs/fake_flash_memory.h"
 #include "pw_kvs/flash_memory.h"
 #include "pw_kvs/internal/hash.h"
 #include "pw_kvs/internal/key_descriptor.h"
-#include "pw_kvs_private/byte_utils.h"
 
 namespace pw::kvs::internal {
 namespace {
@@ -83,7 +83,7 @@
 }
 
 TEST_F(EmptyEntryCache, AddNewOrUpdateExisting_NewEntry) {
-  ASSERT_EQ(Status::OK,
+  ASSERT_EQ(Status::Ok(),
             entries_.AddNewOrUpdateExisting(kDescriptor, 1000, 2000));
 
   EXPECT_EQ(1u, entries_.present_entries());
@@ -98,13 +98,13 @@
 TEST_F(EmptyEntryCache, AddNewOrUpdateExisting_NewEntry_Full) {
   for (uint32_t i = 0; i < kMaxEntries; ++i) {
     ASSERT_EQ(  // Fill up the cache
-        Status::OK,
+        Status::Ok(),
         entries_.AddNewOrUpdateExisting({i, i, EntryState::kValid}, i, 1));
   }
   ASSERT_EQ(kMaxEntries, entries_.total_entries());
   ASSERT_TRUE(entries_.full());
 
-  EXPECT_EQ(Status::RESOURCE_EXHAUSTED,
+  EXPECT_EQ(Status::ResourceExhausted(),
             entries_.AddNewOrUpdateExisting(kDescriptor, 1000, 1));
   EXPECT_EQ(kMaxEntries, entries_.total_entries());
 }
@@ -113,7 +113,7 @@
   KeyDescriptor kd = kDescriptor;
   kd.transaction_id += 3;
 
-  ASSERT_EQ(Status::OK, entries_.AddNewOrUpdateExisting(kd, 3210, 2000));
+  ASSERT_EQ(Status::Ok(), entries_.AddNewOrUpdateExisting(kd, 3210, 2000));
 
   EXPECT_EQ(1u, entries_.present_entries());
 
@@ -125,15 +125,15 @@
 }
 
 TEST_F(EmptyEntryCache, AddNewOrUpdateExisting_AddDuplicateEntry) {
-  ASSERT_EQ(Status::OK,
+  ASSERT_EQ(Status::Ok(),
             entries_.AddNewOrUpdateExisting(kDescriptor, 1000, 2000));
-  ASSERT_EQ(Status::OK,
+  ASSERT_EQ(Status::Ok(),
             entries_.AddNewOrUpdateExisting(kDescriptor, 3000, 2000));
-  ASSERT_EQ(Status::OK,
+  ASSERT_EQ(Status::Ok(),
             entries_.AddNewOrUpdateExisting(kDescriptor, 7000, 2000));
 
   // Duplicates beyond the redundancy are ignored.
-  ASSERT_EQ(Status::OK,
+  ASSERT_EQ(Status::Ok(),
             entries_.AddNewOrUpdateExisting(kDescriptor, 9000, 2000));
 
   EXPECT_EQ(1u, entries_.present_entries());
@@ -150,9 +150,9 @@
 }
 
 TEST_F(EmptyEntryCache, AddNewOrUpdateExisting_AddDuplicateEntryInSameSector) {
-  ASSERT_EQ(Status::OK,
+  ASSERT_EQ(Status::Ok(),
             entries_.AddNewOrUpdateExisting(kDescriptor, 1000, 1000));
-  EXPECT_EQ(Status::DATA_LOSS,
+  EXPECT_EQ(Status::DataLoss(),
             entries_.AddNewOrUpdateExisting(kDescriptor, 1950, 1000));
 
   EXPECT_EQ(1u, entries_.present_entries());
@@ -198,13 +198,14 @@
 constexpr uint32_t kMagic = 0xa14ae726;
 // For KVS entry magic value always use a random 32 bit integer rather than a
 // human readable 4 bytes. See pw_kvs/format.h for more information.
-constexpr auto kTheEntry = AsBytes(uint32_t(kMagic),  // magic
-                                   uint32_t(0),       // checksum
-                                   uint8_t(0),        // alignment (16 B)
-                                   uint8_t(sizeof(kTheKey) - 1),  // key length
-                                   uint16_t(0),                   // value size
-                                   uint32_t(123),  // transaction ID
-                                   ByteStr(kTheKey));
+constexpr auto kTheEntry =
+    bytes::Concat(uint32_t(kMagic),              // magic
+                  uint32_t(0),                   // checksum
+                  uint8_t(0),                    // alignment (16 B)
+                  uint8_t(sizeof(kTheKey) - 1),  // key length
+                  uint16_t(0),                   // value size
+                  uint32_t(123),                 // transaction ID
+                  bytes::String(kTheKey));
 constexpr std::array<byte, kSectorSize - kTheEntry.size() % kSectorSize>
     kPadding1{};
 constexpr size_t kSize1 = kTheEntry.size() + kPadding1.size();
@@ -215,13 +216,13 @@
 // For KVS entry magic value always use a random 32 bit integer rather than a
 // human readable 4 bytes. See pw_kvs/format.h for more information.
 constexpr auto kCollisionEntry =
-    AsBytes(uint32_t(kMagic),                  // magic
-            uint32_t(0),                       // checksum
-            uint8_t(0),                        // alignment (16 B)
-            uint8_t(sizeof(kCollision1) - 1),  // key length
-            uint16_t(0),                       // value size
-            uint32_t(123),                     // transaction ID
-            ByteStr(kCollision1));
+    bytes::Concat(uint32_t(kMagic),                  // magic
+                  uint32_t(0),                       // checksum
+                  uint8_t(0),                        // alignment (16 B)
+                  uint8_t(sizeof(kCollision1) - 1),  // key length
+                  uint16_t(0),                       // value size
+                  uint32_t(123),                     // transaction ID
+                  bytes::String(kCollision1));
 constexpr std::array<byte, kSectorSize - kCollisionEntry.size() % kSectorSize>
     kPadding2{};
 constexpr size_t kSize2 = kCollisionEntry.size() + kPadding2.size();
@@ -229,13 +230,13 @@
 // For KVS entry magic value always use a random 32 bit integer rather than a
 // human readable 4 bytes. See pw_kvs/format.h for more information.
 constexpr auto kDeletedEntry =
-    AsBytes(uint32_t(kMagic),                 // magic
-            uint32_t(0),                      // checksum
-            uint8_t(0),                       // alignment (16 B)
-            uint8_t(sizeof("delorted") - 1),  // key length
-            uint16_t(0xffff),                 // value size (deleted)
-            uint32_t(123),                    // transaction ID
-            ByteStr("delorted"));
+    bytes::Concat(uint32_t(kMagic),                 // magic
+                  uint32_t(0),                      // checksum
+                  uint8_t(0),                       // alignment (16 B)
+                  uint8_t(sizeof("delorted") - 1),  // key length
+                  uint16_t(0xffff),                 // value size (deleted)
+                  uint32_t(123),                    // transaction ID
+                  bytes::String("delorted"));
 constexpr std::array<byte, kSectorSize - kDeletedEntry.size() % kSectorSize>
     kPadding3{};
 
@@ -248,14 +249,14 @@
   static_assert(Hash(kCollision1) == Hash(kCollision2));
 
   InitializedEntryCache()
-      : flash_(AsBytes(kTheEntry,
-                       kPadding1,
-                       kTheEntry,
-                       kPadding1,
-                       kCollisionEntry,
-                       kPadding2,
-                       kDeletedEntry,
-                       kPadding3)),
+      : flash_(bytes::Concat(kTheEntry,
+                             kPadding1,
+                             kTheEntry,
+                             kPadding1,
+                             kCollisionEntry,
+                             kPadding2,
+                             kDeletedEntry,
+                             kPadding3)),
         partition_(&flash_),
         sectors_(sector_descriptors_, partition_, nullptr),
         format_(kFormat) {
@@ -319,7 +320,7 @@
   StatusWithSize result =
       entries_.Find(partition_, sectors_, format_, kTheKey, &metadata);
 
-  ASSERT_EQ(Status::OK, result.status());
+  ASSERT_EQ(Status::Ok(), result.status());
   EXPECT_EQ(0u, result.size());
   EXPECT_EQ(Hash(kTheKey), metadata.hash());
   EXPECT_EQ(EntryState::kValid, metadata.state());
@@ -329,14 +330,14 @@
 TEST_F(InitializedEntryCache, Find_PresentEntryWithSingleReadError) {
   // Inject 2 read errors so that the initial key read and the follow-up full
   // read of the first entry fail.
-  flash_.InjectReadError(FlashError::Unconditional(Status::INTERNAL, 2));
+  flash_.InjectReadError(FlashError::Unconditional(Status::Internal(), 2));
 
   EntryMetadata metadata;
 
   StatusWithSize result =
       entries_.Find(partition_, sectors_, format_, kTheKey, &metadata);
 
-  ASSERT_EQ(Status::OK, result.status());
+  ASSERT_EQ(Status::Ok(), result.status());
   EXPECT_EQ(1u, result.size());
   EXPECT_EQ(Hash(kTheKey), metadata.hash());
   EXPECT_EQ(EntryState::kValid, metadata.state());
@@ -344,14 +345,14 @@
 }
 
 TEST_F(InitializedEntryCache, Find_PresentEntryWithMultiReadError) {
-  flash_.InjectReadError(FlashError::Unconditional(Status::INTERNAL, 4));
+  flash_.InjectReadError(FlashError::Unconditional(Status::Internal(), 4));
 
   EntryMetadata metadata;
 
   StatusWithSize result =
       entries_.Find(partition_, sectors_, format_, kTheKey, &metadata);
 
-  ASSERT_EQ(Status::DATA_LOSS, result.status());
+  ASSERT_EQ(Status::DataLoss(), result.status());
   EXPECT_EQ(1u, result.size());
   CheckForCorruptSectors(&sectors_.FromAddress(0),
                          &sectors_.FromAddress(kSize1));
@@ -363,7 +364,7 @@
   StatusWithSize result =
       entries_.Find(partition_, sectors_, format_, "delorted", &metadata);
 
-  ASSERT_EQ(Status::OK, result.status());
+  ASSERT_EQ(Status::Ok(), result.status());
   EXPECT_EQ(0u, result.size());
   EXPECT_EQ(Hash("delorted"), metadata.hash());
   EXPECT_EQ(EntryState::kDeleted, metadata.state());
@@ -376,7 +377,7 @@
   StatusWithSize result =
       entries_.Find(partition_, sectors_, format_, "3.141", &metadata);
 
-  ASSERT_EQ(Status::NOT_FOUND, result.status());
+  ASSERT_EQ(Status::NotFound(), result.status());
   EXPECT_EQ(0u, result.size());
   CheckForCorruptSectors();
 }
@@ -386,7 +387,7 @@
 
   StatusWithSize result =
       entries_.Find(partition_, sectors_, format_, kCollision2, &metadata);
-  EXPECT_EQ(Status::ALREADY_EXISTS, result.status());
+  EXPECT_EQ(Status::AlreadyExists(), result.status());
   EXPECT_EQ(0u, result.size());
   CheckForCorruptSectors();
 }
diff --git a/pw_kvs/entry_test.cc b/pw_kvs/entry_test.cc
index a1afab2..8ac1d1a 100644
--- a/pw_kvs/entry_test.cc
+++ b/pw_kvs/entry_test.cc
@@ -18,13 +18,13 @@
 #include <string_view>
 
 #include "gtest/gtest.h"
+#include "pw_bytes/array.h"
 #include "pw_kvs/alignment.h"
 #include "pw_kvs/checksum.h"
 #include "pw_kvs/crc16_checksum.h"
 #include "pw_kvs/fake_flash_memory.h"
 #include "pw_kvs/flash_memory.h"
 #include "pw_kvs/format.h"
-#include "pw_kvs_private/byte_utils.h"
 
 namespace pw::kvs::internal {
 namespace {
@@ -92,20 +92,20 @@
 constexpr uint32_t kMagicWithChecksum = 0xad165142;
 constexpr uint32_t kTransactionId1 = 0x96979899;
 
-constexpr auto kKey1 = ByteStr("key45");
-constexpr auto kValue1 = ByteStr("VALUE!");
-constexpr auto kPadding1 = ByteStr("\0\0\0\0\0");
+constexpr auto kKey1 = bytes::String("key45");
+constexpr auto kValue1 = bytes::String("VALUE!");
+constexpr auto kPadding1 = bytes::String("\0\0\0\0\0");
 
-constexpr auto kHeader1 = AsBytes(kMagicWithChecksum,
-                                  uint32_t(0x23aa),          // checksum (CRC16)
-                                  uint8_t(1),                // alignment (32 B)
-                                  uint8_t(kKey1.size()),     // key length
-                                  uint16_t(kValue1.size()),  // value size
-                                  kTransactionId1            // transaction ID
+constexpr auto kHeader1 = bytes::Concat(kMagicWithChecksum,
+                                        uint32_t(0x23aa),  // checksum (CRC16)
+                                        uint8_t(1),        // alignment (32 B)
+                                        uint8_t(kKey1.size()),     // key length
+                                        uint16_t(kValue1.size()),  // value size
+                                        kTransactionId1  // transaction ID
 );
 
-constexpr auto kEntryWithoutPadding1 = AsBytes(kHeader1, kKey1, kValue1);
-constexpr auto kEntry1 = AsBytes(kEntryWithoutPadding1, kPadding1);
+constexpr auto kEntryWithoutPadding1 = bytes::Concat(kHeader1, kKey1, kValue1);
+constexpr auto kEntry1 = bytes::Concat(kEntryWithoutPadding1, kPadding1);
 static_assert(kEntry1.size() == 32);
 
 ChecksumCrc16 default_checksum;
@@ -116,7 +116,7 @@
 class ValidEntryInFlash : public ::testing::Test {
  protected:
   ValidEntryInFlash() : flash_(kEntry1), partition_(&flash_) {
-    EXPECT_EQ(Status::OK, Entry::Read(partition_, 0, kFormats, &entry_));
+    EXPECT_EQ(Status::Ok(), Entry::Read(partition_, 0, kFormats, &entry_));
   }
 
   FakeFlashMemoryBuffer<1024, 4> flash_;
@@ -125,8 +125,8 @@
 };
 
 TEST_F(ValidEntryInFlash, PassesChecksumVerification) {
-  EXPECT_EQ(Status::OK, entry_.VerifyChecksumInFlash());
-  EXPECT_EQ(Status::OK, entry_.VerifyChecksum("key45", kValue1));
+  EXPECT_EQ(Status::Ok(), entry_.VerifyChecksumInFlash());
+  EXPECT_EQ(Status::Ok(), entry_.VerifyChecksum("key45", kValue1));
 }
 
 TEST_F(ValidEntryInFlash, HeaderContents) {
@@ -141,7 +141,7 @@
   Entry::KeyBuffer key = {};
   auto result = entry_.ReadKey(key);
 
-  ASSERT_EQ(Status::OK, result.status());
+  ASSERT_EQ(Status::Ok(), result.status());
   EXPECT_EQ(result.size(), entry_.key_length());
   EXPECT_STREQ(key.data(), "key45");
 }
@@ -150,7 +150,7 @@
   char value[32] = {};
   auto result = entry_.ReadValue(std::as_writable_bytes(std::span(value)));
 
-  ASSERT_EQ(Status::OK, result.status());
+  ASSERT_EQ(Status::Ok(), result.status());
   EXPECT_EQ(result.size(), entry_.value_size());
   EXPECT_STREQ(value, "VALUE!");
 }
@@ -159,7 +159,7 @@
   char value[3] = {};
   auto result = entry_.ReadValue(std::as_writable_bytes(std::span(value)));
 
-  ASSERT_EQ(Status::RESOURCE_EXHAUSTED, result.status());
+  ASSERT_EQ(Status::ResourceExhausted(), result.status());
   EXPECT_EQ(3u, result.size());
   EXPECT_EQ(value[0], 'V');
   EXPECT_EQ(value[1], 'A');
@@ -170,7 +170,7 @@
   char value[3] = {};
   auto result = entry_.ReadValue(std::as_writable_bytes(std::span(value)), 3);
 
-  ASSERT_EQ(Status::OK, result.status());
+  ASSERT_EQ(Status::Ok(), result.status());
   EXPECT_EQ(3u, result.size());
   EXPECT_EQ(value[0], 'U');
   EXPECT_EQ(value[1], 'E');
@@ -181,7 +181,7 @@
   char value[1] = {};
   auto result = entry_.ReadValue(std::as_writable_bytes(std::span(value)), 4);
 
-  ASSERT_EQ(Status::RESOURCE_EXHAUSTED, result.status());
+  ASSERT_EQ(Status::ResourceExhausted(), result.status());
   EXPECT_EQ(1u, result.size());
   EXPECT_EQ(value[0], 'E');
 }
@@ -190,7 +190,7 @@
   char value[16] = {'?'};
   auto result = entry_.ReadValue(std::as_writable_bytes(std::span(value)), 6);
 
-  ASSERT_EQ(Status::OK, result.status());
+  ASSERT_EQ(Status::Ok(), result.status());
   EXPECT_EQ(0u, result.size());
   EXPECT_EQ(value[0], '?');
 }
@@ -199,7 +199,7 @@
   char value[16] = {};
   auto result = entry_.ReadValue(std::as_writable_bytes(std::span(value)), 7);
 
-  EXPECT_EQ(Status::OUT_OF_RANGE, result.status());
+  EXPECT_EQ(Status::OutOfRange(), result.status());
   EXPECT_EQ(0u, result.size());
 }
 
@@ -208,16 +208,16 @@
   FlashPartition partition(&flash, 0, flash.sector_count(), 32);
 
   Entry entry = Entry::Valid(
-      partition, 53, kFormatWithChecksum, "key45", kValue1, kTransactionId1);
+      partition, 64, kFormatWithChecksum, "key45", kValue1, kTransactionId1);
 
   auto result = entry.Write("key45", kValue1);
-  EXPECT_EQ(Status::OK, result.status());
+  EXPECT_EQ(Status::Ok(), result.status());
   EXPECT_EQ(32u, result.size());
-  EXPECT_EQ(std::memcmp(&flash.buffer()[53], kEntry1.data(), kEntry1.size()),
+  EXPECT_EQ(std::memcmp(&flash.buffer()[64], kEntry1.data(), kEntry1.size()),
             0);
 }
 
-constexpr auto kHeader2 = ByteStr(
+constexpr auto kHeader2 = bytes::String(
     "\x42\x51\x16\xad"  // magic
     "\xba\xb3\x00\x00"  // checksum (CRC16)
     "\x00"              // alignment
@@ -226,13 +226,14 @@
     "\x00\x01\x02\x03"  // transaction ID
 );
 
-constexpr auto kKeyAndPadding2 = ByteStr("K\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0");
+constexpr auto kKeyAndPadding2 =
+    bytes::String("K\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0");
 
 class TombstoneEntryInFlash : public ::testing::Test {
  protected:
   TombstoneEntryInFlash()
-      : flash_(AsBytes(kHeader2, kKeyAndPadding2)), partition_(&flash_) {
-    EXPECT_EQ(Status::OK, Entry::Read(partition_, 0, kFormats, &entry_));
+      : flash_(bytes::Concat(kHeader2, kKeyAndPadding2)), partition_(&flash_) {
+    EXPECT_EQ(Status::Ok(), Entry::Read(partition_, 0, kFormats, &entry_));
   }
 
   FakeFlashMemoryBuffer<1024, 4> flash_;
@@ -241,8 +242,8 @@
 };
 
 TEST_F(TombstoneEntryInFlash, PassesChecksumVerification) {
-  EXPECT_EQ(Status::OK, entry_.VerifyChecksumInFlash());
-  EXPECT_EQ(Status::OK, entry_.VerifyChecksum("K", {}));
+  EXPECT_EQ(Status::Ok(), entry_.VerifyChecksumInFlash());
+  EXPECT_EQ(Status::Ok(), entry_.VerifyChecksum("K", {}));
 }
 
 TEST_F(TombstoneEntryInFlash, HeaderContents) {
@@ -257,7 +258,7 @@
   Entry::KeyBuffer key = {};
   auto result = entry_.ReadKey(key);
 
-  ASSERT_EQ(Status::OK, result.status());
+  ASSERT_EQ(Status::Ok(), result.status());
   EXPECT_EQ(result.size(), entry_.key_length());
   EXPECT_STREQ(key.data(), "K");
 }
@@ -266,7 +267,7 @@
   char value[32] = {};
   auto result = entry_.ReadValue(std::as_writable_bytes(std::span(value)));
 
-  ASSERT_EQ(Status::OK, result.status());
+  ASSERT_EQ(Status::Ok(), result.status());
   EXPECT_EQ(0u, result.size());
 }
 
@@ -279,10 +280,10 @@
       Entry::Tombstone(partition, 16, kFormatWithChecksum, "K", 0x03020100);
 
   auto result = entry.Write("K", {});
-  EXPECT_EQ(Status::OK, result.status());
+  EXPECT_EQ(Status::Ok(), result.status());
   EXPECT_EQ(32u, result.size());
   EXPECT_EQ(std::memcmp(&flash.buffer()[16],
-                        AsBytes(kHeader2, kKeyAndPadding2).data(),
+                        bytes::Concat(kHeader2, kKeyAndPadding2).data(),
                         kEntry1.size()),
             0);
 }
@@ -295,36 +296,36 @@
   const EntryFormat format{kMagicWithChecksum, nullptr};
   const internal::EntryFormats formats(format);
 
-  ASSERT_EQ(Status::OK, Entry::Read(partition, 0, formats, &entry));
+  ASSERT_EQ(Status::Ok(), Entry::Read(partition, 0, formats, &entry));
 
-  EXPECT_EQ(Status::DATA_LOSS, entry.VerifyChecksumInFlash());
-  EXPECT_EQ(Status::DATA_LOSS, entry.VerifyChecksum({}, {}));
+  EXPECT_EQ(Status::DataLoss(), entry.VerifyChecksumInFlash());
+  EXPECT_EQ(Status::DataLoss(), entry.VerifyChecksum({}, {}));
 
   std::memset(&flash.buffer()[4], 0, 4);  // set the checksum field to 0
-  ASSERT_EQ(Status::OK, Entry::Read(partition, 0, formats, &entry));
-  EXPECT_EQ(Status::OK, entry.VerifyChecksumInFlash());
-  EXPECT_EQ(Status::OK, entry.VerifyChecksum({}, {}));
+  ASSERT_EQ(Status::Ok(), Entry::Read(partition, 0, formats, &entry));
+  EXPECT_EQ(Status::Ok(), entry.VerifyChecksumInFlash());
+  EXPECT_EQ(Status::Ok(), entry.VerifyChecksum({}, {}));
 }
 
 TEST(Entry, Checksum_ChecksPadding) {
   FakeFlashMemoryBuffer<1024, 4> flash(
-      AsBytes(kHeader1, kKey1, kValue1, ByteStr("\0\0\0\0\1")));
+      bytes::Concat(kHeader1, kKey1, kValue1, bytes::String("\0\0\0\0\1")));
   FlashPartition partition(&flash);
   Entry entry;
-  ASSERT_EQ(Status::OK, Entry::Read(partition, 0, kFormats, &entry));
+  ASSERT_EQ(Status::Ok(), Entry::Read(partition, 0, kFormats, &entry));
 
   // Last byte in padding is a 1; should fail.
-  EXPECT_EQ(Status::DATA_LOSS, entry.VerifyChecksumInFlash());
+  EXPECT_EQ(Status::DataLoss(), entry.VerifyChecksumInFlash());
 
   // The in-memory verification fills in 0s for the padding.
-  EXPECT_EQ(Status::OK, entry.VerifyChecksum("key45", kValue1));
+  EXPECT_EQ(Status::Ok(), entry.VerifyChecksum("key45", kValue1));
 
   flash.buffer()[kEntry1.size() - 1] = byte{0};
-  EXPECT_EQ(Status::OK, entry.VerifyChecksumInFlash());
+  EXPECT_EQ(Status::Ok(), entry.VerifyChecksumInFlash());
 }
 
 TEST_F(ValidEntryInFlash, Update_SameFormat_TransactionIdIsUpdated) {
-  ASSERT_EQ(Status::OK,
+  ASSERT_EQ(Status::Ok(),
             entry_.Update(kFormatWithChecksum, kTransactionId1 + 3));
 
   EXPECT_EQ(kFormatWithChecksum.magic, entry_.magic());
@@ -335,7 +336,7 @@
 
 TEST_F(ValidEntryInFlash,
        Update_DifferentFormat_MagicAndTransactionIdAreUpdated) {
-  ASSERT_EQ(Status::OK, entry_.Update(kFormat, kTransactionId1 + 6));
+  ASSERT_EQ(Status::Ok(), entry_.Update(kFormat, kTransactionId1 + 6));
 
   EXPECT_EQ(kFormat.magic, entry_.magic());
   EXPECT_EQ(0u, entry_.address());
@@ -344,9 +345,9 @@
 }
 
 TEST_F(ValidEntryInFlash, Update_ReadError_WithChecksumIsError) {
-  flash_.InjectReadError(FlashError::Unconditional(Status::ABORTED));
+  flash_.InjectReadError(FlashError::Unconditional(Status::Aborted()));
 
-  EXPECT_EQ(Status::ABORTED,
+  EXPECT_EQ(Status::Aborted(),
             entry_.Update(kFormatWithChecksum, kTransactionId1 + 1));
 }
 
@@ -356,15 +357,16 @@
                                         .checksum = nullptr};
 
 TEST_F(ValidEntryInFlash, Update_ReadError_NoChecksumIsOkay) {
-  flash_.InjectReadError(FlashError::Unconditional(Status::ABORTED));
+  flash_.InjectReadError(FlashError::Unconditional(Status::Aborted()));
 
-  EXPECT_EQ(Status::OK, entry_.Update(kNoChecksumFormat, kTransactionId1 + 1));
+  EXPECT_EQ(Status::Ok(),
+            entry_.Update(kNoChecksumFormat, kTransactionId1 + 1));
 }
 
 TEST_F(ValidEntryInFlash, Copy) {
   auto result = entry_.Copy(123);
 
-  EXPECT_EQ(Status::OK, result.status());
+  EXPECT_EQ(Status::Ok(), result.status());
   EXPECT_EQ(entry_.size(), result.size());
   EXPECT_EQ(0,
             std::memcmp(
@@ -372,9 +374,9 @@
 }
 
 TEST_F(ValidEntryInFlash, Copy_ReadError) {
-  flash_.InjectReadError(FlashError::Unconditional(Status::UNIMPLEMENTED));
+  flash_.InjectReadError(FlashError::Unconditional(Status::Unimplemented()));
   auto result = entry_.Copy(kEntry1.size());
-  EXPECT_EQ(Status::UNIMPLEMENTED, result.status());
+  EXPECT_EQ(Status::Unimplemented(), result.status());
   EXPECT_EQ(0u, result.size());
 }
 
@@ -415,28 +417,29 @@
   constexpr size_t size = AlignUp(kEntryWithoutPadding1.size(), alignment);
 
   constexpr uint32_t checksum =
-      ByteSum(AsBytes(kFormatWithSum.magic)) + 0 /* checksum */ +
+      ByteSum(bytes::Concat(kFormatWithSum.magic)) + 0 /* checksum */ +
       alignment_units + kKey1.size() + kValue1.size() +
-      ByteSum(AsBytes(kTransactionId1 + 1)) + ByteSum(kKey1) +
+      ByteSum(bytes::Concat(kTransactionId1 + 1)) + ByteSum(kKey1) +
       ByteSum(kValue1) + size /* +1 for each byte in the checksum */;
 
   constexpr auto kNewHeader1 =
-      AsBytes(kFormatWithSum.magic,      // magic
-              checksum,                  // checksum (byte sum)
-              alignment_units,           // alignment (in 16 B units)
-              uint8_t(kKey1.size()),     // key length
-              uint16_t(kValue1.size()),  // value size
-              kTransactionId1 + 1);      // transaction ID
+      bytes::Concat(kFormatWithSum.magic,      // magic
+                    checksum,                  // checksum (byte sum)
+                    alignment_units,           // alignment (in 16 B units)
+                    uint8_t(kKey1.size()),     // key length
+                    uint16_t(kValue1.size()),  // value size
+                    kTransactionId1 + 1);      // transaction ID
   constexpr size_t padding = Padding(kEntryWithoutPadding1.size(), alignment);
-  return AsBytes(kNewHeader1, kKey1, kValue1, InitializedBytes<padding>(0));
+  return bytes::Concat(
+      kNewHeader1, kKey1, kValue1, bytes::Initialized<padding>(0));
 }
 
 TEST_F(ValidEntryInFlash, UpdateAndCopy_DifferentFormatSmallerAlignment) {
   // Uses 16-bit alignment, smaller than the original entry's alignment.
-  ASSERT_EQ(Status::OK, entry_.Update(kFormatWithSum, kTransactionId1 + 1));
+  ASSERT_EQ(Status::Ok(), entry_.Update(kFormatWithSum, kTransactionId1 + 1));
 
   StatusWithSize result = entry_.Copy(kEntry1.size());
-  ASSERT_EQ(Status::OK, result.status());
+  ASSERT_EQ(Status::Ok(), result.status());
   EXPECT_EQ(kEntry1.size(), result.size());
 
   constexpr auto new_data = MakeNewFormatWithSumEntry<16>();
@@ -447,9 +450,9 @@
       std::memcmp(
           &flash_.buffer()[kEntry1.size()], new_data.data(), new_data.size()));
   Entry new_entry;
-  ASSERT_EQ(Status::OK,
+  ASSERT_EQ(Status::Ok(),
             Entry::Read(partition_, 32, kFormatsWithSum, &new_entry));
-  EXPECT_EQ(Status::OK, new_entry.VerifyChecksumInFlash());
+  EXPECT_EQ(Status::Ok(), new_entry.VerifyChecksumInFlash());
   EXPECT_EQ(kFormatWithSum.magic, new_entry.magic());
   EXPECT_EQ(kTransactionId1 + 1, new_entry.transaction_id());
 }
@@ -459,12 +462,12 @@
   FakeFlashMemoryBuffer<1024, 4> flash(kEntry1);
   FlashPartition partition(&flash, 0, 4, 32);
   Entry entry;
-  ASSERT_EQ(Status::OK, Entry::Read(partition, 0, kFormats, &entry));
+  ASSERT_EQ(Status::Ok(), Entry::Read(partition, 0, kFormats, &entry));
 
-  ASSERT_EQ(Status::OK, entry.Update(kFormatWithSum, kTransactionId1 + 1));
+  ASSERT_EQ(Status::Ok(), entry.Update(kFormatWithSum, kTransactionId1 + 1));
 
   StatusWithSize result = entry.Copy(32);
-  ASSERT_EQ(Status::OK, result.status());
+  ASSERT_EQ(Status::Ok(), result.status());
   EXPECT_EQ(AlignUp(kEntry1.size(), 32), result.size());
 
   constexpr auto new_data = MakeNewFormatWithSumEntry<32>();
@@ -474,9 +477,9 @@
             std::memcmp(&flash.buffer()[32], new_data.data(), new_data.size()));
 
   Entry new_entry;
-  ASSERT_EQ(Status::OK,
+  ASSERT_EQ(Status::Ok(),
             Entry::Read(partition, 32, kFormatsWithSum, &new_entry));
-  EXPECT_EQ(Status::OK, new_entry.VerifyChecksumInFlash());
+  EXPECT_EQ(Status::Ok(), new_entry.VerifyChecksumInFlash());
   EXPECT_EQ(kTransactionId1 + 1, new_entry.transaction_id());
 }
 
@@ -485,12 +488,12 @@
   FakeFlashMemoryBuffer<1024, 4> flash(kEntry1);
   FlashPartition partition(&flash, 0, 4, 64);
   Entry entry;
-  ASSERT_EQ(Status::OK, Entry::Read(partition, 0, kFormats, &entry));
+  ASSERT_EQ(Status::Ok(), Entry::Read(partition, 0, kFormats, &entry));
 
-  ASSERT_EQ(Status::OK, entry.Update(kFormatWithSum, kTransactionId1 + 1));
+  ASSERT_EQ(Status::Ok(), entry.Update(kFormatWithSum, kTransactionId1 + 1));
 
   StatusWithSize result = entry.Copy(64);
-  ASSERT_EQ(Status::OK, result.status());
+  ASSERT_EQ(Status::Ok(), result.status());
   EXPECT_EQ(AlignUp(kEntry1.size(), 64), result.size());
 
   constexpr auto new_data = MakeNewFormatWithSumEntry<64>();
@@ -500,9 +503,9 @@
             std::memcmp(&flash.buffer()[64], new_data.data(), new_data.size()));
 
   Entry new_entry;
-  ASSERT_EQ(Status::OK,
+  ASSERT_EQ(Status::Ok(),
             Entry::Read(partition, 64, kFormatsWithSum, &new_entry));
-  EXPECT_EQ(Status::OK, new_entry.VerifyChecksumInFlash());
+  EXPECT_EQ(Status::Ok(), new_entry.VerifyChecksumInFlash());
   EXPECT_EQ(kTransactionId1 + 1, new_entry.transaction_id());
 }
 
@@ -511,20 +514,21 @@
   // readable 4 bytes. See pw_kvs/format.h for more information.
   constexpr EntryFormat no_checksum{.magic = 0x43fae18f, .checksum = nullptr};
 
-  ASSERT_EQ(Status::OK, entry_.Update(no_checksum, kTransactionId1 + 1));
+  ASSERT_EQ(Status::Ok(), entry_.Update(no_checksum, kTransactionId1 + 1));
 
   auto result = entry_.Copy(kEntry1.size());
-  ASSERT_EQ(Status::OK, result.status());
+  ASSERT_EQ(Status::Ok(), result.status());
   EXPECT_EQ(kEntry1.size(), result.size());
 
   constexpr auto kNewHeader1 =
-      AsBytes(no_checksum.magic,         // magic
-              uint32_t(0),               // checksum (none)
-              uint8_t(0),                // alignment (changed to 16 B from 32)
-              uint8_t(kKey1.size()),     // key length
-              uint16_t(kValue1.size()),  // value size
-              kTransactionId1 + 1);      // transaction ID
-  constexpr auto kNewEntry1 = AsBytes(kNewHeader1, kKey1, kValue1, kPadding1);
+      bytes::Concat(no_checksum.magic,  // magic
+                    uint32_t(0),        // checksum (none)
+                    uint8_t(0),         // alignment (changed to 16 B from 32)
+                    uint8_t(kKey1.size()),     // key length
+                    uint16_t(kValue1.size()),  // value size
+                    kTransactionId1 + 1);      // transaction ID
+  constexpr auto kNewEntry1 =
+      bytes::Concat(kNewHeader1, kKey1, kValue1, kPadding1);
 
   EXPECT_EQ(0,
             std::memcmp(&flash_.buffer()[kEntry1.size()],
@@ -533,23 +537,23 @@
 }
 
 TEST_F(ValidEntryInFlash, UpdateAndCopyMultple_DifferentFormat) {
-  ASSERT_EQ(Status::OK, entry_.Update(kFormatWithSum, kTransactionId1 + 6));
+  ASSERT_EQ(Status::Ok(), entry_.Update(kFormatWithSum, kTransactionId1 + 6));
 
   FlashPartition::Address new_address = entry_.size();
 
   for (int i = 0; i < 10; i++) {
     StatusWithSize copy_result = entry_.Copy(new_address + (i * entry_.size()));
-    ASSERT_EQ(Status::OK, copy_result.status());
+    ASSERT_EQ(Status::Ok(), copy_result.status());
     ASSERT_EQ(kEntry1.size(), copy_result.size());
   }
 
   for (int j = 0; j < 10; j++) {
     Entry entry;
     FlashPartition::Address read_address = (new_address + (j * entry_.size()));
-    ASSERT_EQ(Status::OK,
+    ASSERT_EQ(Status::Ok(),
               Entry::Read(partition_, read_address, kFormatsWithSum, &entry));
 
-    EXPECT_EQ(Status::OK, entry.VerifyChecksumInFlash());
+    EXPECT_EQ(Status::Ok(), entry.VerifyChecksumInFlash());
     EXPECT_EQ(kFormatWithSum.magic, entry.magic());
     EXPECT_EQ(read_address, entry.address());
     EXPECT_EQ(kTransactionId1 + 6, entry.transaction_id());
@@ -558,26 +562,27 @@
 }
 
 TEST_F(ValidEntryInFlash, DifferentFormat_UpdatedCopy_FailsWithWrongMagic) {
-  ASSERT_EQ(Status::OK, entry_.Update(kFormatWithSum, kTransactionId1 + 6));
+  ASSERT_EQ(Status::Ok(), entry_.Update(kFormatWithSum, kTransactionId1 + 6));
 
   FlashPartition::Address new_address = entry_.size();
 
   StatusWithSize copy_result = entry_.Copy(new_address);
-  ASSERT_EQ(Status::OK, copy_result.status());
+  ASSERT_EQ(Status::Ok(), copy_result.status());
   ASSERT_EQ(kEntry1.size(), copy_result.size());
 
   Entry entry;
-  ASSERT_EQ(Status::DATA_LOSS,
+  ASSERT_EQ(Status::DataLoss(),
             Entry::Read(partition_, new_address, kFormats, &entry));
 }
 
 TEST_F(ValidEntryInFlash, UpdateAndCopy_WriteError) {
-  flash_.InjectWriteError(FlashError::Unconditional(Status::CANCELLED));
+  flash_.InjectWriteError(FlashError::Unconditional(Status::Cancelled()));
 
-  ASSERT_EQ(Status::OK, entry_.Update(kNoChecksumFormat, kTransactionId1 + 1));
+  ASSERT_EQ(Status::Ok(),
+            entry_.Update(kNoChecksumFormat, kTransactionId1 + 1));
 
   auto result = entry_.Copy(kEntry1.size());
-  EXPECT_EQ(Status::CANCELLED, result.status());
+  EXPECT_EQ(Status::Cancelled(), result.status());
   EXPECT_EQ(kEntry1.size(), result.size());
 }
 
diff --git a/pw_kvs/fake_flash_memory.cc b/pw_kvs/fake_flash_memory.cc
index 217870c..7796a72 100644
--- a/pw_kvs/fake_flash_memory.cc
+++ b/pw_kvs/fake_flash_memory.cc
@@ -13,9 +13,11 @@
 // the License.
 
 #define PW_LOG_MODULE_NAME "KVS"
+#define PW_LOG_LEVEL PW_KVS_LOG_LEVEL
 
 #include "pw_kvs/fake_flash_memory.h"
 
+#include "pw_kvs_private/config.h"
 #include "pw_log/log.h"
 
 namespace pw::kvs {
@@ -29,23 +31,23 @@
     }
   }
 
-  return Status::OK;
+  return Status::Ok();
 }
 
 Status FlashError::Check(FlashMemory::Address start_address, size_t size) {
   // Check if the event overlaps with this address range.
   if (begin_ != kAnyAddress &&
       (start_address >= end_ || (start_address + size) <= begin_)) {
-    return Status::OK;
+    return Status::Ok();
   }
 
   if (delay_ > 0u) {
     delay_ -= 1;
-    return Status::OK;
+    return Status::Ok();
   }
 
   if (remaining_ == 0u) {
-    return Status::OK;
+    return Status::Ok();
   }
 
   if (remaining_ != kAlways) {
@@ -60,7 +62,7 @@
     PW_LOG_ERROR(
         "Attempted to erase sector at non-sector aligned boundary; address %x",
         unsigned(address));
-    return Status::INVALID_ARGUMENT;
+    return Status::InvalidArgument();
   }
   const size_t sector_id = address / sector_size_bytes();
   if (address / sector_size_bytes() + num_sectors > sector_count()) {
@@ -69,18 +71,18 @@
         "address: %x, sector implied: %u",
         unsigned(address),
         unsigned(sector_id));
-    return Status::OUT_OF_RANGE;
+    return Status::OutOfRange();
   }
 
   std::memset(
       &buffer_[address], int(kErasedValue), sector_size_bytes() * num_sectors);
-  return Status::OK;
+  return Status::Ok();
 }
 
 StatusWithSize FakeFlashMemory::Read(Address address,
                                      std::span<std::byte> output) {
   if (address + output.size() >= sector_count() * size_bytes()) {
-    return StatusWithSize::OUT_OF_RANGE;
+    return StatusWithSize::OutOfRange();
   }
 
   // Check for injected read errors
@@ -97,14 +99,14 @@
                  unsigned(address),
                  unsigned(data.size()),
                  unsigned(alignment_bytes()));
-    return StatusWithSize::INVALID_ARGUMENT;
+    return StatusWithSize::InvalidArgument();
   }
 
   if (data.size() > sector_size_bytes() - (address % sector_size_bytes())) {
     PW_LOG_ERROR("Write crosses sector boundary; address %x, size %u B",
                  unsigned(address),
                  unsigned(data.size()));
-    return StatusWithSize::INVALID_ARGUMENT;
+    return StatusWithSize::InvalidArgument();
   }
 
   if (address + data.size() > sector_count() * sector_size_bytes()) {
@@ -113,7 +115,7 @@
         unsigned(address),
         unsigned(data.size()),
         unsigned(sector_count() * sector_size_bytes()));
-    return StatusWithSize::OUT_OF_RANGE;
+    return StatusWithSize::OutOfRange();
   }
 
   // Check in erased state
@@ -121,7 +123,7 @@
     if (buffer_[address + i] != kErasedValue) {
       PW_LOG_ERROR("Writing to previously written address: %x",
                    unsigned(address));
-      return StatusWithSize::UNKNOWN;
+      return StatusWithSize::Unknown();
     }
   }
 
diff --git a/pw_kvs/fake_flash_test_key_value_store.cc b/pw_kvs/fake_flash_test_key_value_store.cc
new file mode 100644
index 0000000..49a22cd
--- /dev/null
+++ b/pw_kvs/fake_flash_test_key_value_store.cc
@@ -0,0 +1,76 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_kvs/crc16_checksum.h"
+#include "pw_kvs/fake_flash_memory.h"
+#include "pw_kvs/flash_memory.h"
+#include "pw_kvs/key_value_store.h"
+#include "pw_kvs/test_key_value_store.h"
+
+namespace pw::kvs {
+
+namespace {
+
+#ifndef PW_FLASH_TEST_SECTORS
+#define PW_FLASH_TEST_SECTORS 8U
+#endif  // PW_FLASH_TEST_SECTORS
+
+#ifndef PW_FLASH_TEST_SECTOR_SIZE
+#define PW_FLASH_TEST_SECTOR_SIZE (4 * 1024U)
+#endif  // PW_FLASH_TEST_SECTOR_SIZE
+
+#ifndef PW_FLASH_TEST_ALIGNMENT
+#define PW_FLASH_TEST_ALIGNMENT 16U
+#endif  // PW_FLASH_TEST_ALIGNMENT
+
+#ifndef PW_KVS_TEST_MAX_ENTIRES
+#define PW_KVS_TEST_MAX_ENTIRES 32U
+#endif  // PW_KVS_TEST_MAX_ENTIRES
+
+#ifndef PW_KVS_TEST_REDUNDANCY
+#define PW_KVS_TEST_REDUNDANCY 1U
+#endif  // PW_KVS_TEST_REDUNDANCY
+
+constexpr size_t kFlashTestSectors = PW_FLASH_TEST_SECTORS;
+constexpr size_t kFlashTestSectorSize = PW_FLASH_TEST_SECTOR_SIZE;
+constexpr size_t kFlashTestAlignment = PW_FLASH_TEST_ALIGNMENT;
+
+constexpr size_t kKvsTestMaxEntries = PW_KVS_TEST_MAX_ENTIRES;
+constexpr size_t kKvsTestRedundancy = PW_KVS_TEST_REDUNDANCY;
+
+// Default to 8 x 4k sectors, 16 byte alignment.
+FakeFlashMemoryBuffer<kFlashTestSectorSize, kFlashTestSectors> test_flash(
+    kFlashTestAlignment);
+FlashPartition test_partition(&test_flash);
+
+ChecksumCrc16 kvs_checksum;
+
+// For KVS magic value always use a random 32 bit integer rather than a human
+// readable 4 bytes. See pw_kvs/format.h for more information.
+constexpr EntryFormat kvs_format = {.magic = 0xc40fd8a8,
+                                    .checksum = &kvs_checksum};
+
+KeyValueStoreBuffer<kKvsTestMaxEntries, kFlashTestSectors, kKvsTestRedundancy>
+    test_kvs(&test_partition, kvs_format);
+
+}  // namespace
+
+KeyValueStore& TestKvs() {
+  if (!test_kvs.initialized()) {
+    test_kvs.Init();
+  }
+
+  return test_kvs;
+}
+}  // namespace pw::kvs
diff --git a/pw_kvs/flash_memory.cc b/pw_kvs/flash_memory.cc
index aba923b..93e08d7 100644
--- a/pw_kvs/flash_memory.cc
+++ b/pw_kvs/flash_memory.cc
@@ -13,6 +13,7 @@
 // the License.
 
 #define PW_LOG_MODULE_NAME "KVS"
+#define PW_LOG_LEVEL PW_KVS_LOG_LEVEL
 
 #include "pw_kvs/flash_memory.h"
 
@@ -20,17 +21,18 @@
 #include <cinttypes>
 #include <cstring>
 
+#include "pw_assert/assert.h"
 #include "pw_kvs_private/config.h"
-#include "pw_kvs_private/macros.h"
 #include "pw_log/log.h"
 #include "pw_status/status_with_size.h"
+#include "pw_status/try.h"
 
 namespace pw::kvs {
 
 using std::byte;
 
 StatusWithSize FlashPartition::Output::DoWrite(std::span<const byte> data) {
-  TRY_WITH_SIZE(flash_.Write(address_, data));
+  PW_TRY_WITH_SIZE(flash_.Write(address_, data));
   address_ += data.size();
   return StatusWithSize(data.size());
 }
@@ -41,26 +43,55 @@
   return result;
 }
 
+FlashPartition::FlashPartition(
+    FlashMemory* flash,
+    uint32_t start_sector_index,
+    uint32_t sector_count,
+    uint32_t alignment_bytes,  // Defaults to flash alignment
+    PartitionPermission permission)
+
+    : flash_(*flash),
+      start_sector_index_(start_sector_index),
+      sector_count_(sector_count),
+      alignment_bytes_(
+          alignment_bytes == 0
+              ? flash_.alignment_bytes()
+              : std::max(alignment_bytes, uint32_t(flash_.alignment_bytes()))),
+      permission_(permission) {
+  uint32_t misalignment = (alignment_bytes_ % flash_.alignment_bytes());
+  PW_DCHECK_UINT_EQ(misalignment,
+                    0,
+                    "Flash partition alignmentmust be a multiple of the flash "
+                    "memory alignment");
+}
+
 Status FlashPartition::Erase(Address address, size_t num_sectors) {
   if (permission_ == PartitionPermission::kReadOnly) {
-    return Status::PERMISSION_DENIED;
+    return Status::PermissionDenied();
   }
 
-  TRY(CheckBounds(address, num_sectors * sector_size_bytes()));
+  PW_TRY(CheckBounds(address, num_sectors * sector_size_bytes()));
+  const size_t address_sector_offset = address % sector_size_bytes();
+  PW_CHECK_UINT_EQ(address_sector_offset, 0u);
+
   return flash_.Erase(PartitionToFlashAddress(address), num_sectors);
 }
 
 StatusWithSize FlashPartition::Read(Address address, std::span<byte> output) {
-  TRY_WITH_SIZE(CheckBounds(address, output.size()));
+  PW_TRY_WITH_SIZE(CheckBounds(address, output.size()));
   return flash_.Read(PartitionToFlashAddress(address), output);
 }
 
 StatusWithSize FlashPartition::Write(Address address,
                                      std::span<const byte> data) {
   if (permission_ == PartitionPermission::kReadOnly) {
-    return StatusWithSize::PERMISSION_DENIED;
+    return StatusWithSize::PermissionDenied();
   }
-  TRY_WITH_SIZE(CheckBounds(address, data.size()));
+  PW_TRY_WITH_SIZE(CheckBounds(address, data.size()));
+  const size_t address_alignment_offset = address % alignment_bytes();
+  PW_CHECK_UINT_EQ(address_alignment_offset, 0u);
+  const size_t size_alignment_offset = data.size() % alignment_bytes();
+  PW_CHECK_UINT_EQ(size_alignment_offset, 0u);
   return flash_.Write(PartitionToFlashAddress(address), data);
 }
 
@@ -69,7 +100,7 @@
                                       bool* is_erased) {
   // Relying on Read() to check address and len arguments.
   if (is_erased == nullptr) {
-    return Status::INVALID_ARGUMENT;
+    return Status::InvalidArgument();
   }
 
   // TODO(pwbug/214): Currently using a single flash alignment to do both the
@@ -79,7 +110,7 @@
   const size_t alignment = alignment_bytes();
   if (alignment > kMaxFlashAlignment || kMaxFlashAlignment % alignment ||
       length % alignment) {
-    return Status::INVALID_ARGUMENT;
+    return Status::InvalidArgument();
   }
 
   byte buffer[kMaxFlashAlignment];
@@ -89,12 +120,12 @@
   while (length > 0u) {
     // Check earlier that length is aligned, no need to round up
     size_t read_size = std::min(sizeof(buffer), length);
-    TRY(Read(source_flash_address + offset, read_size, buffer).status());
+    PW_TRY(Read(source_flash_address + offset, read_size, buffer).status());
 
     for (byte b : std::span(buffer, read_size)) {
       if (b != erased_byte) {
         // Detected memory chunk is not entirely erased
-        return Status::OK;
+        return Status::Ok();
       }
     }
 
@@ -102,7 +133,7 @@
     length -= read_size;
   }
   *is_erased = true;
-  return Status::OK;
+  return Status::Ok();
 }
 
 bool FlashPartition::AppearsErased(std::span<const byte> data) const {
@@ -121,9 +152,9 @@
         "Attempted out-of-bound flash memory access (address: %u length: %u)",
         unsigned(address),
         unsigned(length));
-    return Status::OUT_OF_RANGE;
+    return Status::OutOfRange();
   }
-  return Status::OK;
+  return Status::Ok();
 }
 
 }  // namespace pw::kvs
diff --git a/pw_kvs/flash_partition_test.cc b/pw_kvs/flash_partition_test.cc
index 760f814..b9a2299 100644
--- a/pw_kvs/flash_partition_test.cc
+++ b/pw_kvs/flash_partition_test.cc
@@ -37,7 +37,7 @@
 
   const size_t alignment = partition.alignment_bytes();
 
-  ASSERT_EQ(Status::OK, partition.Erase(0, partition.sector_count()));
+  ASSERT_EQ(Status::Ok(), partition.Erase(0, partition.sector_count()));
 
   const size_t chunks_per_sector = partition.sector_size_bytes() / alignment;
 
@@ -53,7 +53,7 @@
          chunk_index++) {
       StatusWithSize status =
           partition.Write(address, as_bytes(std::span(test_data, alignment)));
-      ASSERT_EQ(Status::OK, status.status());
+      ASSERT_EQ(Status::Ok(), status.status());
       ASSERT_EQ(alignment, status.size());
       address += alignment;
     }
@@ -71,7 +71,7 @@
       memset(test_data, 0, sizeof(test_data));
       StatusWithSize status = partition.Read(address, alignment, test_data);
 
-      EXPECT_EQ(Status::OK, status.status());
+      EXPECT_EQ(Status::Ok(), status.status());
       EXPECT_EQ(alignment, status.size());
       if (!status.ok() || (alignment != status.size())) {
         error_count++;
@@ -140,7 +140,8 @@
       std::min(sizeof(test_data), test_partition.sector_size_bytes());
   auto data_span = std::span(test_data, block_size);
 
-  ASSERT_EQ(Status::OK, test_partition.Erase(0, test_partition.sector_count()));
+  ASSERT_EQ(Status::Ok(),
+            test_partition.Erase(0, test_partition.sector_count()));
 
   // Write to the first page of each sector.
   for (size_t sector_index = 0; sector_index < test_partition.sector_count();
@@ -149,20 +150,20 @@
         sector_index * test_partition.sector_size_bytes();
 
     StatusWithSize status = test_partition.Write(address, as_bytes(data_span));
-    ASSERT_EQ(Status::OK, status.status());
+    ASSERT_EQ(Status::Ok(), status.status());
     ASSERT_EQ(block_size, status.size());
   }
 
   // Preset the flag to make sure the check actually sets it.
   bool is_erased = true;
-  ASSERT_EQ(Status::OK, test_partition.IsErased(&is_erased));
+  ASSERT_EQ(Status::Ok(), test_partition.IsErased(&is_erased));
   ASSERT_EQ(false, is_erased);
 
-  ASSERT_EQ(Status::OK, test_partition.Erase());
+  ASSERT_EQ(Status::Ok(), test_partition.Erase());
 
   // Preset the flag to make sure the check actually sets it.
   is_erased = false;
-  ASSERT_EQ(Status::OK, test_partition.IsErased(&is_erased));
+  ASSERT_EQ(Status::Ok(), test_partition.IsErased(&is_erased));
   ASSERT_EQ(true, is_erased);
 
   // Read the first page of each sector and make sure it has been erased.
@@ -173,7 +174,7 @@
 
     StatusWithSize status =
         test_partition.Read(address, data_span.size_bytes(), data_span.data());
-    EXPECT_EQ(Status::OK, status.status());
+    EXPECT_EQ(Status::Ok(), status.status());
     EXPECT_EQ(data_span.size_bytes(), status.size());
 
     EXPECT_EQ(true, test_partition.AppearsErased(as_bytes(data_span)));
@@ -186,11 +187,55 @@
   const size_t sector_size_bytes = test_partition.sector_size_bytes();
 
   EXPECT_LE(alignment, kMaxFlashAlignment);
+  EXPECT_GT(alignment, 0u);
   EXPECT_EQ(kMaxFlashAlignment % alignment, 0U);
   EXPECT_LE(kMaxFlashAlignment, sector_size_bytes);
   EXPECT_LE(sector_size_bytes % kMaxFlashAlignment, 0U);
 }
 
+#define TESTING_CHECK_FAILURES_IS_SUPPORTED 0
+#if TESTING_CHECK_FAILURES_IS_SUPPORTED
+// TODO: Ensure that this test triggers an assert.
+TEST(FlashPartitionTest, BadWriteAddressAlignment) {
+  FlashPartition& test_partition = FlashTestPartition();
+
+  // Can't get bad alignment with alignment of 1.
+  if (test_partition.alignment_bytes() == 1) {
+    return;
+  }
+
+  std::array<std::byte, kMaxFlashAlignment> source_data;
+  test_partition.Write(1, source_data);
+}
+
+// TODO: Ensure that this test triggers an assert.
+TEST(FlashPartitionTest, BadWriteSizeAlignment) {
+  FlashPartition& test_partition = FlashTestPartition();
+
+  // Can't get bad alignment with alignment of 1.
+  if (test_partition.alignment_bytes() == 1) {
+    return;
+  }
+
+  std::array<std::byte, 1> source_data;
+  test_partition.Write(0, source_data);
+}
+
+// TODO: Ensure that this test triggers an assert.
+TEST(FlashPartitionTest, BadEraseAddressAlignment) {
+  FlashPartition& test_partition = FlashTestPartition();
+
+  // Can't get bad alignment with sector size of 1.
+  if (test_partition.sector_size_bytes() == 1) {
+    return;
+  }
+
+  // Try Erase at address 1 for 1 sector.
+  test_partition.Erase(1, 1);
+}
+
+#endif  // TESTING_CHECK_FAILURES_IS_SUPPORTED
+
 TEST(FlashPartitionTest, IsErased) {
   FlashPartition& test_partition = FlashTestPartition();
   const size_t alignment = test_partition.alignment_bytes();
@@ -198,10 +243,10 @@
   // Make sure the partition is big enough to do this test.
   ASSERT_GE(test_partition.size_bytes(), 3 * kMaxFlashAlignment);
 
-  ASSERT_EQ(Status::OK, test_partition.Erase());
+  ASSERT_EQ(Status::Ok(), test_partition.Erase());
 
   bool is_erased = true;
-  ASSERT_EQ(Status::OK, test_partition.IsErased(&is_erased));
+  ASSERT_EQ(Status::Ok(), test_partition.IsErased(&is_erased));
   ASSERT_EQ(true, is_erased);
 
   static const uint8_t fill_byte = 0x55;
@@ -211,26 +256,26 @@
 
   // Write the chunk with fill byte.
   StatusWithSize status = test_partition.Write(alignment, as_bytes(data_span));
-  ASSERT_EQ(Status::OK, status.status());
+  ASSERT_EQ(Status::Ok(), status.status());
   ASSERT_EQ(data_span.size_bytes(), status.size());
 
-  EXPECT_EQ(Status::OK, test_partition.IsErased(&is_erased));
+  EXPECT_EQ(Status::Ok(), test_partition.IsErased(&is_erased));
   EXPECT_EQ(false, is_erased);
 
   // Check the chunk that was written.
-  EXPECT_EQ(Status::OK,
+  EXPECT_EQ(Status::Ok(),
             test_partition.IsRegionErased(
                 alignment, data_span.size_bytes(), &is_erased));
   EXPECT_EQ(false, is_erased);
 
   // Check a region that starts erased but later has been written.
-  EXPECT_EQ(Status::OK,
+  EXPECT_EQ(Status::Ok(),
             test_partition.IsRegionErased(0, 2 * alignment, &is_erased));
   EXPECT_EQ(false, is_erased);
 
   // Check erased for a region smaller than kMaxFlashAlignment. This has been a
   // bug in the past.
-  EXPECT_EQ(Status::OK,
+  EXPECT_EQ(Status::Ok(),
             test_partition.IsRegionErased(0, alignment, &is_erased));
   EXPECT_EQ(true, is_erased);
 }
diff --git a/pw_kvs/flash_partition_with_stats.cc b/pw_kvs/flash_partition_with_stats.cc
index 25e60ad..6b2431b 100644
--- a/pw_kvs/flash_partition_with_stats.cc
+++ b/pw_kvs/flash_partition_with_stats.cc
@@ -13,12 +13,14 @@
 // the License.
 
 #define PW_LOG_MODULE_NAME "KVS"
+#define PW_LOG_LEVEL PW_KVS_LOG_LEVEL
 
 #include "pw_kvs/flash_partition_with_stats.h"
 
 #include <cstdio>
 
 #include "pw_kvs/flash_memory.h"
+#include "pw_kvs_private/config.h"
 #include "pw_log/log.h"
 
 namespace pw::kvs {
@@ -27,7 +29,7 @@
                                                  const char* label) {
   // If size is zero saving stats is disabled so do not save any stats.
   if (sector_counters_.size() == 0) {
-    return Status::OK;
+    return Status::Ok();
   }
 
   KeyValueStore::StorageStats stats = kvs.GetStorageStats();
@@ -37,7 +39,7 @@
   std::FILE* out_file = std::fopen(file_name, "a+");
   if (out_file == nullptr) {
     PW_LOG_ERROR("Failed to dump to %s", file_name);
-    return Status::NOT_FOUND;
+    return Status::NotFound();
   }
 
   // If file is empty add the header row.
@@ -65,7 +67,7 @@
 
   std::fprintf(out_file, "\n");
   std::fclose(out_file);
-  return Status::OK;
+  return Status::Ok();
 }
 
 Status FlashPartitionWithStats::Erase(Address address, size_t num_sectors) {
diff --git a/pw_kvs/key_value_store.cc b/pw_kvs/key_value_store.cc
index a106592..534313e 100644
--- a/pw_kvs/key_value_store.cc
+++ b/pw_kvs/key_value_store.cc
@@ -13,6 +13,8 @@
 // the License.
 
 #define PW_LOG_MODULE_NAME "KVS"
+#define PW_LOG_LEVEL PW_KVS_LOG_LEVEL
+#define PW_LOG_USE_ULTRA_SHORT_NAMES 1
 
 #include "pw_kvs/key_value_store.h"
 
@@ -21,10 +23,10 @@
 #include <cstring>
 #include <type_traits>
 
-#define PW_LOG_USE_ULTRA_SHORT_NAMES 1
 #include "pw_assert/assert.h"
-#include "pw_kvs_private/macros.h"
+#include "pw_kvs_private/config.h"
 #include "pw_log/log.h"
+#include "pw_status/try.h"
 
 namespace pw::kvs {
 namespace {
@@ -53,7 +55,7 @@
       options_(options),
       initialized_(InitializationState::kNotInitialized),
       error_detected_(false),
-      error_stats_({}),
+      internal_stats_({}),
       last_transaction_id_(0) {}
 
 Status KeyValueStore::Init() {
@@ -67,14 +69,14 @@
         "large as the number of sectors in the flash partition (=%u)",
         unsigned(sectors_.max_size()),
         unsigned(partition_.sector_count()));
-    return Status::FAILED_PRECONDITION;
+    return Status::FailedPrecondition();
   }
 
   if (partition_.sector_count() < 2) {
     ERR("KVS init failed: FlashParition sector count (=%u) must be at 2. KVS "
         "requires at least 1 working sector + 1 free/reserved sector",
         unsigned(partition_.sector_count()));
-    return Status::FAILED_PRECONDITION;
+    return Status::FailedPrecondition();
   }
 
   const size_t sector_size_bytes = partition_.sector_size_bytes();
@@ -85,7 +87,7 @@
         "allowed sector size (=%u)",
         unsigned(sector_size_bytes),
         unsigned(SectorDescriptor::max_sector_size()));
-    return Status::FAILED_PRECONDITION;
+    return Status::FailedPrecondition();
   }
 
   Status metadata_result = InitializeMetadata();
@@ -97,19 +99,19 @@
 
     if (options_.recovery != ErrorRecovery::kManual) {
       size_t pre_fix_redundancy_errors =
-          error_stats_.missing_redundant_entries_recovered;
+          internal_stats_.missing_redundant_entries_recovered;
       Status recovery_status = FixErrors();
 
       if (recovery_status.ok()) {
-        if (metadata_result == Status::OUT_OF_RANGE) {
-          error_stats_.missing_redundant_entries_recovered =
+        if (metadata_result == Status::OutOfRange()) {
+          internal_stats_.missing_redundant_entries_recovered =
               pre_fix_redundancy_errors;
           INF("KVS init: Redundancy level successfully updated");
         } else {
           WRN("KVS init: Corruption detected and fully repaired");
         }
         initialized_ = InitializationState::kReady;
-      } else if (recovery_status == Status::RESOURCE_EXHAUSTED) {
+      } else if (recovery_status == Status::ResourceExhausted()) {
         WRN("KVS init: Unable to maintain required free sector");
       } else {
         WRN("KVS init: Corruption detected and unable repair");
@@ -130,10 +132,10 @@
   if (error_detected_) {
     WRN("KVS init: Corruption found but not repaired, KVS unavailable until "
         "successful maintenance.");
-    return Status::DATA_LOSS;
+    return Status::DataLoss();
   }
 
-  return Status::OK;
+  return Status::Ok();
 }
 
 Status KeyValueStore::InitializeMetadata() {
@@ -168,7 +170,7 @@
 
       Address next_entry_address;
       Status status = LoadEntry(entry_address, &next_entry_address);
-      if (status == Status::NOT_FOUND) {
+      if (status == Status::NotFound()) {
         DBG("Hit un-written data in sector; moving to the next sector");
         break;
       } else if (!status.ok()) {
@@ -282,7 +284,7 @@
     if (!other_errors && entry_copies_missing == entry_cache_.total_entries()) {
       INF("KVS configuration changed to redundancy of %u total copies per key",
           unsigned(redundancy()));
-      return Status::OUT_OF_RANGE;
+      return Status::OutOfRange();
     }
   }
 
@@ -292,18 +294,19 @@
         unsigned(total_corrupt_bytes),
         unsigned(corrupt_entries),
         unsigned(entry_copies_missing));
-    return Status::FAILED_PRECONDITION;
+    return Status::FailedPrecondition();
   }
-  return Status::OK;
+  return Status::Ok();
 }
 
 KeyValueStore::StorageStats KeyValueStore::GetStorageStats() const {
   StorageStats stats{};
   const size_t sector_size = partition_.sector_size_bytes();
   bool found_empty_sector = false;
-  stats.corrupt_sectors_recovered = error_stats_.corrupt_sectors_recovered;
+  stats.sector_erase_count = internal_stats_.sector_erase_count;
+  stats.corrupt_sectors_recovered = internal_stats_.corrupt_sectors_recovered;
   stats.missing_redundant_entries_recovered =
-      error_stats_.missing_redundant_entries_recovered;
+      internal_stats_.missing_redundant_entries_recovered;
 
   for (const SectorDescriptor& sector : sectors_) {
     stats.in_use_bytes += sector.valid_bytes();
@@ -352,14 +355,14 @@
 Status KeyValueStore::LoadEntry(Address entry_address,
                                 Address* next_entry_address) {
   Entry entry;
-  TRY(Entry::Read(partition_, entry_address, formats_, &entry));
+  PW_TRY(Entry::Read(partition_, entry_address, formats_, &entry));
 
   // Read the key from flash & validate the entry (which reads the value).
   Entry::KeyBuffer key_buffer;
-  TRY_ASSIGN(size_t key_length, entry.ReadKey(key_buffer));
+  PW_TRY_ASSIGN(size_t key_length, entry.ReadKey(key_buffer));
   const string_view key(key_buffer.data(), key_length);
 
-  TRY(entry.VerifyChecksumInFlash());
+  PW_TRY(entry.VerifyChecksumInFlash());
 
   // A valid entry was found, so update the next entry address before doing any
   // of the checks that happen in AddNewOrUpdateExisting.
@@ -392,26 +395,26 @@
     if (formats_.KnownMagic(magic)) {
       DBG("Found entry magic at address %u", unsigned(address));
       *next_entry_address = address;
-      return Status::OK;
+      return Status::Ok();
     }
   }
 
-  return Status::NOT_FOUND;
+  return Status::NotFound();
 }
 
 StatusWithSize KeyValueStore::Get(string_view key,
                                   std::span<byte> value_buffer,
                                   size_t offset_bytes) const {
-  TRY_WITH_SIZE(CheckReadOperation(key));
+  PW_TRY_WITH_SIZE(CheckReadOperation(key));
 
   EntryMetadata metadata;
-  TRY_WITH_SIZE(FindExisting(key, &metadata));
+  PW_TRY_WITH_SIZE(FindExisting(key, &metadata));
 
   return Get(key, metadata, value_buffer, offset_bytes);
 }
 
 Status KeyValueStore::PutBytes(string_view key, std::span<const byte> value) {
-  TRY(CheckWriteOperation(key));
+  PW_TRY(CheckWriteOperation(key));
   DBG("Writing key/value; key length=%u, value length=%u",
       unsigned(key.size()),
       unsigned(value.size()));
@@ -420,7 +423,7 @@
     DBG("%u B value with %u B key cannot fit in one sector",
         unsigned(value.size()),
         unsigned(key.size()));
-    return Status::INVALID_ARGUMENT;
+    return Status::InvalidArgument();
   }
 
   EntryMetadata metadata;
@@ -435,7 +438,7 @@
     return WriteEntryForExistingKey(metadata, EntryState::kValid, key, value);
   }
 
-  if (status == Status::NOT_FOUND) {
+  if (status == Status::NotFound()) {
     return WriteEntryForNewKey(key, value);
   }
 
@@ -443,10 +446,10 @@
 }
 
 Status KeyValueStore::Delete(string_view key) {
-  TRY(CheckWriteOperation(key));
+  PW_TRY(CheckWriteOperation(key));
 
   EntryMetadata metadata;
-  TRY(FindExisting(key, &metadata));
+  PW_TRY(FindExisting(key, &metadata));
 
   // TODO: figure out logging how to support multiple addresses.
   DBG("Writing tombstone for key 0x%08x in %u sectors including %u",
@@ -484,10 +487,10 @@
 }
 
 StatusWithSize KeyValueStore::ValueSize(string_view key) const {
-  TRY_WITH_SIZE(CheckReadOperation(key));
+  PW_TRY_WITH_SIZE(CheckReadOperation(key));
 
   EntryMetadata metadata;
-  TRY_WITH_SIZE(FindExisting(key, &metadata));
+  PW_TRY_WITH_SIZE(FindExisting(key, &metadata));
 
   return ValueSize(metadata);
 }
@@ -495,7 +498,7 @@
 Status KeyValueStore::ReadEntry(const EntryMetadata& metadata,
                                 Entry& entry) const {
   // Try to read an entry
-  Status read_result = Status::DATA_LOSS;
+  Status read_result = Status::DataLoss();
   for (Address address : metadata.addresses()) {
     read_result = Entry::Read(partition_, address, formats_, &entry);
     if (read_result.ok()) {
@@ -528,9 +531,9 @@
 
   // If the key's hash collides with an existing key or if the key is deleted,
   // treat it as if it is not in the KVS.
-  if (status == Status::ALREADY_EXISTS ||
+  if (status == Status::AlreadyExists() ||
       (status.ok() && metadata->state() == EntryState::kDeleted)) {
-    return Status::NOT_FOUND;
+    return Status::NotFound();
   }
   return status;
 }
@@ -541,7 +544,7 @@
                                   size_t offset_bytes) const {
   Entry entry;
 
-  TRY_WITH_SIZE(ReadEntry(metadata, entry));
+  PW_TRY_WITH_SIZE(ReadEntry(metadata, entry));
 
   StatusWithSize result = entry.ReadValue(value_buffer, offset_bytes);
   if (result.ok() && options_.verify_on_read && offset_bytes == 0u) {
@@ -560,10 +563,10 @@
 Status KeyValueStore::FixedSizeGet(std::string_view key,
                                    void* value,
                                    size_t size_bytes) const {
-  TRY(CheckWriteOperation(key));
+  PW_TRY(CheckWriteOperation(key));
 
   EntryMetadata metadata;
-  TRY(FindExisting(key, &metadata));
+  PW_TRY(FindExisting(key, &metadata));
 
   return FixedSizeGet(key, metadata, value, size_bytes);
 }
@@ -574,13 +577,13 @@
                                    size_t size_bytes) const {
   // Ensure that the size of the stored value matches the size of the type.
   // Otherwise, report error. This check avoids potential memory corruption.
-  TRY_ASSIGN(const size_t actual_size, ValueSize(metadata));
+  PW_TRY_ASSIGN(const size_t actual_size, ValueSize(metadata));
 
   if (actual_size != size_bytes) {
     DBG("Requested %u B read, but value is %u B",
         unsigned(size_bytes),
         unsigned(actual_size));
-    return Status::INVALID_ARGUMENT;
+    return Status::InvalidArgument();
   }
 
   StatusWithSize result =
@@ -591,34 +594,34 @@
 
 StatusWithSize KeyValueStore::ValueSize(const EntryMetadata& metadata) const {
   Entry entry;
-  TRY_WITH_SIZE(ReadEntry(metadata, entry));
+  PW_TRY_WITH_SIZE(ReadEntry(metadata, entry));
 
   return StatusWithSize(entry.value_size());
 }
 
 Status KeyValueStore::CheckWriteOperation(string_view key) const {
   if (InvalidKey(key)) {
-    return Status::INVALID_ARGUMENT;
+    return Status::InvalidArgument();
   }
 
   // For normal write operation the KVS must be fully ready.
   if (!initialized()) {
-    return Status::FAILED_PRECONDITION;
+    return Status::FailedPrecondition();
   }
-  return Status::OK;
+  return Status::Ok();
 }
 
 Status KeyValueStore::CheckReadOperation(string_view key) const {
   if (InvalidKey(key)) {
-    return Status::INVALID_ARGUMENT;
+    return Status::InvalidArgument();
   }
 
   // Operations that are explicitly read-only can be done after init() has been
   // called but not fully ready (when needing maintenance).
   if (initialized_ == InitializationState::kNotInitialized) {
-    return Status::FAILED_PRECONDITION;
+    return Status::FailedPrecondition();
   }
-  return Status::OK;
+  return Status::Ok();
 }
 
 Status KeyValueStore::WriteEntryForExistingKey(EntryMetadata& metadata,
@@ -627,7 +630,7 @@
                                                std::span<const byte> value) {
   // Read the original entry to get the size for sector accounting purposes.
   Entry entry;
-  TRY(ReadEntry(metadata, entry));
+  PW_TRY(ReadEntry(metadata, entry));
 
   return WriteEntry(key, value, new_state, &metadata, &entry);
 }
@@ -637,7 +640,7 @@
   if (entry_cache_.full()) {
     WRN("KVS full: trying to store a new entry, but can't. Have %u entries",
         unsigned(entry_cache_.total_entries()));
-    return Status::RESOURCE_EXHAUSTED;
+    return Status::ResourceExhausted();
   }
 
   return WriteEntry(key, value, EntryState::kValid);
@@ -659,7 +662,7 @@
     // keep the existing entry.
     DBG("Write for key 0x%08x with matching value skipped",
         unsigned(prior_metadata->hash()));
-    return Status::OK;
+    return Status::Ok();
   }
 
   // List of addresses for sectors with space for this entry.
@@ -668,11 +671,11 @@
   // Find addresses to write the entry to. This may involve garbage collecting
   // one or more sectors.
   const size_t entry_size = Entry::size(partition_, key, value);
-  TRY(GetAddressesForWrite(reserved_addresses, entry_size));
+  PW_TRY(GetAddressesForWrite(reserved_addresses, entry_size));
 
   // Write the entry at the first address that was found.
   Entry entry = CreateEntry(reserved_addresses[0], key, value, new_state);
-  TRY(AppendEntry(entry, key, value));
+  PW_TRY(AppendEntry(entry, key, value));
 
   // After writing the first entry successfully, update the key descriptors.
   // Once a single new the entry is written, the old entries are invalidated.
@@ -683,10 +686,10 @@
   // Write the additional copies of the entry, if redundancy is greater than 1.
   for (size_t i = 1; i < redundancy(); ++i) {
     entry.set_address(reserved_addresses[i]);
-    TRY(AppendEntry(entry, key, value));
+    PW_TRY(AppendEntry(entry, key, value));
     new_metadata.AddNewAddress(reserved_addresses[i]);
   }
-  return Status::OK;
+  return Status::Ok();
 }
 
 KeyValueStore::EntryMetadata KeyValueStore::CreateOrUpdateKeyDescriptor(
@@ -721,7 +724,8 @@
                                            size_t write_size) {
   for (size_t i = 0; i < redundancy(); i++) {
     SectorDescriptor* sector;
-    TRY(GetSectorForWrite(&sector, write_size, std::span(write_addresses, i)));
+    PW_TRY(
+        GetSectorForWrite(&sector, write_size, std::span(write_addresses, i)));
     write_addresses[i] = sectors_.NextWritableAddress(*sector);
 
     DBG("Found space for entry in sector %u at address %u",
@@ -729,7 +733,7 @@
         unsigned(write_addresses[i]));
   }
 
-  return Status::OK;
+  return Status::Ok();
 }
 
 // Finds a sector to use for writing a new entry to. Does automatic garbage
@@ -746,7 +750,7 @@
   bool do_auto_gc = options_.gc_on_write != GargbageCollectOnWrite::kDisabled;
 
   // Do garbage collection as needed, so long as policy allows.
-  while (result == Status::RESOURCE_EXHAUSTED && do_auto_gc) {
+  while (result == Status::ResourceExhausted() && do_auto_gc) {
     if (options_.gc_on_write == GargbageCollectOnWrite::kOneSector) {
       // If GC config option is kOneSector clear the flag to not do any more
       // GC after this try.
@@ -755,9 +759,9 @@
     // Garbage collect and then try again to find the best sector.
     Status gc_status = GarbageCollect(reserved);
     if (!gc_status.ok()) {
-      if (gc_status == Status::NOT_FOUND) {
+      if (gc_status == Status::NotFound()) {
         // Not enough space, and no reclaimable bytes, this KVS is full!
-        return Status::RESOURCE_EXHAUSTED;
+        return Status::ResourceExhausted();
       }
       return gc_status;
     }
@@ -771,7 +775,7 @@
     // that have copies of the key trying to be written.
     if (gc_sector_count > (partition_.sector_count() + 2)) {
       ERR("Did more GC sectors than total sectors!!!!");
-      return Status::RESOURCE_EXHAUSTED;
+      return Status::ResourceExhausted();
     }
   }
 
@@ -803,16 +807,16 @@
         unsigned(entry.size()),
         unsigned(entry.address()),
         unsigned(result.size()));
-    TRY(MarkSectorCorruptIfNotOk(result.status(), &sector));
+    PW_TRY(MarkSectorCorruptIfNotOk(result.status(), &sector));
   }
 
   if (options_.verify_on_write) {
-    TRY(MarkSectorCorruptIfNotOk(entry.VerifyChecksumInFlash(), &sector));
+    PW_TRY(MarkSectorCorruptIfNotOk(entry.VerifyChecksumInFlash(), &sector));
   }
 
   sector.RemoveWritableBytes(result.size());
   sector.AddValidBytes(result.size());
-  return Status::OK;
+  return Status::Ok();
 }
 
 StatusWithSize KeyValueStore::CopyEntryToSector(Entry& entry,
@@ -820,16 +824,16 @@
                                                 Address new_address) {
   const StatusWithSize result = entry.Copy(new_address);
 
-  TRY_WITH_SIZE(MarkSectorCorruptIfNotOk(result.status(), new_sector));
+  PW_TRY_WITH_SIZE(MarkSectorCorruptIfNotOk(result.status(), new_sector));
 
   if (options_.verify_on_write) {
     Entry new_entry;
-    TRY_WITH_SIZE(MarkSectorCorruptIfNotOk(
+    PW_TRY_WITH_SIZE(MarkSectorCorruptIfNotOk(
         Entry::Read(partition_, new_address, formats_, &new_entry),
         new_sector));
     // TODO: add test that catches doing the verify on the old entry.
-    TRY_WITH_SIZE(MarkSectorCorruptIfNotOk(new_entry.VerifyChecksumInFlash(),
-                                           new_sector));
+    PW_TRY_WITH_SIZE(MarkSectorCorruptIfNotOk(new_entry.VerifyChecksumInFlash(),
+                                              new_sector));
   }
   // Entry was written successfully; update descriptor's address and the sector
   // descriptors to reflect the new entry.
@@ -844,7 +848,7 @@
     KeyValueStore::Address& address,
     std::span<const Address> reserved_addresses) {
   Entry entry;
-  TRY(ReadEntry(metadata, entry));
+  PW_TRY(ReadEntry(metadata, entry));
 
   // Find a new sector for the entry and write it to the new location. For
   // relocation the find should not not be a sector already containing the key
@@ -854,21 +858,21 @@
   // an immediate extra relocation).
   SectorDescriptor* new_sector;
 
-  TRY(sectors_.FindSpaceDuringGarbageCollection(
+  PW_TRY(sectors_.FindSpaceDuringGarbageCollection(
       &new_sector, entry.size(), metadata.addresses(), reserved_addresses));
 
   Address new_address = sectors_.NextWritableAddress(*new_sector);
-  TRY_ASSIGN(const size_t result_size,
-             CopyEntryToSector(entry, new_sector, new_address));
+  PW_TRY_ASSIGN(const size_t result_size,
+                CopyEntryToSector(entry, new_sector, new_address));
   sectors_.FromAddress(address).RemoveValidBytes(result_size);
   address = new_address;
 
-  return Status::OK;
+  return Status::Ok();
 }
 
-Status KeyValueStore::FullMaintenance() {
+Status KeyValueStore::FullMaintenanceHelper(MaintenanceType maintenance_type) {
   if (initialized_ == InitializationState::kNotInitialized) {
-    return Status::FAILED_PRECONDITION;
+    return Status::FailedPrecondition();
   }
 
   // Full maintenance can be a potentially heavy operation, and should be
@@ -877,7 +881,7 @@
   CheckForErrors();
 
   if (error_detected_) {
-    TRY(Repair());
+    PW_TRY(Repair());
   }
   StatusWithSize update_status = UpdateEntriesToPrimaryFormat();
   Status overall_status = update_status.status();
@@ -896,7 +900,8 @@
   // Is bytes in use over the threshold.
   StorageStats stats = GetStorageStats();
   bool over_usage_threshold = stats.in_use_bytes > threshold_bytes;
-  bool force_gc = over_usage_threshold || (update_status.size() > 0);
+  bool heavy = (maintenance_type == MaintenanceType::kHeavy);
+  bool force_gc = heavy || over_usage_threshold || (update_status.size() > 0);
 
   // TODO: look in to making an iterator method for cycling through sectors
   // starting from last_new_sector_.
@@ -930,13 +935,13 @@
 
 Status KeyValueStore::PartialMaintenance() {
   if (initialized_ == InitializationState::kNotInitialized) {
-    return Status::FAILED_PRECONDITION;
+    return Status::FailedPrecondition();
   }
 
   CheckForErrors();
   // Do automatic repair, if KVS options allow for it.
   if (error_detected_ && options_.recovery != ErrorRecovery::kManual) {
-    TRY(Repair());
+    PW_TRY(Repair());
   }
   return GarbageCollect(std::span<const Address>());
 }
@@ -954,7 +959,7 @@
 
   if (sector_to_gc == nullptr) {
     // Nothing to GC.
-    return Status::NOT_FOUND;
+    return Status::NotFound();
   }
 
   // Step 2: Garbage collect the selected sector.
@@ -970,21 +975,22 @@
       DBG("  Relocate entry for Key 0x%08" PRIx32 ", sector %u",
           metadata.hash(),
           sectors_.Index(sectors_.FromAddress(address)));
-      TRY(RelocateEntry(metadata, address, reserved_addresses));
+      PW_TRY(RelocateEntry(metadata, address, reserved_addresses));
     }
   }
 
-  return Status::OK;
+  return Status::Ok();
 };
 
 Status KeyValueStore::GarbageCollectSector(
     SectorDescriptor& sector_to_gc,
     std::span<const Address> reserved_addresses) {
   DBG("  Garbage Collect sector %u", sectors_.Index(sector_to_gc));
+
   // Step 1: Move any valid entries in the GC sector to other sectors
   if (sector_to_gc.valid_bytes() != 0) {
     for (EntryMetadata& metadata : entry_cache_) {
-      TRY(RelocateKeyAddressesInSector(
+      PW_TRY(RelocateKeyAddressesInSector(
           sector_to_gc, metadata, reserved_addresses));
     }
   }
@@ -993,23 +999,26 @@
     ERR("  Failed to relocate valid entries from sector being garbage "
         "collected, %u valid bytes remain",
         unsigned(sector_to_gc.valid_bytes()));
-    return Status::INTERNAL;
+    return Status::Internal();
   }
 
   // Step 2: Reinitialize the sector
-  sector_to_gc.mark_corrupt();
-  TRY(partition_.Erase(sectors_.BaseAddress(sector_to_gc), 1));
-  sector_to_gc.set_writable_bytes(partition_.sector_size_bytes());
+  if (!sector_to_gc.Empty(partition_.sector_size_bytes())) {
+    sector_to_gc.mark_corrupt();
+    internal_stats_.sector_erase_count++;
+    PW_TRY(partition_.Erase(sectors_.BaseAddress(sector_to_gc), 1));
+    sector_to_gc.set_writable_bytes(partition_.sector_size_bytes());
+  }
 
   DBG("  Garbage Collect sector %u complete", sectors_.Index(sector_to_gc));
-  return Status::OK;
+  return Status::Ok();
 }
 
 StatusWithSize KeyValueStore::UpdateEntriesToPrimaryFormat() {
   size_t entries_updated = 0;
   for (EntryMetadata& prior_metadata : entry_cache_) {
     Entry entry;
-    TRY_WITH_SIZE(ReadEntry(prior_metadata, entry));
+    PW_TRY_WITH_SIZE(ReadEntry(prior_metadata, entry));
     if (formats_.primary().magic == entry.magic()) {
       // Ignore entries that are already on the primary format.
       continue;
@@ -1024,16 +1033,16 @@
     entries_updated++;
 
     last_transaction_id_ += 1;
-    TRY_WITH_SIZE(entry.Update(formats_.primary(), last_transaction_id_));
+    PW_TRY_WITH_SIZE(entry.Update(formats_.primary(), last_transaction_id_));
 
     // List of addresses for sectors with space for this entry.
     Address* reserved_addresses = entry_cache_.TempReservedAddressesForWrite();
 
     // Find addresses to write the entry to. This may involve garbage collecting
     // one or more sectors.
-    TRY_WITH_SIZE(GetAddressesForWrite(reserved_addresses, entry.size()));
+    PW_TRY_WITH_SIZE(GetAddressesForWrite(reserved_addresses, entry.size()));
 
-    TRY_WITH_SIZE(
+    PW_TRY_WITH_SIZE(
         CopyEntryToSector(entry,
                           &sectors_.FromAddress(reserved_addresses[0]),
                           reserved_addresses[0]));
@@ -1046,7 +1055,7 @@
     // Write the additional copies of the entry, if redundancy is greater
     // than 1.
     for (size_t i = 1; i < redundancy(); ++i) {
-      TRY_WITH_SIZE(
+      PW_TRY_WITH_SIZE(
           CopyEntryToSector(entry,
                             &sectors_.FromAddress(reserved_addresses[i]),
                             reserved_addresses[i]));
@@ -1060,19 +1069,19 @@
 // Add any missing redundant entries/copies for a key.
 Status KeyValueStore::AddRedundantEntries(EntryMetadata& metadata) {
   Entry entry;
-  TRY(ReadEntry(metadata, entry));
-  TRY(entry.VerifyChecksumInFlash());
+  PW_TRY(ReadEntry(metadata, entry));
+  PW_TRY(entry.VerifyChecksumInFlash());
 
   while (metadata.addresses().size() < redundancy()) {
     SectorDescriptor* new_sector;
-    TRY(GetSectorForWrite(&new_sector, entry.size(), metadata.addresses()));
+    PW_TRY(GetSectorForWrite(&new_sector, entry.size(), metadata.addresses()));
 
     Address new_address = sectors_.NextWritableAddress(*new_sector);
-    TRY(CopyEntryToSector(entry, new_sector, new_address));
+    PW_TRY(CopyEntryToSector(entry, new_sector, new_address));
 
     metadata.AddNewAddress(new_address);
   }
-  return Status::OK;
+  return Status::Ok();
 }
 
 Status KeyValueStore::RepairCorruptSectors() {
@@ -1080,15 +1089,15 @@
   // sector failed on the first pass, then do a second pass, since a later
   // sector might have cleared up space or otherwise unblocked the earlier
   // failed sector.
-  Status repair_status = Status::OK;
+  Status repair_status = Status::Ok();
 
   size_t loop_count = 0;
   do {
     loop_count++;
     // Error of RESOURCE_EXHAUSTED indicates no space found for relocation.
     // Reset back to OK for the next pass.
-    if (repair_status == Status::RESOURCE_EXHAUSTED) {
-      repair_status = Status::OK;
+    if (repair_status == Status::ResourceExhausted()) {
+      repair_status = Status::Ok();
     }
 
     DBG("   Pass %u", unsigned(loop_count));
@@ -1097,9 +1106,9 @@
         DBG("   Found sector %u with corruption", sectors_.Index(sector));
         Status sector_status = GarbageCollectSector(sector, {});
         if (sector_status.ok()) {
-          error_stats_.corrupt_sectors_recovered += 1;
+          internal_stats_.corrupt_sectors_recovered += 1;
         } else if (repair_status.ok() ||
-                   repair_status == Status::RESOURCE_EXHAUSTED) {
+                   repair_status == Status::ResourceExhausted()) {
           repair_status = sector_status;
         }
       }
@@ -1111,7 +1120,7 @@
 }
 
 Status KeyValueStore::EnsureFreeSectorExists() {
-  Status repair_status = Status::OK;
+  Status repair_status = Status::Ok();
   bool empty_sector_found = false;
 
   DBG("   Find empty sector");
@@ -1135,11 +1144,11 @@
 }
 
 Status KeyValueStore::EnsureEntryRedundancy() {
-  Status repair_status = Status::OK;
+  Status repair_status = Status::Ok();
 
   if (redundancy() == 1) {
     DBG("   Redundancy not in use, nothting to check");
-    return Status::OK;
+    return Status::Ok();
   }
 
   DBG("   Write any needed additional duplicate copies of keys to fulfill %u"
@@ -1155,7 +1164,7 @@
         unsigned(redundancy()));
     Status fill_status = AddRedundantEntries(metadata);
     if (fill_status.ok()) {
-      error_stats_.missing_redundant_entries_recovered += 1;
+      internal_stats_.missing_redundant_entries_recovered += 1;
       DBG("   Key missing copies added");
     } else {
       DBG("   Failed to add key missing copies");
diff --git a/pw_kvs/key_value_store_binary_format_test.cc b/pw_kvs/key_value_store_binary_format_test.cc
index cf8ec41..76c0667 100644
--- a/pw_kvs/key_value_store_binary_format_test.cc
+++ b/pw_kvs/key_value_store_binary_format_test.cc
@@ -17,12 +17,12 @@
 #include <string_view>
 
 #include "gtest/gtest.h"
+#include "pw_bytes/array.h"
 #include "pw_kvs/crc16_checksum.h"
 #include "pw_kvs/fake_flash_memory.h"
 #include "pw_kvs/format.h"
 #include "pw_kvs/internal/hash.h"
 #include "pw_kvs/key_value_store.h"
-#include "pw_kvs_private/byte_utils.h"
 
 namespace pw::kvs {
 namespace {
@@ -80,15 +80,16 @@
                               const std::array<byte, kValueSize>& value) {
   constexpr size_t kKeyLength = kKeyLengthWithNull - 1;
 
-  auto data = AsBytes(magic,
-                      uint32_t(0),
-                      uint8_t(kAlignmentBytes / 16 - 1),
-                      uint8_t(kKeyLength),
-                      uint16_t(kValueSize),
-                      id,
-                      ByteStr(key),
-                      std::span(value),
-                      EntryPadding<kAlignmentBytes, kKeyLength, kValueSize>());
+  auto data =
+      bytes::Concat(magic,
+                    uint32_t(0),
+                    uint8_t(kAlignmentBytes / 16 - 1),
+                    uint8_t(kKeyLength),
+                    uint16_t(kValueSize),
+                    id,
+                    bytes::String(key),
+                    std::span(value),
+                    EntryPadding<kAlignmentBytes, kKeyLength, kValueSize>());
 
   // Calculate the checksum
   uint32_t checksum = kChecksum(data, 0);
@@ -110,14 +111,14 @@
                                 const char (&key)[kKeyLengthWithNull]) {
   constexpr size_t kKeyLength = kKeyLengthWithNull - 1;
 
-  auto data = AsBytes(magic,
-                      uint32_t(0),
-                      uint8_t(kAlignmentBytes / 16 - 1),
-                      uint8_t(kKeyLength),
-                      uint16_t(0xFFFF),
-                      id,
-                      ByteStr(key),
-                      EntryPadding<kAlignmentBytes, kKeyLength>());
+  auto data = bytes::Concat(magic,
+                            uint32_t(0),
+                            uint8_t(kAlignmentBytes / 16 - 1),
+                            uint8_t(kKeyLength),
+                            uint16_t(0xFFFF),
+                            id,
+                            bytes::String(key),
+                            EntryPadding<kAlignmentBytes, kKeyLength>());
 
   // Calculate the checksum
   uint32_t checksum = kChecksum(data, 0);
@@ -154,12 +155,16 @@
     .verify_on_write = true,
 };
 
-constexpr auto kEntry1 = MakeValidEntry(kMagic, 1, "key1", ByteStr("value1"));
-constexpr auto kEntry2 = MakeValidEntry(kMagic, 3, "k2", ByteStr("value2"));
-constexpr auto kEntry3 = MakeValidEntry(kMagic, 4, "k3y", ByteStr("value3"));
-constexpr auto kEntry4 = MakeValidEntry(kMagic, 5, "4k", ByteStr("value4"));
+constexpr auto kEntry1 =
+    MakeValidEntry(kMagic, 1, "key1", bytes::String("value1"));
+constexpr auto kEntry2 =
+    MakeValidEntry(kMagic, 3, "k2", bytes::String("value2"));
+constexpr auto kEntry3 =
+    MakeValidEntry(kMagic, 4, "k3y", bytes::String("value3"));
+constexpr auto kEntry4 =
+    MakeValidEntry(kMagic, 5, "4k", bytes::String("value4"));
 
-constexpr auto kEmpty32Bytes = InitializedBytes<32>(0xff);
+constexpr auto kEmpty32Bytes = bytes::Initialized<32>(0xff);
 static_assert(sizeof(kEmpty32Bytes) == 32);
 
 EntryFormat default_format = {.magic = kMagic, .checksum = &default_checksum};
@@ -182,33 +187,33 @@
 };
 
 TEST_F(KvsErrorHandling, Init_Ok) {
-  InitFlashTo(AsBytes(kEntry1, kEntry2));
+  InitFlashTo(bytes::Concat(kEntry1, kEntry2));
 
-  EXPECT_EQ(Status::OK, kvs_.Init());
+  EXPECT_EQ(Status::Ok(), kvs_.Init());
   byte buffer[64];
-  EXPECT_EQ(Status::OK, kvs_.Get("key1", buffer).status());
-  EXPECT_EQ(Status::OK, kvs_.Get("k2", buffer).status());
+  EXPECT_EQ(Status::Ok(), kvs_.Get("key1", buffer).status());
+  EXPECT_EQ(Status::Ok(), kvs_.Get("k2", buffer).status());
 }
 
 TEST_F(KvsErrorHandling, Init_DuplicateEntries_ReturnsDataLossButReadsEntry) {
-  InitFlashTo(AsBytes(kEntry1, kEntry1));
+  InitFlashTo(bytes::Concat(kEntry1, kEntry1));
 
-  EXPECT_EQ(Status::DATA_LOSS, kvs_.Init());
+  EXPECT_EQ(Status::DataLoss(), kvs_.Init());
   byte buffer[64];
-  EXPECT_EQ(Status::OK, kvs_.Get("key1", buffer).status());
-  EXPECT_EQ(Status::NOT_FOUND, kvs_.Get("k2", buffer).status());
+  EXPECT_EQ(Status::Ok(), kvs_.Get("key1", buffer).status());
+  EXPECT_EQ(Status::NotFound(), kvs_.Get("k2", buffer).status());
 }
 
 TEST_F(KvsErrorHandling, Init_CorruptEntry_FindsSubsequentValidEntry) {
   // Corrupt each byte in the first entry once.
   for (size_t i = 0; i < kEntry1.size(); ++i) {
-    InitFlashTo(AsBytes(kEntry1, kEntry2));
+    InitFlashTo(bytes::Concat(kEntry1, kEntry2));
     flash_.buffer()[i] = byte(int(flash_.buffer()[i]) + 1);
 
-    ASSERT_EQ(Status::DATA_LOSS, kvs_.Init());
+    ASSERT_EQ(Status::DataLoss(), kvs_.Init());
     byte buffer[64];
-    ASSERT_EQ(Status::NOT_FOUND, kvs_.Get("key1", buffer).status());
-    ASSERT_EQ(Status::OK, kvs_.Get("k2", buffer).status());
+    ASSERT_EQ(Status::NotFound(), kvs_.Get("key1", buffer).status());
+    ASSERT_EQ(Status::Ok(), kvs_.Get("k2", buffer).status());
 
     auto stats = kvs_.GetStorageStats();
     // One valid entry.
@@ -219,21 +224,21 @@
 }
 
 TEST_F(KvsErrorHandling, Init_CorruptEntry_CorrectlyAccountsForSectorSize) {
-  InitFlashTo(AsBytes(kEntry1, kEntry2, kEntry3, kEntry4));
+  InitFlashTo(bytes::Concat(kEntry1, kEntry2, kEntry3, kEntry4));
 
   // Corrupt the first and third entries.
   flash_.buffer()[9] = byte(0xef);
   flash_.buffer()[67] = byte(0xef);
 
-  ASSERT_EQ(Status::DATA_LOSS, kvs_.Init());
+  ASSERT_EQ(Status::DataLoss(), kvs_.Init());
 
   EXPECT_EQ(2u, kvs_.size());
 
   byte buffer[64];
-  EXPECT_EQ(Status::NOT_FOUND, kvs_.Get("key1", buffer).status());
-  EXPECT_EQ(Status::OK, kvs_.Get("k2", buffer).status());
-  EXPECT_EQ(Status::NOT_FOUND, kvs_.Get("k3y", buffer).status());
-  EXPECT_EQ(Status::OK, kvs_.Get("4k", buffer).status());
+  EXPECT_EQ(Status::NotFound(), kvs_.Get("key1", buffer).status());
+  EXPECT_EQ(Status::Ok(), kvs_.Get("k2", buffer).status());
+  EXPECT_EQ(Status::NotFound(), kvs_.Get("k3y", buffer).status());
+  EXPECT_EQ(Status::Ok(), kvs_.Get("4k", buffer).status());
 
   auto stats = kvs_.GetStorageStats();
   ASSERT_EQ(64u, stats.in_use_bytes);
@@ -242,17 +247,17 @@
 }
 
 TEST_F(KvsErrorHandling, Init_ReadError_InitializedWithSingleEntryError) {
-  InitFlashTo(AsBytes(kEntry1, kEntry2));
+  InitFlashTo(bytes::Concat(kEntry1, kEntry2));
 
   flash_.InjectReadError(
-      FlashError::InRange(Status::UNAUTHENTICATED, kEntry1.size()));
+      FlashError::InRange(Status::Unauthenticated(), kEntry1.size()));
 
-  EXPECT_EQ(Status::DATA_LOSS, kvs_.Init());
+  EXPECT_EQ(Status::DataLoss(), kvs_.Init());
   EXPECT_FALSE(kvs_.initialized());
 }
 
 TEST_F(KvsErrorHandling, Init_CorruptSectors_ShouldBeUnwritable) {
-  InitFlashTo(AsBytes(kEntry1, kEntry2));
+  InitFlashTo(bytes::Concat(kEntry1, kEntry2));
 
   // Corrupt 3 of the 4 512-byte flash sectors. Corrupt sectors should be
   // unwritable, and the KVS must maintain one empty sector at all times.
@@ -262,15 +267,16 @@
   flash_.buffer()[513] = byte(0xef);
   flash_.buffer()[1025] = byte(0xef);
 
-  ASSERT_EQ(Status::DATA_LOSS, kvs_.Init());
-  EXPECT_EQ(Status::FAILED_PRECONDITION, kvs_.Put("hello", ByteStr("world")));
-  EXPECT_EQ(Status::FAILED_PRECONDITION, kvs_.Put("a", ByteStr("b")));
+  ASSERT_EQ(Status::DataLoss(), kvs_.Init());
+  EXPECT_EQ(Status::FailedPrecondition(),
+            kvs_.Put("hello", bytes::String("world")));
+  EXPECT_EQ(Status::FailedPrecondition(), kvs_.Put("a", bytes::String("b")));
 
   // Existing valid entries should still be readable.
   EXPECT_EQ(1u, kvs_.size());
   byte buffer[64];
-  EXPECT_EQ(Status::NOT_FOUND, kvs_.Get("key1", buffer).status());
-  EXPECT_EQ(Status::OK, kvs_.Get("k2", buffer).status());
+  EXPECT_EQ(Status::NotFound(), kvs_.Get("key1", buffer).status());
+  EXPECT_EQ(Status::Ok(), kvs_.Get("k2", buffer).status());
 
   auto stats = kvs_.GetStorageStats();
   EXPECT_EQ(32u, stats.in_use_bytes);
@@ -279,7 +285,7 @@
 }
 
 TEST_F(KvsErrorHandling, Init_CorruptSectors_ShouldRecoverOne) {
-  InitFlashTo(AsBytes(kEntry1, kEntry2));
+  InitFlashTo(bytes::Concat(kEntry1, kEntry2));
 
   // Corrupt all of the 4 512-byte flash sectors. Leave the pre-init entries
   // intact. The KVS should be unavailable because recovery is set to full
@@ -289,7 +295,7 @@
   flash_.buffer()[1025] = byte(0xef);
   flash_.buffer()[1537] = byte(0xef);
 
-  ASSERT_EQ(Status::DATA_LOSS, kvs_.Init());
+  ASSERT_EQ(Status::DataLoss(), kvs_.Init());
 
   auto stats = kvs_.GetStorageStats();
   EXPECT_EQ(64u, stats.in_use_bytes);
@@ -302,14 +308,14 @@
 // result in missing keys that are actually written after a write error in
 // flash.
 TEST_F(KvsErrorHandling, DISABLED_Init_OkWithWriteErrorOnFlash) {
-  InitFlashTo(AsBytes(kEntry1, kEmpty32Bytes, kEntry2));
+  InitFlashTo(bytes::Concat(kEntry1, kEmpty32Bytes, kEntry2));
 
-  EXPECT_EQ(Status::DATA_LOSS, kvs_.Init());
+  EXPECT_EQ(Status::DataLoss(), kvs_.Init());
   byte buffer[64];
   EXPECT_EQ(2u, kvs_.size());
   EXPECT_EQ(true, kvs_.error_detected());
-  EXPECT_EQ(Status::OK, kvs_.Get("key1", buffer).status());
-  EXPECT_EQ(Status::OK, kvs_.Get("k2", buffer).status());
+  EXPECT_EQ(Status::Ok(), kvs_.Get("key1", buffer).status());
+  EXPECT_EQ(Status::Ok(), kvs_.Get("k2", buffer).status());
 
   auto stats = kvs_.GetStorageStats();
   EXPECT_EQ(64u, stats.in_use_bytes);
@@ -319,23 +325,23 @@
 
 TEST_F(KvsErrorHandling, Init_CorruptKey_RevertsToPreviousVersion) {
   constexpr auto kVersion7 =
-      MakeValidEntry(kMagic, 7, "my_key", ByteStr("version 7"));
+      MakeValidEntry(kMagic, 7, "my_key", bytes::String("version 7"));
   constexpr auto kVersion8 =
-      MakeValidEntry(kMagic, 8, "my_key", ByteStr("version 8"));
+      MakeValidEntry(kMagic, 8, "my_key", bytes::String("version 8"));
 
-  InitFlashTo(AsBytes(kVersion7, kVersion8));
+  InitFlashTo(bytes::Concat(kVersion7, kVersion8));
 
   // Corrupt a byte of entry version 8 (addresses 32-63).
   flash_.buffer()[34] = byte(0xef);
 
-  ASSERT_EQ(Status::DATA_LOSS, kvs_.Init());
+  ASSERT_EQ(Status::DataLoss(), kvs_.Init());
 
   char buffer[64] = {};
 
   EXPECT_EQ(1u, kvs_.size());
 
   auto result = kvs_.Get("my_key", std::as_writable_bytes(std::span(buffer)));
-  EXPECT_EQ(Status::OK, result.status());
+  EXPECT_EQ(Status::Ok(), result.status());
   EXPECT_EQ(sizeof("version 7") - 1, result.size());
   EXPECT_STREQ("version 7", buffer);
 
@@ -346,12 +352,12 @@
 // the KvsErrorRecovery and KvsErrorHandling test fixtures (different KVS
 // configurations).
 TEST_F(KvsErrorHandling, Put_WriteFailure_EntryNotAddedButBytesMarkedWritten) {
-  ASSERT_EQ(Status::OK, kvs_.Init());
-  flash_.InjectWriteError(FlashError::Unconditional(Status::UNAVAILABLE, 1));
+  ASSERT_EQ(Status::Ok(), kvs_.Init());
+  flash_.InjectWriteError(FlashError::Unconditional(Status::Unavailable(), 1));
 
-  EXPECT_EQ(Status::UNAVAILABLE, kvs_.Put("key1", ByteStr("value1")));
+  EXPECT_EQ(Status::Unavailable(), kvs_.Put("key1", bytes::String("value1")));
 
-  EXPECT_EQ(Status::NOT_FOUND, kvs_.Get("key1", std::span<byte>()).status());
+  EXPECT_EQ(Status::NotFound(), kvs_.Get("key1", std::span<byte>()).status());
   ASSERT_TRUE(kvs_.empty());
 
   auto stats = kvs_.GetStorageStats();
@@ -361,7 +367,7 @@
 
   // The bytes were marked used, so a new key should not overlap with the bytes
   // from the failed Put.
-  EXPECT_EQ(Status::OK, kvs_.Put("key1", ByteStr("value1")));
+  EXPECT_EQ(Status::Ok(), kvs_.Put("key1", bytes::String("value1")));
 
   stats = kvs_.GetStorageStats();
   EXPECT_EQ(stats.in_use_bytes, (32u * kvs_.redundancy()));
@@ -389,36 +395,36 @@
 };
 
 TEST_F(KvsErrorRecovery, Init_Ok) {
-  InitFlashTo(AsBytes(kEntry1, kEntry2));
+  InitFlashTo(bytes::Concat(kEntry1, kEntry2));
 
-  EXPECT_EQ(Status::OK, kvs_.Init());
+  EXPECT_EQ(Status::Ok(), kvs_.Init());
   byte buffer[64];
-  EXPECT_EQ(Status::OK, kvs_.Get("key1", buffer).status());
-  EXPECT_EQ(Status::OK, kvs_.Get("k2", buffer).status());
+  EXPECT_EQ(Status::Ok(), kvs_.Get("key1", buffer).status());
+  EXPECT_EQ(Status::Ok(), kvs_.Get("k2", buffer).status());
 }
 
 TEST_F(KvsErrorRecovery, Init_DuplicateEntries_RecoversDuringInit) {
-  InitFlashTo(AsBytes(kEntry1, kEntry1));
+  InitFlashTo(bytes::Concat(kEntry1, kEntry1));
 
-  EXPECT_EQ(Status::OK, kvs_.Init());
+  EXPECT_EQ(Status::Ok(), kvs_.Init());
   auto stats = kvs_.GetStorageStats();
   EXPECT_EQ(stats.corrupt_sectors_recovered, 1u);
 
   byte buffer[64];
-  EXPECT_EQ(Status::OK, kvs_.Get("key1", buffer).status());
-  EXPECT_EQ(Status::NOT_FOUND, kvs_.Get("k2", buffer).status());
+  EXPECT_EQ(Status::Ok(), kvs_.Get("key1", buffer).status());
+  EXPECT_EQ(Status::NotFound(), kvs_.Get("k2", buffer).status());
 }
 
 TEST_F(KvsErrorRecovery, Init_CorruptEntry_FindsSubsequentValidEntry) {
   // Corrupt each byte in the first entry once.
   for (size_t i = 0; i < kEntry1.size(); ++i) {
-    InitFlashTo(AsBytes(kEntry1, kEntry2));
+    InitFlashTo(bytes::Concat(kEntry1, kEntry2));
     flash_.buffer()[i] = byte(int(flash_.buffer()[i]) + 1);
 
-    ASSERT_EQ(Status::OK, kvs_.Init());
+    ASSERT_EQ(Status::Ok(), kvs_.Init());
     byte buffer[64];
-    ASSERT_EQ(Status::NOT_FOUND, kvs_.Get("key1", buffer).status());
-    ASSERT_EQ(Status::OK, kvs_.Get("k2", buffer).status());
+    ASSERT_EQ(Status::NotFound(), kvs_.Get("key1", buffer).status());
+    ASSERT_EQ(Status::Ok(), kvs_.Get("k2", buffer).status());
 
     auto stats = kvs_.GetStorageStats();
     // One valid entry.
@@ -430,21 +436,21 @@
 }
 
 TEST_F(KvsErrorRecovery, Init_CorruptEntry_CorrectlyAccountsForSectorSize) {
-  InitFlashTo(AsBytes(kEntry1, kEntry2, kEntry3, kEntry4));
+  InitFlashTo(bytes::Concat(kEntry1, kEntry2, kEntry3, kEntry4));
 
   // Corrupt the first and third entries.
   flash_.buffer()[9] = byte(0xef);
   flash_.buffer()[67] = byte(0xef);
 
-  ASSERT_EQ(Status::OK, kvs_.Init());
+  ASSERT_EQ(Status::Ok(), kvs_.Init());
 
   EXPECT_EQ(2u, kvs_.size());
 
   byte buffer[64];
-  EXPECT_EQ(Status::NOT_FOUND, kvs_.Get("key1", buffer).status());
-  EXPECT_EQ(Status::OK, kvs_.Get("k2", buffer).status());
-  EXPECT_EQ(Status::NOT_FOUND, kvs_.Get("k3y", buffer).status());
-  EXPECT_EQ(Status::OK, kvs_.Get("4k", buffer).status());
+  EXPECT_EQ(Status::NotFound(), kvs_.Get("key1", buffer).status());
+  EXPECT_EQ(Status::Ok(), kvs_.Get("k2", buffer).status());
+  EXPECT_EQ(Status::NotFound(), kvs_.Get("k3y", buffer).status());
+  EXPECT_EQ(Status::Ok(), kvs_.Get("4k", buffer).status());
 
   auto stats = kvs_.GetStorageStats();
   ASSERT_EQ(64u, stats.in_use_bytes);
@@ -454,12 +460,12 @@
 }
 
 TEST_F(KvsErrorRecovery, Init_ReadError_InitializedWithSingleEntryError) {
-  InitFlashTo(AsBytes(kEntry1, kEntry2));
+  InitFlashTo(bytes::Concat(kEntry1, kEntry2));
 
   flash_.InjectReadError(
-      FlashError::InRange(Status::UNAUTHENTICATED, kEntry1.size()));
+      FlashError::InRange(Status::Unauthenticated(), kEntry1.size()));
 
-  EXPECT_EQ(Status::OK, kvs_.Init());
+  EXPECT_EQ(Status::Ok(), kvs_.Init());
   EXPECT_TRUE(kvs_.initialized());
   auto stats = kvs_.GetStorageStats();
   ASSERT_EQ(32u, stats.in_use_bytes);
@@ -470,7 +476,7 @@
 }
 
 TEST_F(KvsErrorRecovery, Init_CorruptSectors_ShouldBeUnwritable) {
-  InitFlashTo(AsBytes(kEntry1, kEntry2));
+  InitFlashTo(bytes::Concat(kEntry1, kEntry2));
 
   // Corrupt 3 of the 4 512-byte flash sectors. Corrupt sectors should be
   // recovered via garbage collection.
@@ -478,15 +484,15 @@
   flash_.buffer()[513] = byte(0xef);
   flash_.buffer()[1025] = byte(0xef);
 
-  ASSERT_EQ(Status::OK, kvs_.Init());
-  EXPECT_EQ(Status::OK, kvs_.Put("hello", ByteStr("world")));
-  EXPECT_EQ(Status::OK, kvs_.Put("a", ByteStr("b")));
+  ASSERT_EQ(Status::Ok(), kvs_.Init());
+  EXPECT_EQ(Status::Ok(), kvs_.Put("hello", bytes::String("world")));
+  EXPECT_EQ(Status::Ok(), kvs_.Put("a", bytes::String("b")));
 
   // Existing valid entries should still be readable.
   EXPECT_EQ(3u, kvs_.size());
   byte buffer[64];
-  EXPECT_EQ(Status::NOT_FOUND, kvs_.Get("key1", buffer).status());
-  EXPECT_EQ(Status::OK, kvs_.Get("k2", buffer).status());
+  EXPECT_EQ(Status::NotFound(), kvs_.Get("key1", buffer).status());
+  EXPECT_EQ(Status::Ok(), kvs_.Get("k2", buffer).status());
 
   auto stats = kvs_.GetStorageStats();
   EXPECT_EQ(96u, stats.in_use_bytes);
@@ -496,7 +502,7 @@
 }
 
 TEST_F(KvsErrorRecovery, Init_CorruptSectors_ShouldRecoverOne) {
-  InitFlashTo(AsBytes(kEntry1, kEntry2));
+  InitFlashTo(bytes::Concat(kEntry1, kEntry2));
 
   // Corrupt all of the 4 512-byte flash sectors. Leave the pre-init entries
   // intact. As part of recovery all corrupt sectors should get garbage
@@ -506,7 +512,7 @@
   flash_.buffer()[1025] = byte(0xef);
   flash_.buffer()[1537] = byte(0xef);
 
-  ASSERT_EQ(Status::OK, kvs_.Init());
+  ASSERT_EQ(Status::Ok(), kvs_.Init());
 
   auto stats = kvs_.GetStorageStats();
   EXPECT_EQ(64u, stats.in_use_bytes);
@@ -520,14 +526,14 @@
 // result in missing keys that are actually written after a write error in
 // flash.
 TEST_F(KvsErrorRecovery, DISABLED_Init_OkWithWriteErrorOnFlash) {
-  InitFlashTo(AsBytes(kEntry1, kEmpty32Bytes, kEntry2));
+  InitFlashTo(bytes::Concat(kEntry1, kEmpty32Bytes, kEntry2));
 
-  EXPECT_EQ(Status::OK, kvs_.Init());
+  EXPECT_EQ(Status::Ok(), kvs_.Init());
   byte buffer[64];
   EXPECT_EQ(2u, kvs_.size());
   EXPECT_EQ(false, kvs_.error_detected());
-  EXPECT_EQ(Status::OK, kvs_.Get("key1", buffer).status());
-  EXPECT_EQ(Status::OK, kvs_.Get("k2", buffer).status());
+  EXPECT_EQ(Status::Ok(), kvs_.Get("key1", buffer).status());
+  EXPECT_EQ(Status::Ok(), kvs_.Get("k2", buffer).status());
 
   auto stats = kvs_.GetStorageStats();
   EXPECT_EQ(64u, stats.in_use_bytes);
@@ -539,23 +545,23 @@
 
 TEST_F(KvsErrorRecovery, Init_CorruptKey_RevertsToPreviousVersion) {
   constexpr auto kVersion7 =
-      MakeValidEntry(kMagic, 7, "my_key", ByteStr("version 7"));
+      MakeValidEntry(kMagic, 7, "my_key", bytes::String("version 7"));
   constexpr auto kVersion8 =
-      MakeValidEntry(kMagic, 8, "my_key", ByteStr("version 8"));
+      MakeValidEntry(kMagic, 8, "my_key", bytes::String("version 8"));
 
-  InitFlashTo(AsBytes(kVersion7, kVersion8));
+  InitFlashTo(bytes::Concat(kVersion7, kVersion8));
 
   // Corrupt a byte of entry version 8 (addresses 32-63).
   flash_.buffer()[34] = byte(0xef);
 
-  ASSERT_EQ(Status::OK, kvs_.Init());
+  ASSERT_EQ(Status::Ok(), kvs_.Init());
 
   char buffer[64] = {};
 
   EXPECT_EQ(1u, kvs_.size());
 
   auto result = kvs_.Get("my_key", std::as_writable_bytes(std::span(buffer)));
-  EXPECT_EQ(Status::OK, result.status());
+  EXPECT_EQ(Status::Ok(), result.status());
   EXPECT_EQ(sizeof("version 7") - 1, result.size());
   EXPECT_STREQ("version 7", buffer);
 
@@ -566,13 +572,13 @@
 // the KvsErrorRecovery and KvsErrorHandling test fixtures (different KVS
 // configurations).
 TEST_F(KvsErrorRecovery, Put_WriteFailure_EntryNotAddedButBytesMarkedWritten) {
-  ASSERT_EQ(Status::OK, kvs_.Init());
-  flash_.InjectWriteError(FlashError::Unconditional(Status::UNAVAILABLE, 1));
+  ASSERT_EQ(Status::Ok(), kvs_.Init());
+  flash_.InjectWriteError(FlashError::Unconditional(Status::Unavailable(), 1));
 
-  EXPECT_EQ(Status::UNAVAILABLE, kvs_.Put("key1", ByteStr("value1")));
+  EXPECT_EQ(Status::Unavailable(), kvs_.Put("key1", bytes::String("value1")));
   EXPECT_EQ(true, kvs_.error_detected());
 
-  EXPECT_EQ(Status::NOT_FOUND, kvs_.Get("key1", std::span<byte>()).status());
+  EXPECT_EQ(Status::NotFound(), kvs_.Get("key1", std::span<byte>()).status());
   ASSERT_TRUE(kvs_.empty());
 
   auto stats = kvs_.GetStorageStats();
@@ -584,7 +590,7 @@
 
   // The bytes were marked used, so a new key should not overlap with the bytes
   // from the failed Put.
-  EXPECT_EQ(Status::OK, kvs_.Put("key1", ByteStr("value1")));
+  EXPECT_EQ(Status::Ok(), kvs_.Put("key1", bytes::String("value1")));
 
   stats = kvs_.GetStorageStats();
   EXPECT_EQ(stats.in_use_bytes, (32u * kvs_.redundancy()));
@@ -608,22 +614,22 @@
 ChecksumFunction<uint32_t> alt_checksum(AltChecksum);
 
 constexpr auto kAltEntry =
-    MakeValidEntry<AltChecksum>(kAltMagic, 32, "A Key", ByteStr("XD"));
+    MakeValidEntry<AltChecksum>(kAltMagic, 32, "A Key", bytes::String("XD"));
 
 constexpr uint32_t NoChecksum(std::span<const byte>, uint32_t) { return 0; }
 // For KVS magic value always use a random 32 bit integer rather than a
 // human readable 4 bytes. See pw_kvs/format.h for more information.
 constexpr uint32_t kNoChecksumMagic = 0xd49ba138;
 
-constexpr auto kNoChecksumEntry =
-    MakeValidEntry<NoChecksum>(kNoChecksumMagic, 64, "kee", ByteStr("O_o"));
+constexpr auto kNoChecksumEntry = MakeValidEntry<NoChecksum>(
+    kNoChecksumMagic, 64, "kee", bytes::String("O_o"));
 
 constexpr auto kDeletedEntry =
     MakeDeletedEntry<AltChecksum>(kAltMagic, 128, "gone");
 
 class InitializedRedundantMultiMagicKvs : public ::testing::Test {
  protected:
-  static constexpr auto kInitialContents = AsBytes(
+  static constexpr auto kInitialContents = bytes::Concat(
       kNoChecksumEntry, kEntry1, kAltEntry, kEntry2, kEntry3, kDeletedEntry);
 
   InitializedRedundantMultiMagicKvs()
@@ -641,7 +647,7 @@
                 kInitialContents.data(),
                 kInitialContents.size());
 
-    EXPECT_EQ(Status::OK, kvs_.Init());
+    EXPECT_EQ(Status::Ok(), kvs_.Init());
   }
 
   FakeFlashMemoryBuffer<512, 4, 3> flash_;
@@ -654,7 +660,7 @@
     char val[sizeof(str_value)] = {};                          \
     StatusWithSize stat =                                      \
         kvs_.Get(key, std::as_writable_bytes(std::span(val))); \
-    ASSERT_EQ(Status::OK, stat.status());                      \
+    ASSERT_EQ(Status::Ok(), stat.status());                    \
     ASSERT_EQ(sizeof(str_value) - 1, stat.size());             \
     ASSERT_STREQ(str_value, val);                              \
   } while (0)
@@ -675,7 +681,7 @@
   EXPECT_EQ(stats.corrupt_sectors_recovered, 0u);
   EXPECT_EQ(stats.missing_redundant_entries_recovered, 0u);
 
-  EXPECT_EQ(Status::OK, partition_.Erase(0, 1));
+  EXPECT_EQ(Status::Ok(), partition_.Erase(0, 1));
 
   ASSERT_CONTAINS_ENTRY("key1", "value1");
   ASSERT_CONTAINS_ENTRY("k2", "value2");
@@ -692,7 +698,7 @@
   EXPECT_EQ(stats.corrupt_sectors_recovered, 0u);
   EXPECT_EQ(stats.missing_redundant_entries_recovered, 0u);
 
-  EXPECT_EQ(Status::OK, kvs_.FullMaintenance());
+  EXPECT_EQ(Status::Ok(), kvs_.FullMaintenance());
   stats = kvs_.GetStorageStats();
   EXPECT_EQ(stats.in_use_bytes, (192u * kvs_.redundancy()));
   EXPECT_EQ(stats.reclaimable_bytes, 0u);
@@ -709,7 +715,7 @@
   EXPECT_EQ(stats.corrupt_sectors_recovered, 0u);
   EXPECT_EQ(stats.missing_redundant_entries_recovered, 0u);
 
-  EXPECT_EQ(Status::OK, partition_.Erase(partition_.sector_size_bytes(), 1));
+  EXPECT_EQ(Status::Ok(), partition_.Erase(partition_.sector_size_bytes(), 1));
 
   ASSERT_CONTAINS_ENTRY("key1", "value1");
   ASSERT_CONTAINS_ENTRY("k2", "value2");
@@ -719,7 +725,7 @@
 
   EXPECT_EQ(false, kvs_.error_detected());
 
-  EXPECT_EQ(false, kvs_.Init());
+  EXPECT_EQ(Status::Ok(), kvs_.Init());
   stats = kvs_.GetStorageStats();
   EXPECT_EQ(stats.in_use_bytes, (192u * kvs_.redundancy()));
   EXPECT_EQ(stats.reclaimable_bytes, 0u);
@@ -730,9 +736,9 @@
 
 TEST_F(InitializedRedundantMultiMagicKvs, SingleReadErrors) {
   // Inject 2 read errors, so the first read attempt fully fails.
-  flash_.InjectReadError(FlashError::Unconditional(Status::INTERNAL, 2));
+  flash_.InjectReadError(FlashError::Unconditional(Status::Internal(), 2));
 
-  flash_.InjectReadError(FlashError::Unconditional(Status::INTERNAL, 1, 7));
+  flash_.InjectReadError(FlashError::Unconditional(Status::Internal(), 1, 7));
 
   ASSERT_CONTAINS_ENTRY("key1", "value1");
   ASSERT_CONTAINS_ENTRY("k2", "value2");
@@ -751,9 +757,9 @@
 }
 
 TEST_F(InitializedRedundantMultiMagicKvs, SingleWriteError) {
-  flash_.InjectWriteError(FlashError::Unconditional(Status::INTERNAL, 1, 1));
+  flash_.InjectWriteError(FlashError::Unconditional(Status::Internal(), 1, 1));
 
-  EXPECT_EQ(Status::INTERNAL, kvs_.Put("new key", ByteStr("abcd?")));
+  EXPECT_EQ(Status::Internal(), kvs_.Put("new key", bytes::String("abcd?")));
 
   EXPECT_EQ(true, kvs_.error_detected());
 
@@ -767,10 +773,10 @@
 
   char val[20] = {};
   EXPECT_EQ(
-      Status::OK,
+      Status::Ok(),
       kvs_.Get("new key", std::as_writable_bytes(std::span(val))).status());
 
-  EXPECT_EQ(Status::OK, kvs_.FullMaintenance());
+  EXPECT_EQ(Status::Ok(), kvs_.FullMaintenance());
   stats = kvs_.GetStorageStats();
   EXPECT_EQ(stats.in_use_bytes, (224u * kvs_.redundancy()));
   EXPECT_EQ(stats.reclaimable_bytes, 0u);
@@ -779,23 +785,23 @@
   EXPECT_EQ(stats.missing_redundant_entries_recovered, 0u);
 
   EXPECT_EQ(
-      Status::OK,
+      Status::Ok(),
       kvs_.Get("new key", std::as_writable_bytes(std::span(val))).status());
 }
 
 TEST_F(InitializedRedundantMultiMagicKvs, DataLossAfterLosingBothCopies) {
-  EXPECT_EQ(Status::OK, partition_.Erase(0, 2));
+  EXPECT_EQ(Status::Ok(), partition_.Erase(0, 2));
 
   char val[20] = {};
-  EXPECT_EQ(Status::DATA_LOSS,
+  EXPECT_EQ(Status::DataLoss(),
             kvs_.Get("key1", std::as_writable_bytes(std::span(val))).status());
-  EXPECT_EQ(Status::DATA_LOSS,
+  EXPECT_EQ(Status::DataLoss(),
             kvs_.Get("k2", std::as_writable_bytes(std::span(val))).status());
-  EXPECT_EQ(Status::DATA_LOSS,
+  EXPECT_EQ(Status::DataLoss(),
             kvs_.Get("k3y", std::as_writable_bytes(std::span(val))).status());
-  EXPECT_EQ(Status::DATA_LOSS,
+  EXPECT_EQ(Status::DataLoss(),
             kvs_.Get("A Key", std::as_writable_bytes(std::span(val))).status());
-  EXPECT_EQ(Status::DATA_LOSS,
+  EXPECT_EQ(Status::DataLoss(),
             kvs_.Get("kee", std::as_writable_bytes(std::span(val))).status());
 
   EXPECT_EQ(true, kvs_.error_detected());
@@ -809,10 +815,10 @@
 }
 
 TEST_F(InitializedRedundantMultiMagicKvs, PutNewEntry_UsesFirstFormat) {
-  EXPECT_EQ(Status::OK, kvs_.Put("new key", ByteStr("abcd?")));
+  EXPECT_EQ(Status::Ok(), kvs_.Put("new key", bytes::String("abcd?")));
 
   constexpr auto kNewEntry =
-      MakeValidEntry(kMagic, 129, "new key", ByteStr("abcd?"));
+      MakeValidEntry(kMagic, 129, "new key", bytes::String("abcd?"));
   EXPECT_EQ(0,
             std::memcmp(kNewEntry.data(),
                         flash_.buffer().data() + kInitialContents.size(),
@@ -821,10 +827,10 @@
 }
 
 TEST_F(InitializedRedundantMultiMagicKvs, PutExistingEntry_UsesFirstFormat) {
-  EXPECT_EQ(Status::OK, kvs_.Put("A Key", ByteStr("New value!")));
+  EXPECT_EQ(Status::Ok(), kvs_.Put("A Key", bytes::String("New value!")));
 
   constexpr auto kNewEntry =
-      MakeValidEntry(kMagic, 129, "A Key", ByteStr("New value!"));
+      MakeValidEntry(kMagic, 129, "A Key", bytes::String("New value!"));
   EXPECT_EQ(0,
             std::memcmp(kNewEntry.data(),
                         flash_.buffer().data() + kInitialContents.size(),
@@ -837,20 +843,20 @@
     char val[sizeof(str_value)] = {};                         \
     StatusWithSize stat =                                     \
         kvs.Get(key, std::as_writable_bytes(std::span(val))); \
-    ASSERT_EQ(Status::OK, stat.status());                     \
+    ASSERT_EQ(Status::Ok(), stat.status());                   \
     ASSERT_EQ(sizeof(str_value) - 1, stat.size());            \
     ASSERT_STREQ(str_value, val);                             \
   } while (0)
 
 TEST_F(InitializedRedundantMultiMagicKvs, UpdateEntryFormat) {
-  ASSERT_EQ(Status::OK, kvs_.FullMaintenance());
+  ASSERT_EQ(Status::Ok(), kvs_.FullMaintenance());
 
   KeyValueStoreBuffer<kMaxEntries, kMaxUsableSectors, 2, 1> local_kvs(
       &partition_,
       {.magic = kMagic, .checksum = &default_checksum},
       kNoGcOptions);
 
-  ASSERT_EQ(Status::OK, local_kvs.Init());
+  ASSERT_EQ(Status::Ok(), local_kvs.Init());
   EXPECT_EQ(false, local_kvs.error_detected());
   ASSERT_KVS_CONTAINS_ENTRY(local_kvs, "key1", "value1");
   ASSERT_KVS_CONTAINS_ENTRY(local_kvs, "k2", "value2");
@@ -862,7 +868,7 @@
 class InitializedMultiMagicKvs : public ::testing::Test {
  protected:
   static constexpr auto kInitialContents =
-      AsBytes(kNoChecksumEntry, kEntry1, kAltEntry, kEntry2, kEntry3);
+      bytes::Concat(kNoChecksumEntry, kEntry1, kAltEntry, kEntry2, kEntry3);
 
   InitializedMultiMagicKvs()
       : flash_(internal::Entry::kMinAlignmentBytes),
@@ -879,7 +885,7 @@
                 kInitialContents.data(),
                 kInitialContents.size());
 
-    EXPECT_EQ(Status::OK, kvs_.Init());
+    EXPECT_EQ(Status::Ok(), kvs_.Init());
   }
 
   FakeFlashMemoryBuffer<512, 4, 3> flash_;
@@ -900,14 +906,14 @@
 // Similar to test for InitializedRedundantMultiMagicKvs. Doing similar test
 // with different KVS configuration.
 TEST_F(InitializedMultiMagicKvs, UpdateEntryFormat) {
-  ASSERT_EQ(Status::OK, kvs_.FullMaintenance());
+  ASSERT_EQ(Status::Ok(), kvs_.FullMaintenance());
 
   KeyValueStoreBuffer<kMaxEntries, kMaxUsableSectors, 1, 1> local_kvs(
       &partition_,
       {.magic = kMagic, .checksum = &default_checksum},
       kNoGcOptions);
 
-  ASSERT_EQ(Status::OK, local_kvs.Init());
+  ASSERT_EQ(Status::Ok(), local_kvs.Init());
   EXPECT_EQ(false, local_kvs.error_detected());
   ASSERT_KVS_CONTAINS_ENTRY(local_kvs, "key1", "value1");
   ASSERT_KVS_CONTAINS_ENTRY(local_kvs, "k2", "value2");
@@ -919,7 +925,7 @@
 class InitializedRedundantLazyRecoveryKvs : public ::testing::Test {
  protected:
   static constexpr auto kInitialContents =
-      AsBytes(kEntry1, kEntry2, kEntry3, kEntry4);
+      bytes::Concat(kEntry1, kEntry2, kEntry3, kEntry4);
 
   InitializedRedundantLazyRecoveryKvs()
       : flash_(internal::Entry::kMinAlignmentBytes),
@@ -932,7 +938,7 @@
                 kInitialContents.data(),
                 kInitialContents.size());
 
-    EXPECT_EQ(Status::OK, kvs_.Init());
+    EXPECT_EQ(Status::Ok(), kvs_.Init());
   }
 
   FakeFlashMemoryBuffer<512, 4, 3> flash_;
@@ -941,16 +947,16 @@
 };
 
 TEST_F(InitializedRedundantLazyRecoveryKvs, WriteAfterDataLoss) {
-  EXPECT_EQ(Status::OK, partition_.Erase(0, 4));
+  EXPECT_EQ(Status::Ok(), partition_.Erase(0, 4));
 
   char val[20] = {};
-  EXPECT_EQ(Status::DATA_LOSS,
+  EXPECT_EQ(Status::DataLoss(),
             kvs_.Get("key1", std::as_writable_bytes(std::span(val))).status());
-  EXPECT_EQ(Status::DATA_LOSS,
+  EXPECT_EQ(Status::DataLoss(),
             kvs_.Get("k2", std::as_writable_bytes(std::span(val))).status());
-  EXPECT_EQ(Status::DATA_LOSS,
+  EXPECT_EQ(Status::DataLoss(),
             kvs_.Get("k3y", std::as_writable_bytes(std::span(val))).status());
-  EXPECT_EQ(Status::DATA_LOSS,
+  EXPECT_EQ(Status::DataLoss(),
             kvs_.Get("4k", std::as_writable_bytes(std::span(val))).status());
 
   EXPECT_EQ(true, kvs_.error_detected());
@@ -962,9 +968,9 @@
   EXPECT_EQ(stats.corrupt_sectors_recovered, 0u);
   EXPECT_EQ(stats.missing_redundant_entries_recovered, 0u);
 
-  ASSERT_EQ(Status::DATA_LOSS, kvs_.Put("key1", 1000));
+  ASSERT_EQ(Status::DataLoss(), kvs_.Put("key1", 1000));
 
-  EXPECT_EQ(Status::OK, kvs_.FullMaintenance());
+  EXPECT_EQ(Status::Ok(), kvs_.FullMaintenance());
   stats = kvs_.GetStorageStats();
   EXPECT_EQ(stats.in_use_bytes, 0u);
   EXPECT_EQ(stats.reclaimable_bytes, 0u);
@@ -999,7 +1005,7 @@
   ASSERT_CONTAINS_ENTRY("k3y", "value3");
   ASSERT_CONTAINS_ENTRY("4k", "value4");
 
-  EXPECT_EQ(Status::OK, kvs_.FullMaintenance());
+  EXPECT_EQ(Status::Ok(), kvs_.FullMaintenance());
   stats = kvs_.GetStorageStats();
   EXPECT_EQ(stats.in_use_bytes, (128u * kvs_.redundancy()));
   EXPECT_EQ(stats.reclaimable_bytes, 0u);
@@ -1011,7 +1017,7 @@
 class InitializedLazyRecoveryKvs : public ::testing::Test {
  protected:
   static constexpr auto kInitialContents =
-      AsBytes(kEntry1, kEntry2, kEntry3, kEntry4);
+      bytes::Concat(kEntry1, kEntry2, kEntry3, kEntry4);
 
   InitializedLazyRecoveryKvs()
       : flash_(internal::Entry::kMinAlignmentBytes),
@@ -1024,7 +1030,7 @@
                 kInitialContents.data(),
                 kInitialContents.size());
 
-    EXPECT_EQ(Status::OK, kvs_.Init());
+    EXPECT_EQ(Status::Ok(), kvs_.Init());
   }
 
   FakeFlashMemoryBuffer<512, 8> flash_;
@@ -1060,17 +1066,17 @@
   // Add a near-sector size key entry to fill the KVS with a valid large entry
   // and stale data. Modify the value in between Puts so it actually writes
   // (identical value writes are skipped).
-  EXPECT_EQ(Status::OK, kvs_.Put("big_key", test_data));
+  EXPECT_EQ(Status::Ok(), kvs_.Put("big_key", test_data));
   test_data[0]++;
-  EXPECT_EQ(Status::OK, kvs_.Put("big_key", test_data));
+  EXPECT_EQ(Status::Ok(), kvs_.Put("big_key", test_data));
   test_data[0]++;
-  EXPECT_EQ(Status::OK, kvs_.Put("big_key", test_data));
+  EXPECT_EQ(Status::Ok(), kvs_.Put("big_key", test_data));
   test_data[0]++;
-  EXPECT_EQ(Status::OK, kvs_.Put("big_key", test_data));
+  EXPECT_EQ(Status::Ok(), kvs_.Put("big_key", test_data));
   test_data[0]++;
-  EXPECT_EQ(Status::OK, kvs_.Put("big_key", test_data));
+  EXPECT_EQ(Status::Ok(), kvs_.Put("big_key", test_data));
   test_data[0]++;
-  EXPECT_EQ(Status::OK, kvs_.Put("big_key", test_data));
+  EXPECT_EQ(Status::Ok(), kvs_.Put("big_key", test_data));
 
   // Instantiate a new KVS with redundancy of 2. This KVS should add an extra
   // copy of each valid key as part of the init process. Because there is not
@@ -1080,7 +1086,7 @@
       &partition_,
       {.magic = kMagic, .checksum = &default_checksum},
       kRecoveryLazyGcOptions);
-  ASSERT_EQ(Status::OK, local_kvs.Init());
+  ASSERT_EQ(Status::Ok(), local_kvs.Init());
 
   // Verify no errors found in the new KVS and all the entries are present.
   EXPECT_EQ(false, local_kvs.error_detected());
@@ -1089,7 +1095,7 @@
   ASSERT_KVS_CONTAINS_ENTRY(local_kvs, "k3y", "value3");
   ASSERT_KVS_CONTAINS_ENTRY(local_kvs, "4k", "value4");
   StatusWithSize big_key_size = local_kvs.ValueSize("big_key");
-  EXPECT_EQ(Status::OK, big_key_size.status());
+  EXPECT_EQ(Status::Ok(), big_key_size.status());
   EXPECT_EQ(sizeof(test_data), big_key_size.size());
 
   // Verify that storage stats of the new redundant KVS match expected values.
diff --git a/pw_kvs/key_value_store_fuzz_test.cc b/pw_kvs/key_value_store_fuzz_test.cc
index fcfca4c..4fd86bc 100644
--- a/pw_kvs/key_value_store_fuzz_test.cc
+++ b/pw_kvs/key_value_store_fuzz_test.cc
@@ -46,7 +46,7 @@
   EmptyInitializedKvs()
       : kvs_(&test_partition, {.magic = 0x873a9b50, .checksum = &checksum}) {
     test_partition.Erase(0, test_partition.sector_count());
-    ASSERT_EQ(Status::OK, kvs_.Init());
+    ASSERT_EQ(Status::Ok(), kvs_.Init());
   }
 
   KeyValueStoreBuffer<kMaxEntries, kMaxUsableSectors> kvs_;
@@ -63,7 +63,7 @@
   for (int i = 0; i < kFuzzIterations; ++i) {
     for (unsigned key_size = 1; key_size < sizeof(value); ++key_size) {
       for (unsigned value_size = 0; value_size < sizeof(value); ++value_size) {
-        ASSERT_EQ(Status::OK,
+        ASSERT_EQ(Status::Ok(),
                   kvs_.Put(std::string_view(value, key_size),
                            std::as_bytes(std::span(value, value_size))));
       }
diff --git a/pw_kvs/key_value_store_initialized_test.cc b/pw_kvs/key_value_store_initialized_test.cc
index fb343d6..7ea3fd9 100644
--- a/pw_kvs/key_value_store_initialized_test.cc
+++ b/pw_kvs/key_value_store_initialized_test.cc
@@ -18,14 +18,13 @@
 #include <span>
 
 #include "gtest/gtest.h"
-#include "pw_checksum/ccitt_crc16.h"
+#include "pw_bytes/array.h"
+#include "pw_checksum/crc16_ccitt.h"
 #include "pw_kvs/crc16_checksum.h"
 #include "pw_kvs/flash_memory.h"
 #include "pw_kvs/flash_test_partition.h"
 #include "pw_kvs/internal/entry.h"
 #include "pw_kvs/key_value_store.h"
-#include "pw_kvs_private/byte_utils.h"
-#include "pw_kvs_private/macros.h"
 #include "pw_log/log.h"
 #include "pw_status/status.h"
 #include "pw_string/string_builder.h"
@@ -84,7 +83,7 @@
  protected:
   EmptyInitializedKvs() : kvs_(&test_partition, default_format) {
     test_partition.Erase();
-    ASSERT_EQ(Status::OK, kvs_.Init());
+    ASSERT_EQ(Status::Ok(), kvs_.Init());
   }
 
   // Intention of this is to put and erase key-val to fill up sectors. It's a
@@ -108,7 +107,7 @@
     while (size_to_fill > 0) {
       // Changing buffer value so put actually does something
       buffer[0] = static_cast<byte>(static_cast<uint8_t>(buffer[0]) + 1);
-      ASSERT_EQ(Status::OK,
+      ASSERT_EQ(Status::Ok(),
                 kvs_.Put(key,
                          std::span(buffer.data(),
                                    chunk_len - kvs_attr.ChunkHeaderSize() -
@@ -116,7 +115,7 @@
       size_to_fill -= chunk_len;
       chunk_len = std::min(size_to_fill, kMaxPutSize);
     }
-    ASSERT_EQ(Status::OK, kvs_.Delete(key));
+    ASSERT_EQ(Status::Ok(), kvs_.Delete(key));
   }
 
   KeyValueStoreBuffer<kMaxEntries, kMaxUsableSectors> kvs_;
@@ -128,7 +127,7 @@
   std::array<char, 8> value{'v', 'a', 'l', 'u', 'e', '6', '7', '\0'};
 
   for (int i = 0; i < 1000; ++i) {
-    ASSERT_EQ(Status::OK,
+    ASSERT_EQ(Status::Ok(),
               kvs_.Put("The Key!", std::as_bytes(std::span(value))));
   }
 }
@@ -137,7 +136,7 @@
   std::array<char, 7> value{'v', 'a', 'l', 'u', 'e', '6', '\0'};
 
   for (int i = 0; i < 1000; ++i) {
-    ASSERT_EQ(Status::OK,
+    ASSERT_EQ(Status::Ok(),
               kvs_.Put("The Key!", std::as_bytes(std::span(value))));
   }
 }
@@ -147,27 +146,27 @@
 
   for (int i = 0; i < 100; ++i) {
     for (unsigned size = 0; size < value.size(); ++size) {
-      ASSERT_EQ(Status::OK, kvs_.Put("The Key!", i));
+      ASSERT_EQ(Status::Ok(), kvs_.Put("The Key!", i));
     }
   }
 }
 
 TEST_F(EmptyInitializedKvs, PutAndGetByValue_ConvertibleToSpan) {
   constexpr float input[] = {1.0, -3.5};
-  ASSERT_EQ(Status::OK, kvs_.Put("key", input));
+  ASSERT_EQ(Status::Ok(), kvs_.Put("key", input));
 
   float output[2] = {};
-  ASSERT_EQ(Status::OK, kvs_.Get("key", &output));
+  ASSERT_EQ(Status::Ok(), kvs_.Get("key", &output));
   EXPECT_EQ(input[0], output[0]);
   EXPECT_EQ(input[1], output[1]);
 }
 
 TEST_F(EmptyInitializedKvs, PutAndGetByValue_Span) {
   float input[] = {1.0, -3.5};
-  ASSERT_EQ(Status::OK, kvs_.Put("key", std::span(input)));
+  ASSERT_EQ(Status::Ok(), kvs_.Put("key", std::span(input)));
 
   float output[2] = {};
-  ASSERT_EQ(Status::OK, kvs_.Get("key", &output));
+  ASSERT_EQ(Status::Ok(), kvs_.Get("key", &output));
   EXPECT_EQ(input[0], output[0]);
   EXPECT_EQ(input[1], output[1]);
 }
@@ -179,116 +178,117 @@
   };
   const TestStruct input{-1234.5, true};
 
-  ASSERT_EQ(Status::OK, kvs_.Put("key", input));
+  ASSERT_EQ(Status::Ok(), kvs_.Put("key", input));
 
   TestStruct output;
-  ASSERT_EQ(Status::OK, kvs_.Get("key", &output));
+  ASSERT_EQ(Status::Ok(), kvs_.Get("key", &output));
   EXPECT_EQ(input.a, output.a);
   EXPECT_EQ(input.b, output.b);
 }
 
 TEST_F(EmptyInitializedKvs, Get_Simple) {
-  ASSERT_EQ(Status::OK,
+  ASSERT_EQ(Status::Ok(),
             kvs_.Put("Charles", std::as_bytes(std::span("Mingus"))));
 
   char value[16];
   auto result = kvs_.Get("Charles", std::as_writable_bytes(std::span(value)));
-  EXPECT_EQ(Status::OK, result.status());
+  EXPECT_EQ(Status::Ok(), result.status());
   EXPECT_EQ(sizeof("Mingus"), result.size());
   EXPECT_STREQ("Mingus", value);
 }
 
 TEST_F(EmptyInitializedKvs, Get_WithOffset) {
-  ASSERT_EQ(Status::OK,
+  ASSERT_EQ(Status::Ok(),
             kvs_.Put("Charles", std::as_bytes(std::span("Mingus"))));
 
   char value[16];
   auto result =
       kvs_.Get("Charles", std::as_writable_bytes(std::span(value)), 4);
-  EXPECT_EQ(Status::OK, result.status());
+  EXPECT_EQ(Status::Ok(), result.status());
   EXPECT_EQ(sizeof("Mingus") - 4, result.size());
   EXPECT_STREQ("us", value);
 }
 
 TEST_F(EmptyInitializedKvs, Get_WithOffset_FillBuffer) {
-  ASSERT_EQ(Status::OK,
+  ASSERT_EQ(Status::Ok(),
             kvs_.Put("Charles", std::as_bytes(std::span("Mingus"))));
 
   char value[4] = {};
   auto result =
       kvs_.Get("Charles", std::as_writable_bytes(std::span(value, 3)), 1);
-  EXPECT_EQ(Status::RESOURCE_EXHAUSTED, result.status());
+  EXPECT_EQ(Status::ResourceExhausted(), result.status());
   EXPECT_EQ(3u, result.size());
   EXPECT_STREQ("ing", value);
 }
 
 TEST_F(EmptyInitializedKvs, Get_WithOffset_PastEnd) {
-  ASSERT_EQ(Status::OK,
+  ASSERT_EQ(Status::Ok(),
             kvs_.Put("Charles", std::as_bytes(std::span("Mingus"))));
 
   char value[16];
   auto result = kvs_.Get("Charles",
                          std::as_writable_bytes(std::span(value)),
                          sizeof("Mingus") + 1);
-  EXPECT_EQ(Status::OUT_OF_RANGE, result.status());
+  EXPECT_EQ(Status::OutOfRange(), result.status());
   EXPECT_EQ(0u, result.size());
 }
 
 TEST_F(EmptyInitializedKvs, GetValue) {
-  ASSERT_EQ(Status::OK, kvs_.Put("key", uint32_t(0xfeedbeef)));
+  ASSERT_EQ(Status::Ok(), kvs_.Put("key", uint32_t(0xfeedbeef)));
 
   uint32_t value = 0;
-  EXPECT_EQ(Status::OK, kvs_.Get("key", &value));
+  EXPECT_EQ(Status::Ok(), kvs_.Get("key", &value));
   EXPECT_EQ(uint32_t(0xfeedbeef), value);
 }
 
 TEST_F(EmptyInitializedKvs, GetValue_TooSmall) {
-  ASSERT_EQ(Status::OK, kvs_.Put("key", uint32_t(0xfeedbeef)));
+  ASSERT_EQ(Status::Ok(), kvs_.Put("key", uint32_t(0xfeedbeef)));
 
   uint8_t value = 0;
-  EXPECT_EQ(Status::INVALID_ARGUMENT, kvs_.Get("key", &value));
+  EXPECT_EQ(Status::InvalidArgument(), kvs_.Get("key", &value));
   EXPECT_EQ(0u, value);
 }
 
 TEST_F(EmptyInitializedKvs, GetValue_TooLarge) {
-  ASSERT_EQ(Status::OK, kvs_.Put("key", uint32_t(0xfeedbeef)));
+  ASSERT_EQ(Status::Ok(), kvs_.Put("key", uint32_t(0xfeedbeef)));
 
   uint64_t value = 0;
-  EXPECT_EQ(Status::INVALID_ARGUMENT, kvs_.Get("key", &value));
+  EXPECT_EQ(Status::InvalidArgument(), kvs_.Get("key", &value));
   EXPECT_EQ(0u, value);
 }
 
 TEST_F(EmptyInitializedKvs, Delete_GetDeletedKey_ReturnsNotFound) {
-  ASSERT_EQ(Status::OK, kvs_.Put("kEy", std::as_bytes(std::span("123"))));
-  ASSERT_EQ(Status::OK, kvs_.Delete("kEy"));
+  ASSERT_EQ(Status::Ok(), kvs_.Put("kEy", std::as_bytes(std::span("123"))));
+  ASSERT_EQ(Status::Ok(), kvs_.Delete("kEy"));
 
-  EXPECT_EQ(Status::NOT_FOUND, kvs_.Get("kEy", {}).status());
-  EXPECT_EQ(Status::NOT_FOUND, kvs_.ValueSize("kEy").status());
+  EXPECT_EQ(Status::NotFound(), kvs_.Get("kEy", {}).status());
+  EXPECT_EQ(Status::NotFound(), kvs_.ValueSize("kEy").status());
 }
 
 TEST_F(EmptyInitializedKvs, Delete_AddBackKey_PersistsAfterInitialization) {
-  ASSERT_EQ(Status::OK, kvs_.Put("kEy", std::as_bytes(std::span("123"))));
-  ASSERT_EQ(Status::OK, kvs_.Delete("kEy"));
+  ASSERT_EQ(Status::Ok(), kvs_.Put("kEy", std::as_bytes(std::span("123"))));
+  ASSERT_EQ(Status::Ok(), kvs_.Delete("kEy"));
 
-  EXPECT_EQ(Status::OK, kvs_.Put("kEy", std::as_bytes(std::span("45678"))));
+  EXPECT_EQ(Status::Ok(), kvs_.Put("kEy", std::as_bytes(std::span("45678"))));
   char data[6] = {};
-  ASSERT_EQ(Status::OK, kvs_.Get("kEy", &data));
+  ASSERT_EQ(Status::Ok(), kvs_.Get("kEy", &data));
   EXPECT_STREQ(data, "45678");
 
   // Ensure that the re-added key is still present after reinitialization.
   KeyValueStoreBuffer<kMaxEntries, kMaxUsableSectors> new_kvs(&test_partition,
                                                               default_format);
-  ASSERT_EQ(Status::OK, new_kvs.Init());
+  ASSERT_EQ(Status::Ok(), new_kvs.Init());
 
-  EXPECT_EQ(Status::OK, new_kvs.Put("kEy", std::as_bytes(std::span("45678"))));
+  EXPECT_EQ(Status::Ok(),
+            new_kvs.Put("kEy", std::as_bytes(std::span("45678"))));
   char new_data[6] = {};
-  EXPECT_EQ(Status::OK, new_kvs.Get("kEy", &new_data));
+  EXPECT_EQ(Status::Ok(), new_kvs.Get("kEy", &new_data));
   EXPECT_STREQ(data, "45678");
 }
 
 TEST_F(EmptyInitializedKvs, Delete_AllItems_KvsIsEmpty) {
-  ASSERT_EQ(Status::OK, kvs_.Put("kEy", std::as_bytes(std::span("123"))));
-  ASSERT_EQ(Status::OK, kvs_.Delete("kEy"));
+  ASSERT_EQ(Status::Ok(), kvs_.Put("kEy", std::as_bytes(std::span("123"))));
+  ASSERT_EQ(Status::Ok(), kvs_.Delete("kEy"));
 
   EXPECT_EQ(0u, kvs_.size());
   EXPECT_TRUE(kvs_.empty());
@@ -299,17 +299,17 @@
   constexpr std::string_view key1 = "D4";
   constexpr std::string_view key2 = "dFU6S";
 
-  ASSERT_EQ(Status::OK, kvs_.Put(key1, 1000));
+  ASSERT_EQ(Status::Ok(), kvs_.Put(key1, 1000));
 
-  EXPECT_EQ(Status::ALREADY_EXISTS, kvs_.Put(key2, 999));
+  EXPECT_EQ(Status::AlreadyExists(), kvs_.Put(key2, 999));
 
   int value = 0;
-  EXPECT_EQ(Status::OK, kvs_.Get(key1, &value));
+  EXPECT_EQ(Status::Ok(), kvs_.Get(key1, &value));
   EXPECT_EQ(1000, value);
 
-  EXPECT_EQ(Status::NOT_FOUND, kvs_.Get(key2, &value));
-  EXPECT_EQ(Status::NOT_FOUND, kvs_.ValueSize(key2).status());
-  EXPECT_EQ(Status::NOT_FOUND, kvs_.Delete(key2));
+  EXPECT_EQ(Status::NotFound(), kvs_.Get(key2, &value));
+  EXPECT_EQ(Status::NotFound(), kvs_.ValueSize(key2).status());
+  EXPECT_EQ(Status::NotFound(), kvs_.Delete(key2));
 }
 
 TEST_F(EmptyInitializedKvs, Collision_WithDeletedKey) {
@@ -317,18 +317,18 @@
   constexpr std::string_view key1 = "1U2";
   constexpr std::string_view key2 = "ahj9d";
 
-  ASSERT_EQ(Status::OK, kvs_.Put(key1, 1000));
-  ASSERT_EQ(Status::OK, kvs_.Delete(key1));
+  ASSERT_EQ(Status::Ok(), kvs_.Put(key1, 1000));
+  ASSERT_EQ(Status::Ok(), kvs_.Delete(key1));
 
   // key2 collides with key1's tombstone.
-  EXPECT_EQ(Status::ALREADY_EXISTS, kvs_.Put(key2, 999));
+  EXPECT_EQ(Status::AlreadyExists(), kvs_.Put(key2, 999));
 
   int value = 0;
-  EXPECT_EQ(Status::NOT_FOUND, kvs_.Get(key1, &value));
+  EXPECT_EQ(Status::NotFound(), kvs_.Get(key1, &value));
 
-  EXPECT_EQ(Status::NOT_FOUND, kvs_.Get(key2, &value));
-  EXPECT_EQ(Status::NOT_FOUND, kvs_.ValueSize(key2).status());
-  EXPECT_EQ(Status::NOT_FOUND, kvs_.Delete(key2));
+  EXPECT_EQ(Status::NotFound(), kvs_.Get(key2, &value));
+  EXPECT_EQ(Status::NotFound(), kvs_.ValueSize(key2).status());
+  EXPECT_EQ(Status::NotFound(), kvs_.Delete(key2));
 }
 
 TEST_F(EmptyInitializedKvs, Iteration_Empty_ByReference) {
@@ -346,62 +346,63 @@
 }
 
 TEST_F(EmptyInitializedKvs, Iteration_OneItem) {
-  ASSERT_EQ(Status::OK, kvs_.Put("kEy", std::as_bytes(std::span("123"))));
+  ASSERT_EQ(Status::Ok(), kvs_.Put("kEy", std::as_bytes(std::span("123"))));
 
   for (KeyValueStore::Item entry : kvs_) {
     EXPECT_STREQ(entry.key(), "kEy");  // Make sure null-terminated.
 
     char temp[sizeof("123")] = {};
-    EXPECT_EQ(Status::OK, entry.Get(&temp));
+    EXPECT_EQ(Status::Ok(), entry.Get(&temp));
     EXPECT_STREQ("123", temp);
   }
 }
 
 TEST_F(EmptyInitializedKvs, Iteration_GetWithOffset) {
-  ASSERT_EQ(Status::OK, kvs_.Put("key", std::as_bytes(std::span("not bad!"))));
+  ASSERT_EQ(Status::Ok(),
+            kvs_.Put("key", std::as_bytes(std::span("not bad!"))));
 
   for (KeyValueStore::Item entry : kvs_) {
     char temp[5];
     auto result = entry.Get(std::as_writable_bytes(std::span(temp)), 4);
-    EXPECT_EQ(Status::OK, result.status());
+    EXPECT_EQ(Status::Ok(), result.status());
     EXPECT_EQ(5u, result.size());
     EXPECT_STREQ("bad!", temp);
   }
 }
 
 TEST_F(EmptyInitializedKvs, Iteration_GetValue) {
-  ASSERT_EQ(Status::OK, kvs_.Put("key", uint32_t(0xfeedbeef)));
+  ASSERT_EQ(Status::Ok(), kvs_.Put("key", uint32_t(0xfeedbeef)));
 
   for (KeyValueStore::Item entry : kvs_) {
     uint32_t value = 0;
-    EXPECT_EQ(Status::OK, entry.Get(&value));
+    EXPECT_EQ(Status::Ok(), entry.Get(&value));
     EXPECT_EQ(uint32_t(0xfeedbeef), value);
   }
 }
 
 TEST_F(EmptyInitializedKvs, Iteration_GetValue_TooSmall) {
-  ASSERT_EQ(Status::OK, kvs_.Put("key", uint32_t(0xfeedbeef)));
+  ASSERT_EQ(Status::Ok(), kvs_.Put("key", uint32_t(0xfeedbeef)));
 
   for (KeyValueStore::Item entry : kvs_) {
     uint8_t value = 0;
-    EXPECT_EQ(Status::INVALID_ARGUMENT, entry.Get(&value));
+    EXPECT_EQ(Status::InvalidArgument(), entry.Get(&value));
     EXPECT_EQ(0u, value);
   }
 }
 
 TEST_F(EmptyInitializedKvs, Iteration_GetValue_TooLarge) {
-  ASSERT_EQ(Status::OK, kvs_.Put("key", uint32_t(0xfeedbeef)));
+  ASSERT_EQ(Status::Ok(), kvs_.Put("key", uint32_t(0xfeedbeef)));
 
   for (KeyValueStore::Item entry : kvs_) {
     uint64_t value = 0;
-    EXPECT_EQ(Status::INVALID_ARGUMENT, entry.Get(&value));
+    EXPECT_EQ(Status::InvalidArgument(), entry.Get(&value));
     EXPECT_EQ(0u, value);
   }
 }
 
 TEST_F(EmptyInitializedKvs, Iteration_EmptyAfterDeletion) {
-  ASSERT_EQ(Status::OK, kvs_.Put("kEy", std::as_bytes(std::span("123"))));
-  ASSERT_EQ(Status::OK, kvs_.Delete("kEy"));
+  ASSERT_EQ(Status::Ok(), kvs_.Put("kEy", std::as_bytes(std::span("123"))));
+  ASSERT_EQ(Status::Ok(), kvs_.Delete("kEy"));
 
   for (KeyValueStore::Item entry : kvs_) {
     static_cast<void>(entry);
@@ -424,10 +425,10 @@
   std::memset(buf2, 2, sizeof(buf2));
 
   // Start with things in KVS
-  ASSERT_EQ(Status::OK, kvs_.Put(key1, buf1));
-  ASSERT_EQ(Status::OK, kvs_.Put(key2, buf2));
+  ASSERT_EQ(Status::Ok(), kvs_.Put(key1, buf1));
+  ASSERT_EQ(Status::Ok(), kvs_.Put(key2, buf2));
   for (size_t j = 0; j < keys.size(); j++) {
-    ASSERT_EQ(Status::OK, kvs_.Put(keys[j], j));
+    ASSERT_EQ(Status::Ok(), kvs_.Put(keys[j], j));
   }
 
   for (size_t i = 0; i < 100; i++) {
@@ -436,28 +437,28 @@
     size_t size2 = (kLargestBufSize) / (100 - i);
     for (size_t j = 0; j < 50; j++) {
       // Rewrite a single key many times, can fill up a sector
-      ASSERT_EQ(Status::OK, kvs_.Put("some_data", j));
+      ASSERT_EQ(Status::Ok(), kvs_.Put("some_data", j));
     }
     // Delete and re-add everything
-    ASSERT_EQ(Status::OK, kvs_.Delete(key1));
-    ASSERT_EQ(Status::OK, kvs_.Put(key1, std::span(buf1, size1)));
-    ASSERT_EQ(Status::OK, kvs_.Delete(key2));
-    ASSERT_EQ(Status::OK, kvs_.Put(key2, std::span(buf2, size2)));
+    ASSERT_EQ(Status::Ok(), kvs_.Delete(key1));
+    ASSERT_EQ(Status::Ok(), kvs_.Put(key1, std::span(buf1, size1)));
+    ASSERT_EQ(Status::Ok(), kvs_.Delete(key2));
+    ASSERT_EQ(Status::Ok(), kvs_.Put(key2, std::span(buf2, size2)));
     for (size_t j = 0; j < keys.size(); j++) {
-      ASSERT_EQ(Status::OK, kvs_.Delete(keys[j]));
-      ASSERT_EQ(Status::OK, kvs_.Put(keys[j], j));
+      ASSERT_EQ(Status::Ok(), kvs_.Delete(keys[j]));
+      ASSERT_EQ(Status::Ok(), kvs_.Put(keys[j], j));
     }
 
     // Re-enable and verify
-    ASSERT_EQ(Status::OK, kvs_.Init());
+    ASSERT_EQ(Status::Ok(), kvs_.Init());
     static byte buf[4 * 1024];
-    ASSERT_EQ(Status::OK, kvs_.Get(key1, std::span(buf, size1)).status());
+    ASSERT_EQ(Status::Ok(), kvs_.Get(key1, std::span(buf, size1)).status());
     ASSERT_EQ(std::memcmp(buf, buf1, size1), 0);
-    ASSERT_EQ(Status::OK, kvs_.Get(key2, std::span(buf, size2)).status());
+    ASSERT_EQ(Status::Ok(), kvs_.Get(key2, std::span(buf, size2)).status());
     ASSERT_EQ(std::memcmp(buf2, buf2, size2), 0);
     for (size_t j = 0; j < keys.size(); j++) {
       size_t ret = 1000;
-      ASSERT_EQ(Status::OK, kvs_.Get(keys[j], &ret));
+      ASSERT_EQ(Status::Ok(), kvs_.Get(keys[j], &ret));
       ASSERT_EQ(ret, j);
     }
   }
@@ -467,28 +468,28 @@
   // Add some data
   uint8_t value1 = 0xDA;
   ASSERT_EQ(
-      Status::OK,
+      Status::Ok(),
       kvs_.Put(keys[0], std::as_bytes(std::span(&value1, sizeof(value1)))));
 
   uint32_t value2 = 0xBAD0301f;
-  ASSERT_EQ(Status::OK, kvs_.Put(keys[1], value2));
+  ASSERT_EQ(Status::Ok(), kvs_.Put(keys[1], value2));
 
   // Verify data
   uint32_t test2;
-  EXPECT_EQ(Status::OK, kvs_.Get(keys[1], &test2));
+  EXPECT_EQ(Status::Ok(), kvs_.Get(keys[1], &test2));
   uint8_t test1;
-  ASSERT_EQ(Status::OK, kvs_.Get(keys[0], &test1));
+  ASSERT_EQ(Status::Ok(), kvs_.Get(keys[0], &test1));
 
   EXPECT_EQ(test1, value1);
   EXPECT_EQ(test2, value2);
 
   // Delete a key
-  EXPECT_EQ(Status::OK, kvs_.Delete(keys[0]));
+  EXPECT_EQ(Status::Ok(), kvs_.Delete(keys[0]));
 
   // Verify it was erased
-  EXPECT_EQ(kvs_.Get(keys[0], &test1), Status::NOT_FOUND);
+  EXPECT_EQ(kvs_.Get(keys[0], &test1), Status::NotFound());
   test2 = 0;
-  ASSERT_EQ(Status::OK,
+  ASSERT_EQ(Status::Ok(),
             kvs_.Get(keys[1],
                      std::span(reinterpret_cast<byte*>(&test2), sizeof(test2)))
                 .status());
diff --git a/pw_kvs/key_value_store_map_test.cc b/pw_kvs/key_value_store_map_test.cc
index 87d0dee..27b961c 100644
--- a/pw_kvs/key_value_store_map_test.cc
+++ b/pw_kvs/key_value_store_map_test.cc
@@ -88,9 +88,9 @@
         // For KVS magic value always use a random 32 bit integer rather than a
         // human readable 4 bytes. See pw_kvs/format.h for more information.
         kvs_(&partition_, {.magic = 0xc857e51d, .checksum = nullptr}) {
-    EXPECT_EQ(Status::OK, partition_.Erase());
+    EXPECT_EQ(Status::Ok(), partition_.Erase());
     Status result = kvs_.Init();
-    EXPECT_EQ(Status::OK, result);
+    EXPECT_EQ(Status::Ok(), result);
 
     if (!result.ok()) {
       std::abort();
@@ -250,7 +250,7 @@
         EXPECT_EQ(map_entry->first, item.key());
 
         char value[kMaxValueLength + 1] = {};
-        EXPECT_EQ(Status::OK,
+        EXPECT_EQ(Status::Ok(),
                   item.Get(std::as_writable_bytes(std::span(value))).status());
         EXPECT_EQ(map_entry->second, std::string(value));
       }
@@ -267,10 +267,10 @@
     Status result = kvs_.Put(key, std::as_bytes(std::span(value)));
 
     if (key.empty() || key.size() > internal::Entry::kMaxKeyLength) {
-      EXPECT_EQ(Status::INVALID_ARGUMENT, result);
+      EXPECT_EQ(Status::InvalidArgument(), result);
     } else if (map_.size() == kvs_.max_size()) {
-      EXPECT_EQ(Status::RESOURCE_EXHAUSTED, result);
-    } else if (result == Status::RESOURCE_EXHAUSTED) {
+      EXPECT_EQ(Status::ResourceExhausted(), result);
+    } else if (result == Status::ResourceExhausted()) {
       EXPECT_FALSE(map_.empty());
     } else if (result.ok()) {
       map_[key] = value;
@@ -290,9 +290,9 @@
     Status result = kvs_.Delete(key);
 
     if (key.empty() || key.size() > internal::Entry::kMaxKeyLength) {
-      EXPECT_EQ(Status::INVALID_ARGUMENT, result);
+      EXPECT_EQ(Status::InvalidArgument(), result);
     } else if (map_.count(key) == 0) {
-      EXPECT_EQ(Status::NOT_FOUND, result);
+      EXPECT_EQ(Status::NotFound(), result);
     } else if (result.ok()) {
       map_.erase(key);
 
@@ -302,7 +302,7 @@
       }
 
       deleted_.insert(key);
-    } else if (result == Status::RESOURCE_EXHAUSTED) {
+    } else if (result == Status::ResourceExhausted()) {
       PW_LOG_WARN("Delete: RESOURCE_EXHAUSTED could not delete key %s",
                   key.c_str());
     } else {
@@ -315,14 +315,14 @@
   void Init() {
     StartOperation("Init");
     Status status = kvs_.Init();
-    EXPECT_EQ(Status::OK, status);
+    EXPECT_EQ(Status::Ok(), status);
     FinishOperation("Init", status);
   }
 
   void GCFull() {
     StartOperation("GCFull");
     Status status = kvs_.FullMaintenance();
-    EXPECT_EQ(Status::OK, status);
+    EXPECT_EQ(Status::Ok(), status);
 
     KeyValueStore::StorageStats post_stats = kvs_.GetStorageStats();
     if (post_stats.in_use_bytes > ((partition_.size_bytes() * 70) / 100)) {
@@ -338,10 +338,10 @@
     Status status = kvs_.PartialMaintenance();
     KeyValueStore::StorageStats post_stats = kvs_.GetStorageStats();
     if (pre_stats.reclaimable_bytes != 0) {
-      EXPECT_EQ(Status::OK, status);
+      EXPECT_EQ(Status::Ok(), status);
       EXPECT_LT(post_stats.reclaimable_bytes, pre_stats.reclaimable_bytes);
     } else {
-      EXPECT_EQ(Status::NOT_FOUND, status);
+      EXPECT_EQ(Status::NotFound(), status);
       EXPECT_EQ(post_stats.reclaimable_bytes, 0U);
     }
     FinishOperation("GCPartial", status);
diff --git a/pw_kvs/key_value_store_test.cc b/pw_kvs/key_value_store_test.cc
index af981f1..140d174 100644
--- a/pw_kvs/key_value_store_test.cc
+++ b/pw_kvs/key_value_store_test.cc
@@ -28,13 +28,12 @@
 #endif  // DUMP_KVS_STATE_TO_FILE
 
 #include "gtest/gtest.h"
-#include "pw_checksum/ccitt_crc16.h"
+#include "pw_bytes/array.h"
+#include "pw_checksum/crc16_ccitt.h"
 #include "pw_kvs/crc16_checksum.h"
 #include "pw_kvs/fake_flash_memory.h"
 #include "pw_kvs/flash_memory.h"
 #include "pw_kvs/internal/entry.h"
-#include "pw_kvs_private/byte_utils.h"
-#include "pw_kvs_private/macros.h"
 #include "pw_log/log.h"
 #include "pw_status/status.h"
 #include "pw_string/string_builder.h"
@@ -48,12 +47,16 @@
 constexpr size_t kMaxEntries = 256;
 constexpr size_t kMaxUsableSectors = 256;
 
-// Test the functions in byte_utils.h. Create a byte array with AsBytes and
-// ByteStr and check that its contents are correct.
+// Test the functions in byte_utils.h. Create a byte array with bytes::Concat
+// and bytes::String and check that its contents are correct.
 constexpr std::array<char, 2> kTestArray = {'a', 'b'};
 
-constexpr auto kAsBytesTest = AsBytes(
-    'a', uint16_t(1), uint8_t(23), kTestArray, ByteStr("c"), uint64_t(-1));
+constexpr auto kAsBytesTest = bytes::Concat('a',
+                                            uint16_t(1),
+                                            uint8_t(23),
+                                            kTestArray,
+                                            bytes::String("c"),
+                                            uint64_t(-1));
 
 static_assert(kAsBytesTest.size() == 15);
 static_assert(kAsBytesTest[0] == std::byte{'a'});
@@ -118,12 +121,12 @@
     std::FILE* out_file = std::fopen(filename, "w+");
     if (out_file == nullptr) {
       PW_LOG_ERROR("Failed to dump to %s", filename);
-      return Status::DATA_LOSS;
+      return Status::DataLoss();
     }
     std::vector<std::byte> out_vec(memory.size_bytes());
     Status status =
         memory.Read(0, std::span<std::byte>(out_vec.data(), out_vec.size()));
-    if (status != Status::OK) {
+    if (status != Status::Ok()) {
       fclose(out_file);
       return status;
     }
@@ -134,17 +137,17 @@
       PW_LOG_ERROR("Failed to dump to %s, written=%u",
                    filename,
                    static_cast<unsigned>(written));
-      status = Status::DATA_LOSS;
+      status = Status::DataLoss();
     } else {
       PW_LOG_INFO("Dumped to %s", filename);
-      status = Status::OK;
+      status = Status::Ok();
     }
 
     fclose(out_file);
     return status;
   }
 #else
-  Status Dump(const char*) { return Status::OK; }
+  Status Dump(const char*) { return Status::Ok(); }
 #endif  // DUMP_KVS_STATE_TO_FILE
 };
 
@@ -176,7 +179,7 @@
   KeyValueStoreBuffer<kMaxEntries, kMaxUsableSectors> kvs(&test_partition,
                                                           format);
 
-  EXPECT_EQ(kvs.Init(), Status::FAILED_PRECONDITION);
+  EXPECT_EQ(kvs.Init(), Status::FailedPrecondition());
 }
 
 TEST(InitCheck, ZeroSectors) {
@@ -192,7 +195,7 @@
   KeyValueStoreBuffer<kMaxEntries, kMaxUsableSectors> kvs(&test_partition,
                                                           format);
 
-  EXPECT_EQ(kvs.Init(), Status::FAILED_PRECONDITION);
+  EXPECT_EQ(kvs.Init(), Status::FailedPrecondition());
 }
 
 TEST(InitCheck, TooManySectors) {
@@ -207,11 +210,11 @@
   constexpr EntryFormat format{.magic = 0x610f6d17, .checksum = nullptr};
   KeyValueStoreBuffer<kMaxEntries, 2> kvs(&test_partition, format);
 
-  EXPECT_EQ(kvs.Init(), Status::FAILED_PRECONDITION);
+  EXPECT_EQ(kvs.Init(), Status::FailedPrecondition());
 }
 
-#define ASSERT_OK(expr) ASSERT_EQ(Status::OK, expr)
-#define EXPECT_OK(expr) EXPECT_EQ(Status::OK, expr)
+#define ASSERT_OK(expr) ASSERT_EQ(Status::Ok(), expr)
+#define EXPECT_OK(expr) EXPECT_EQ(Status::Ok(), expr)
 
 TEST(InMemoryKvs, WriteOneKeyMultipleTimes) {
   // Create and erase the fake flash. It will persist across reloads.
@@ -355,7 +358,7 @@
 
   // Create and erase the fake flash.
   Flash flash;
-  ASSERT_EQ(Status::OK, flash.partition.Erase());
+  ASSERT_EQ(Status::Ok(), flash.partition.Erase());
 
   // Create and initialize the KVS.
   // For KVS magic value always use a random 32 bit integer rather than a
@@ -390,7 +393,7 @@
 TEST(InMemoryKvs, CallingEraseTwice_NothingWrittenToFlash) {
   // Create and erase the fake flash.
   Flash flash;
-  ASSERT_EQ(Status::OK, flash.partition.Erase());
+  ASSERT_EQ(Status::Ok(), flash.partition.Erase());
 
   // Create and initialize the KVS.
   KeyValueStoreBuffer<kMaxEntries, kMaxUsableSectors> kvs(&flash.partition,
@@ -398,22 +401,22 @@
   ASSERT_OK(kvs.Init());
 
   const uint8_t kValue = 0xDA;
-  ASSERT_EQ(Status::OK, kvs.Put(keys[0], kValue));
-  ASSERT_EQ(Status::OK, kvs.Delete(keys[0]));
+  ASSERT_EQ(Status::Ok(), kvs.Put(keys[0], kValue));
+  ASSERT_EQ(Status::Ok(), kvs.Delete(keys[0]));
 
   // Compare before / after checksums to verify that nothing was written.
-  const uint16_t crc = checksum::CcittCrc16(flash.memory.buffer());
+  const uint16_t crc = checksum::Crc16Ccitt::Calculate(flash.memory.buffer());
 
-  EXPECT_EQ(kvs.Delete(keys[0]), Status::NOT_FOUND);
+  EXPECT_EQ(kvs.Delete(keys[0]), Status::NotFound());
 
-  EXPECT_EQ(crc, checksum::CcittCrc16(flash.memory.buffer()));
+  EXPECT_EQ(crc, checksum::Crc16Ccitt::Calculate(flash.memory.buffer()));
 }
 
 class LargeEmptyInitializedKvs : public ::testing::Test {
  protected:
   LargeEmptyInitializedKvs() : kvs_(&large_test_partition, default_format) {
-    ASSERT_EQ(Status::OK, large_test_partition.Erase());
-    ASSERT_EQ(Status::OK, kvs_.Init());
+    ASSERT_EQ(Status::Ok(), large_test_partition.Erase());
+    ASSERT_EQ(Status::Ok(), kvs_.Init());
   }
 
   KeyValueStoreBuffer<kMaxEntries, kMaxUsableSectors> kvs_;
@@ -423,23 +426,52 @@
   const uint8_t kValue1 = 0xDA;
   const uint8_t kValue2 = 0x12;
   uint8_t value;
-  ASSERT_EQ(Status::OK, kvs_.Put(keys[0], kValue1));
+  ASSERT_EQ(Status::Ok(), kvs_.Put(keys[0], kValue1));
   EXPECT_EQ(kvs_.size(), 1u);
-  ASSERT_EQ(Status::OK, kvs_.Delete(keys[0]));
-  EXPECT_EQ(kvs_.Get(keys[0], &value), Status::NOT_FOUND);
-  ASSERT_EQ(Status::OK, kvs_.Put(keys[1], kValue1));
-  ASSERT_EQ(Status::OK, kvs_.Put(keys[2], kValue2));
-  ASSERT_EQ(Status::OK, kvs_.Delete(keys[1]));
-  EXPECT_EQ(Status::OK, kvs_.Get(keys[2], &value));
+  ASSERT_EQ(Status::Ok(), kvs_.Delete(keys[0]));
+  EXPECT_EQ(kvs_.Get(keys[0], &value), Status::NotFound());
+  ASSERT_EQ(Status::Ok(), kvs_.Put(keys[1], kValue1));
+  ASSERT_EQ(Status::Ok(), kvs_.Put(keys[2], kValue2));
+  ASSERT_EQ(Status::Ok(), kvs_.Delete(keys[1]));
+  EXPECT_EQ(Status::Ok(), kvs_.Get(keys[2], &value));
   EXPECT_EQ(kValue2, value);
-  ASSERT_EQ(kvs_.Get(keys[1], &value), Status::NOT_FOUND);
+  ASSERT_EQ(kvs_.Get(keys[1], &value), Status::NotFound());
   EXPECT_EQ(kvs_.size(), 1u);
 }
 
+TEST_F(LargeEmptyInitializedKvs, FullMaintenance) {
+  const uint8_t kValue1 = 0xDA;
+  const uint8_t kValue2 = 0x12;
+
+  // Write a key and write again with a different value, resulting in a stale
+  // entry from the first write.
+  ASSERT_EQ(Status::Ok(), kvs_.Put(keys[0], kValue1));
+  ASSERT_EQ(Status::Ok(), kvs_.Put(keys[0], kValue2));
+  EXPECT_EQ(kvs_.size(), 1u);
+
+  KeyValueStore::StorageStats stats = kvs_.GetStorageStats();
+  EXPECT_EQ(stats.sector_erase_count, 0u);
+  EXPECT_GT(stats.reclaimable_bytes, 0u);
+
+  // Do regular FullMaintenance, which should not touch the sector with valid
+  // data.
+  EXPECT_EQ(Status::Ok(), kvs_.FullMaintenance());
+  stats = kvs_.GetStorageStats();
+  EXPECT_EQ(stats.sector_erase_count, 0u);
+  EXPECT_GT(stats.reclaimable_bytes, 0u);
+
+  // Do aggressive FullMaintenance, which should GC the sector with valid data,
+  // resulting in no reclaimable bytes and an erased sector.
+  EXPECT_EQ(Status::Ok(), kvs_.HeavyMaintenance());
+  stats = kvs_.GetStorageStats();
+  EXPECT_EQ(stats.sector_erase_count, 1u);
+  EXPECT_EQ(stats.reclaimable_bytes, 0u);
+}
+
 TEST(InMemoryKvs, Put_MaxValueSize) {
   // Create and erase the fake flash.
   Flash flash;
-  ASSERT_EQ(Status::OK, flash.partition.Erase());
+  ASSERT_EQ(Status::Ok(), flash.partition.Erase());
 
   // Create and initialize the KVS.
   KeyValueStoreBuffer<kMaxEntries, kMaxUsableSectors> kvs(&flash.partition,
@@ -459,12 +491,12 @@
   ASSERT_GT(sizeof(large_test_flash), max_value_size + 2 * sizeof(EntryHeader));
   auto big_data = std::as_bytes(std::span(&large_test_flash, 1));
 
-  EXPECT_EQ(Status::OK, kvs.Put("K", big_data.subspan(0, max_value_size)));
+  EXPECT_EQ(Status::Ok(), kvs.Put("K", big_data.subspan(0, max_value_size)));
 
   // Larger than maximum is rejected.
-  EXPECT_EQ(Status::INVALID_ARGUMENT,
+  EXPECT_EQ(Status::InvalidArgument(),
             kvs.Put("K", big_data.subspan(0, max_value_size + 1)));
-  EXPECT_EQ(Status::INVALID_ARGUMENT, kvs.Put("K", big_data));
+  EXPECT_EQ(Status::InvalidArgument(), kvs.Put("K", big_data));
 }
 
 }  // namespace pw::kvs
diff --git a/pw_kvs/key_value_store_wear_test.cc b/pw_kvs/key_value_store_wear_test.cc
index d6cadc4..d272535 100644
--- a/pw_kvs/key_value_store_wear_test.cc
+++ b/pw_kvs/key_value_store_wear_test.cc
@@ -35,7 +35,7 @@
       : flash_(internal::Entry::kMinAlignmentBytes),
         partition_(&flash_, 0, flash_.sector_count()),
         kvs_(&partition_, format) {
-    EXPECT_EQ(Status::OK, kvs_.Init());
+    EXPECT_EQ(Status::Ok(), kvs_.Init());
   }
 
   static constexpr size_t kSectors = 16;
@@ -92,7 +92,7 @@
     test_data[0]++;
 
     EXPECT_EQ(
-        Status::OK,
+        Status::Ok(),
         kvs_.Put("key",
                  std::as_bytes(std::span(test_data, sizeof(test_data) - 70))));
   }
@@ -105,7 +105,7 @@
     test_data[0]++;
 
     printf("Add entry %zu\n", i);
-    EXPECT_EQ(Status::OK, kvs_.Put("big_key", test_data));
+    EXPECT_EQ(Status::Ok(), kvs_.Put("big_key", test_data));
   }
 
   EXPECT_EQ(2u, kvs_.size());
diff --git a/pw_kvs/public/pw_kvs/alignment.h b/pw_kvs/public/pw_kvs/alignment.h
index e8cf191..56d0c08 100644
--- a/pw_kvs/public/pw_kvs/alignment.h
+++ b/pw_kvs/public/pw_kvs/alignment.h
@@ -116,7 +116,7 @@
   // TODO: This should convert to PW_CHECK once that is available for use in
   // host tests.
   if (alignment_bytes > kBufferSize) {
-    return StatusWithSize::INTERNAL;
+    return StatusWithSize::Internal();
   }
 
   AlignedWriterBuffer<kBufferSize> buffer(alignment_bytes, output);
diff --git a/pw_kvs/public/pw_kvs/crc16_checksum.h b/pw_kvs/public/pw_kvs/crc16_checksum.h
index 6cbef1f..1cbff63 100644
--- a/pw_kvs/public/pw_kvs/crc16_checksum.h
+++ b/pw_kvs/public/pw_kvs/crc16_checksum.h
@@ -15,7 +15,7 @@
 
 #include <span>
 
-#include "pw_checksum/ccitt_crc16.h"
+#include "pw_checksum/crc16_ccitt.h"
 #include "pw_kvs/checksum.h"
 
 namespace pw::kvs {
@@ -24,14 +24,14 @@
  public:
   ChecksumCrc16() : ChecksumAlgorithm(std::as_bytes(std::span(&crc_, 1))) {}
 
-  void Reset() override { crc_ = checksum::kCcittCrc16DefaultInitialValue; }
+  void Reset() override { crc_ = checksum::Crc16Ccitt::kInitialValue; }
 
   void Update(std::span<const std::byte> data) override {
-    crc_ = checksum::CcittCrc16(data, crc_);
+    crc_ = checksum::Crc16Ccitt::Calculate(data, crc_);
   }
 
  private:
-  uint16_t crc_ = checksum::kCcittCrc16DefaultInitialValue;
+  uint16_t crc_ = checksum::Crc16Ccitt::kInitialValue;
 };
 
 }  // namespace pw::kvs
diff --git a/pw_kvs/public/pw_kvs/fake_flash_memory.h b/pw_kvs/public/pw_kvs/fake_flash_memory.h
index c1b0bbe..9795e01 100644
--- a/pw_kvs/public/pw_kvs/fake_flash_memory.h
+++ b/pw_kvs/public/pw_kvs/fake_flash_memory.h
@@ -97,9 +97,9 @@
         write_errors_(write_errors) {}
 
   // The fake flash is always enabled.
-  Status Enable() override { return Status::OK; }
+  Status Enable() override { return Status::Ok(); }
 
-  Status Disable() override { return Status::OK; }
+  Status Disable() override { return Status::Ok(); }
 
   bool IsEnabled() const override { return true; }
 
diff --git a/pw_kvs/public/pw_kvs/flash_memory.h b/pw_kvs/public/pw_kvs/flash_memory.h
index a9ad8b2..d1864b4 100644
--- a/pw_kvs/public/pw_kvs/flash_memory.h
+++ b/pw_kvs/public/pw_kvs/flash_memory.h
@@ -18,7 +18,7 @@
 #include <initializer_list>
 #include <span>
 
-#include "pw_assert/assert.h"
+#include "pw_assert/light.h"
 #include "pw_kvs/alignment.h"
 #include "pw_status/status.h"
 #include "pw_status/status_with_size.h"
@@ -34,56 +34,59 @@
  public:
   // The flash address is in the range of: 0 to FlashSize.
   typedef uint32_t Address;
-  constexpr FlashMemory(size_t sector_size,
-                        size_t sector_count,
-                        size_t alignment,
-                        uint32_t start_address = 0,
-                        uint32_t sector_start = 0,
-                        std::byte erased_memory_content = std::byte{0xFF})
+
+  // TODO(pwbug/246): This can be constexpr when tokenized asserts are fixed.
+  FlashMemory(size_t sector_size,
+              size_t sector_count,
+              size_t alignment,
+              uint32_t start_address = 0,
+              uint32_t sector_start = 0,
+              std::byte erased_memory_content = std::byte{0xFF})
       : sector_size_(sector_size),
         flash_sector_count_(sector_count),
         alignment_(alignment),
         start_address_(start_address),
         start_sector_(sector_start),
         erased_memory_content_(erased_memory_content) {
-    PW_DCHECK_UINT_NE(alignment_, 0);
+    PW_ASSERT(alignment_ != 0u);
   }
 
   virtual ~FlashMemory() = default;
 
   virtual Status Enable() = 0;
+
   virtual Status Disable() = 0;
+
   virtual bool IsEnabled() const = 0;
-  virtual Status SelfTest() { return Status::UNIMPLEMENTED; }
+
+  virtual Status SelfTest() { return Status::Unimplemented(); }
 
   // Erase num_sectors starting at a given address. Blocking call.
-  // Address should be on a sector boundary.
+  // Address should be on a sector boundary. Returns:
   //
-  //                OK: success
-  // DEADLINE_EXCEEDED: timeout
-  //  INVALID_ARGUMENT: address is not sector-aligned
-  //      OUT_OF_RANGE: erases past the end of the memory
-  //
+  // OK - success
+  // DEADLINE_EXCEEDED - timeout
+  // INVALID_ARGUMENT - address is not sector-aligned
+  // OUT_OF_RANGE - erases past the end of the memory
   virtual Status Erase(Address flash_address, size_t num_sectors) = 0;
 
-  // Reads bytes from flash into buffer. Blocking call.
+  // Reads bytes from flash into buffer. Blocking call. Returns:
   //
-  //                OK: success
-  // DEADLINE_EXCEEDED: timeout
-  //      OUT_OF_RANGE: write does not fit in the flash memory
+  // OK - success
+  // DEADLINE_EXCEEDED - timeout
+  // OUT_OF_RANGE - write does not fit in the flash memory
   virtual StatusWithSize Read(Address address, std::span<std::byte> output) = 0;
 
   StatusWithSize Read(Address address, void* buffer, size_t len) {
     return Read(address, std::span(static_cast<std::byte*>(buffer), len));
   }
 
-  // Writes bytes to flash. Blocking call.
+  // Writes bytes to flash. Blocking call. Returns:
   //
-  //                OK: success
-  // DEADLINE_EXCEEDED: timeout
-  //  INVALID_ARGUMENT: address or data size are not aligned
-  //      OUT_OF_RANGE: write does not fit in the memory
-  //
+  // OK - success
+  // DEADLINE_EXCEEDED - timeout
+  // INVALID_ARGUMENT - address or data size are not aligned
+  // OUT_OF_RANGE - write does not fit in the memory
   virtual StatusWithSize Write(Address destination_flash_address,
                                std::span<const std::byte> data) = 0;
 
@@ -102,14 +105,20 @@
   // sector start is not 0. (ex.: cases where there are portions of flash
   // that should be handled independently).
   constexpr uint32_t start_sector() const { return start_sector_; }
+
   constexpr size_t sector_size_bytes() const { return sector_size_; }
+
   constexpr size_t sector_count() const { return flash_sector_count_; }
+
   constexpr size_t alignment_bytes() const { return alignment_; }
+
   constexpr size_t size_bytes() const {
     return sector_size_ * flash_sector_count_;
   }
+
   // Address of the start of flash (the address of sector 0)
   constexpr uint32_t start_address() const { return start_address_; }
+
   constexpr std::byte erased_memory_content() const {
     return erased_memory_content_;
   }
@@ -154,78 +163,71 @@
     FlashPartition::Address address_;
   };
 
-  constexpr FlashPartition(
+  // TODO(pwbug/246): This can be constexpr when tokenized asserts are fixed.
+  FlashPartition(
       FlashMemory* flash,
       uint32_t start_sector_index,
       uint32_t sector_count,
       uint32_t alignment_bytes = 0,  // Defaults to flash alignment
-      PartitionPermission permission = PartitionPermission::kReadAndWrite)
-      : flash_(*flash),
-        start_sector_index_(start_sector_index),
-        sector_count_(sector_count),
-        alignment_bytes_(alignment_bytes == 0
-                             ? flash_.alignment_bytes()
-                             : std::max(alignment_bytes,
-                                        uint32_t(flash_.alignment_bytes()))),
-        permission_(permission) {
-    uint32_t misalignment = (alignment_bytes_ % flash_.alignment_bytes());
-    PW_DCHECK_UINT_EQ(
-        misalignment,
-        0,
-        "Flash partition alignmentmust be a multiple of the flash "
-        "memory alignment");
-  }
+      PartitionPermission permission = PartitionPermission::kReadAndWrite);
 
   // Creates a FlashPartition that uses the entire flash with its alignment.
-  constexpr FlashPartition(FlashMemory* flash)
+  // TODO(pwbug/246): This can be constexpr when tokenized asserts are fixed.
+  FlashPartition(FlashMemory* flash)
       : FlashPartition(
             flash, 0, flash->sector_count(), flash->alignment_bytes()) {}
 
+  FlashPartition(FlashPartition&&) = default;
   FlashPartition(const FlashPartition&) = delete;
   FlashPartition& operator=(const FlashPartition&) = delete;
 
   virtual ~FlashPartition() = default;
 
   // Performs any required partition or flash-level initialization.
-  virtual Status Init() { return Status::OK; }
+  virtual Status Init() { return Status::Ok(); }
 
   // Erase num_sectors starting at a given address. Blocking call.
-  // Address should be on a sector boundary.
-  // Returns: OK, on success.
-  //          TIMEOUT, on timeout.
-  //          INVALID_ARGUMENT, if address or sector count is invalid.
-  //          PERMISSION_DENIED, if partition is read only.
-  //          UNKNOWN, on HAL error
+  // Address must be on a sector boundary. Returns:
+  //
+  // OK - success.
+  // TIMEOUT - on timeout.
+  // INVALID_ARGUMENT - address or sector count is invalid.
+  // PERMISSION_DENIED - partition is read only.
+  // UNKNOWN - HAL error
   virtual Status Erase(Address address, size_t num_sectors);
 
   Status Erase() { return Erase(0, this->sector_count()); }
 
-  // Reads bytes from flash into buffer. Blocking call.
-  // Returns: OK, on success.
-  //          TIMEOUT, on timeout.
-  //          INVALID_ARGUMENT, if address or length is invalid.
-  //          UNKNOWN, on HAL error
+  // Reads bytes from flash into buffer. Blocking call. Returns:
+  //
+  // OK - success.
+  // TIMEOUT - on timeout.
+  // INVALID_ARGUMENT - address or length is invalid.
+  // UNKNOWN - HAL error
   virtual StatusWithSize Read(Address address, std::span<std::byte> output);
 
   StatusWithSize Read(Address address, size_t length, void* output) {
     return Read(address, std::span(static_cast<std::byte*>(output), length));
   }
 
-  // Writes bytes to flash. Blocking call.
-  // Returns: OK, on success.
-  //          TIMEOUT, on timeout.
-  //          INVALID_ARGUMENT, if address or length is invalid.
-  //          PERMISSION_DENIED, if partition is read only.
-  //          UNKNOWN, on HAL error
+  // Writes bytes to flash. Address and data.size_bytes() must both be a
+  // multiple of alignment_bytes(). Blocking call. Returns:
+  //
+  // OK - success.
+  // TIMEOUT - on timeout.
+  // INVALID_ARGUMENT - address or length is invalid.
+  // PERMISSION_DENIED - partition is read only.
+  // UNKNOWN - HAL error
   virtual StatusWithSize Write(Address address,
                                std::span<const std::byte> data);
 
   // Check to see if chunk of flash partition is erased. Address and len need to
-  // be aligned with FlashMemory.
-  // Returns: OK, on success.
-  //          TIMEOUT, on timeout.
-  //          INVALID_ARGUMENT, if address or length is invalid.
-  //          UNKNOWN, on HAL error
+  // be aligned with FlashMemory. Returns:
+  //
+  // OK - success.
+  // TIMEOUT - on timeout.
+  // INVALID_ARGUMENT - address or length is invalid.
+  // UNKNOWN - HAL error
   // TODO: Result<bool>
   virtual Status IsRegionErased(Address source_flash_address,
                                 size_t len,
@@ -251,6 +253,7 @@
 
   size_t size_bytes() const { return sector_count() * sector_size_bytes(); }
 
+  // Alignment required for write address and write size.
   size_t alignment_bytes() const { return alignment_bytes_; }
 
   size_t sector_count() const { return sector_count_; }
@@ -274,10 +277,15 @@
     return permission_ == PartitionPermission::kReadAndWrite;
   }
 
+  constexpr std::byte erased_memory_content() const {
+    return flash_.erased_memory_content();
+  }
+
   uint32_t start_sector_index() const { return start_sector_index_; }
 
  protected:
   Status CheckBounds(Address address, size_t len) const;
+
   FlashMemory& flash() const { return flash_; }
 
  private:
diff --git a/pw_kvs/public/pw_kvs/io.h b/pw_kvs/public/pw_kvs/io.h
index 167182f..80a0ef0 100644
--- a/pw_kvs/public/pw_kvs/io.h
+++ b/pw_kvs/public/pw_kvs/io.h
@@ -71,7 +71,7 @@
 
 // Output adapter that calls a method on a class with a std::span of bytes. If
 // the method returns void instead of the expected Status, Write always returns
-// Status::OK.
+// Status::Ok().
 template <auto kMethod>
 class OutputToMethod final : public Output {
   using Class = typename internal::FunctionTraits<decltype(kMethod)>::Class;
diff --git a/pw_kvs/public/pw_kvs/key_value_store.h b/pw_kvs/public/pw_kvs/key_value_store.h
index 6988d6f..de4249e 100644
--- a/pw_kvs/public/pw_kvs/key_value_store.h
+++ b/pw_kvs/public/pw_kvs/key_value_store.h
@@ -180,10 +180,26 @@
   StatusWithSize ValueSize(std::string_view key) const;
 
   // Perform all maintenance possible, including all neeeded repairing of
-  // corruption and garbage collection of all reclaimable space in the KVS. When
-  // configured for manual recovery, this is the only way KVS repair is
-  // triggered.
-  Status FullMaintenance();
+  // corruption and garbage collection of reclaimable space in the KVS. When
+  // configured for manual recovery, this (along with FullMaintenance) is the
+  // only way KVS repair is triggered.
+  //
+  // - Heavy garbage collection of all reclaimable space, regardless of valid
+  //   data in the sector.
+  Status HeavyMaintenance() {
+    return FullMaintenanceHelper(MaintenanceType::kHeavy);
+  }
+
+  // Perform all maintenance possible, including all neeeded repairing of
+  // corruption and garbage collection of reclaimable space in the KVS. When
+  // configured for manual recovery, this (along with HeavyMaintenance) is the
+  // only way KVS repair is triggered.
+  //
+  // - Regular will not garbage collect sectors with valid data unless the KVS
+  //   is mostly full.
+  Status FullMaintenance() {
+    return FullMaintenanceHelper(MaintenanceType::kRegular);
+  }
 
   // Perform a portion of KVS maintenance. If configured for at least lazy
   // recovery, will do any needed repairing of corruption. Does garbage
@@ -292,6 +308,7 @@
     size_t writable_bytes;
     size_t in_use_bytes;
     size_t reclaimable_bytes;
+    size_t sector_erase_count;
     size_t corrupt_sectors_recovered;
     size_t missing_redundant_entries_recovered;
   };
@@ -445,6 +462,21 @@
                        KeyValueStore::Address& address,
                        std::span<const Address> addresses_to_skip);
 
+  // Perform all maintenance possible, including all neeeded repairing of
+  // corruption and garbage collection of reclaimable space in the KVS. When
+  // configured for manual recovery, this is the only way KVS repair is
+  // triggered.
+  //
+  // - Regular will not garbage collect sectors with valid data unless the KVS
+  //   is mostly full.
+  // - Heavy will garbage collect all reclaimable space regardless of valid data
+  //   in the sector.
+  enum class MaintenanceType {
+    kRegular,
+    kHeavy,
+  };
+  Status FullMaintenanceHelper(MaintenanceType maintenance_type);
+
   // Find and garbage collect a singe sector that does not include an address to
   // skip.
   Status GarbageCollect(std::span<const Address> addresses_to_skip);
@@ -519,11 +551,12 @@
   // make it mutable.
   mutable bool error_detected_;
 
-  struct ErrorStats {
+  struct InternalStats {
+    size_t sector_erase_count;
     size_t corrupt_sectors_recovered;
     size_t missing_redundant_entries_recovered;
   };
-  ErrorStats error_stats_;
+  InternalStats internal_stats_;
 
   uint32_t last_transaction_id_;
 };
diff --git a/pw_sys_io_baremetal_lm3s6965evb/early_boot.c b/pw_kvs/public/pw_kvs/test_key_value_store.h
similarity index 82%
copy from pw_sys_io_baremetal_lm3s6965evb/early_boot.c
copy to pw_kvs/public/pw_kvs/test_key_value_store.h
index 1670b7d..73aa609 100644
--- a/pw_sys_io_baremetal_lm3s6965evb/early_boot.c
+++ b/pw_kvs/public/pw_kvs/test_key_value_store.h
@@ -11,7 +11,12 @@
 // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 // License for the specific language governing permissions and limitations under
 // the License.
+#pragma once
 
-#include "pw_boot_armv7m/boot.h"
+#include "pw_kvs/key_value_store.h"
 
-void pw_PreStaticConstructorInit() {}
\ No newline at end of file
+namespace pw::kvs {
+
+KeyValueStore& TestKvs();
+
+}  // namespace pw::kvs
diff --git a/pw_kvs/pw_kvs_private/byte_utils.h b/pw_kvs/pw_kvs_private/byte_utils.h
deleted file mode 100644
index a0315b8..0000000
--- a/pw_kvs/pw_kvs_private/byte_utils.h
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2020 The Pigweed Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-//     https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-// Utilities for building std::byte arrays from strings or integer values.
-#pragma once
-
-#include <array>
-#include <cstddef>
-
-namespace pw {
-
-template <typename T, typename... Args>
-constexpr void CopyBytes(std::byte* array, T value, Args... args) {
-  if constexpr (std::is_integral_v<T>) {
-    if constexpr (sizeof(T) == 1u) {
-      *array++ = static_cast<std::byte>(value);
-    } else {
-      for (size_t i = 0; i < sizeof(T); ++i) {
-        *array++ = static_cast<std::byte>(value & 0xFF);
-        value >>= 8;
-      }
-    }
-  } else {
-    static_assert(sizeof(value[0]) == sizeof(std::byte));
-    for (auto b : value) {
-      *array++ = static_cast<std::byte>(b);
-    }
-  }
-
-  if constexpr (sizeof...(args) > 0u) {
-    CopyBytes(array, args...);
-  }
-}
-
-template <typename T>
-constexpr size_t SizeOfBytes(const T& arg) {
-  if constexpr (std::is_integral_v<T>) {
-    return sizeof(arg);
-  } else {
-    return arg.size();
-  }
-}
-
-// Converts a series of integers or byte arrays to a std::byte array at compile
-// time.
-template <typename... Args>
-constexpr auto AsBytes(Args... args) {
-  std::array<std::byte, (SizeOfBytes(args) + ...)> bytes{};
-
-  auto iterator = bytes.begin();
-  CopyBytes(iterator, args...);
-
-  return bytes;
-}
-
-template <size_t kSize>
-constexpr auto InitializedBytes(uint8_t value) {
-  std::array<std::byte, kSize> bytes{};
-  for (std::byte& b : bytes) {
-    b = std::byte(value);
-  }
-  return bytes;
-}
-
-namespace internal {
-
-template <typename T, size_t... kIndex>
-constexpr auto ByteStr(const T& array, std::index_sequence<kIndex...>) {
-  return std::array{static_cast<std::byte>(array[kIndex])...};
-}
-
-}  // namespace internal
-
-// Converts a string literal to a byte array, without the trailing '\0'.
-template <size_t kSize, typename Indices = std::make_index_sequence<kSize - 1>>
-constexpr auto ByteStr(const char (&str)[kSize]) {
-  return internal::ByteStr(str, Indices{});
-}
-
-}  // namespace pw
diff --git a/pw_kvs/pw_kvs_private/config.h b/pw_kvs/pw_kvs_private/config.h
index bbff197..fb9dc79 100644
--- a/pw_kvs/pw_kvs_private/config.h
+++ b/pw_kvs/pw_kvs_private/config.h
@@ -17,7 +17,12 @@
 
 #include <cstddef>
 
-// The maximum flash alignment supported
+// Which log level to use for pw_kvs logs.
+#ifndef PW_KVS_LOG_LEVEL
+#define PW_KVS_LOG_LEVEL PW_LOG_LEVEL_INFO
+#endif  // PW_KVS_LOG_LEVEL
+
+// The maximum flash alignment supported.
 #ifndef PW_KVS_MAX_FLASH_ALIGNMENT
 #define PW_KVS_MAX_FLASH_ALIGNMENT 256UL
 #endif  // PW_KVS_MAX_FLASH_ALIGNMENT
@@ -26,5 +31,7 @@
               "Max flash alignment is required to be at least 16");
 
 namespace pw::kvs {
+
 inline constexpr size_t kMaxFlashAlignment = PW_KVS_MAX_FLASH_ALIGNMENT;
+
 }  // namespace pw::kvs
diff --git a/pw_kvs/sectors.cc b/pw_kvs/sectors.cc
index 01547ec..e8d91b7 100644
--- a/pw_kvs/sectors.cc
+++ b/pw_kvs/sectors.cc
@@ -12,9 +12,13 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+#define PW_LOG_MODULE_NAME "KVS"
+#define PW_LOG_LEVEL PW_KVS_LOG_LEVEL
+#define PW_LOG_USE_ULTRA_SHORT_NAMES 1
+
 #include "pw_kvs/internal/sectors.h"
 
-#define PW_LOG_USE_ULTRA_SHORT_NAMES 1
+#include "pw_kvs_private/config.h"
 #include "pw_log/log.h"
 
 namespace pw::kvs::internal {
@@ -107,7 +111,7 @@
       if ((find_mode == kAppendEntry) ||
           (sector->RecoverableBytes(sector_size_bytes) == 0)) {
         *found_sector = sector;
-        return Status::OK;
+        return Status::Ok();
       } else {
         if ((non_empty_least_reclaimable_sector == nullptr) ||
             (non_empty_least_reclaimable_sector->RecoverableBytes(
@@ -136,7 +140,7 @@
         Index(first_empty_sector));
     last_new_ = first_empty_sector;
     *found_sector = first_empty_sector;
-    return Status::OK;
+    return Status::Ok();
   }
 
   // Tier 3 check: If we got this far, use the sector with least recoverable
@@ -146,13 +150,13 @@
     DBG("  Found a usable sector %u, with %u B recoverable, in GC",
         Index(*found_sector),
         unsigned((*found_sector)->RecoverableBytes(sector_size_bytes)));
-    return Status::OK;
+    return Status::Ok();
   }
 
   // No sector was found.
   DBG("  Unable to find a usable sector");
   *found_sector = nullptr;
-  return Status::RESOURCE_EXHAUSTED;
+  return Status::ResourceExhausted();
 }
 
 SectorDescriptor& Sectors::WearLeveledSectorFromIndex(size_t idx) const {
diff --git a/pw_kvs/test_key_value_store_test.cc b/pw_kvs/test_key_value_store_test.cc
new file mode 100644
index 0000000..af6d4a9
--- /dev/null
+++ b/pw_kvs/test_key_value_store_test.cc
@@ -0,0 +1,35 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_kvs/test_key_value_store.h"
+
+#include "gtest/gtest.h"
+#include "pw_kvs/key_value_store.h"
+#include "pw_status/status.h"
+
+namespace pw::kvs {
+namespace {
+
+// Simple test to verify that the TestKvs() does basic function.
+TEST(TestKvs, PutGetValue) {
+  KeyValueStore& kvs = TestKvs();
+  ASSERT_EQ(Status::Ok(), kvs.Put("key", uint32_t(0xfeedbeef)));
+
+  uint32_t value = 0;
+  EXPECT_EQ(Status::Ok(), kvs.Get("key", &value));
+  EXPECT_EQ(uint32_t(0xfeedbeef), value);
+}
+
+}  // namespace
+}  // namespace pw::kvs
diff --git a/pw_log/BUILD b/pw_log/BUILD
index 312c202..ec768fe 100644
--- a/pw_log/BUILD
+++ b/pw_log/BUILD
@@ -30,6 +30,7 @@
     hdrs = [
         "public/pw_log/levels.h",
         "public/pw_log/log.h",
+        "public/pw_log/options.h",
     ],
     includes = ["public"],
     deps = [
diff --git a/pw_log/BUILD.gn b/pw_log/BUILD.gn
index 203739e..8aa9b43 100644
--- a/pw_log/BUILD.gn
+++ b/pw_log/BUILD.gn
@@ -12,12 +12,12 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/facade.gni")
 import("$dir_pw_docgen/docs.gni")
 import("$dir_pw_unit_test/test.gni")
+
 declare_args() {
   # Backend for the pw_log module.
   pw_log_BACKEND = ""
@@ -33,6 +33,7 @@
   public = [
     "public/pw_log/levels.h",
     "public/pw_log/log.h",
+    "public/pw_log/options.h",
   ]
 }
 
diff --git a/pw_log/CMakeLists.txt b/pw_log/CMakeLists.txt
index 4928c2f..4b77940 100644
--- a/pw_log/CMakeLists.txt
+++ b/pw_log/CMakeLists.txt
@@ -12,6 +12,8 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+
 pw_add_facade(pw_log
   PUBLIC_DEPS
     pw_preprocessor
diff --git a/pw_log/docs.rst b/pw_log/docs.rst
index 120333d..8f5c31e 100644
--- a/pw_log/docs.rst
+++ b/pw_log/docs.rst
@@ -1,8 +1,4 @@
-.. _chapter-pw-log:
-
-.. default-domain:: cpp
-
-.. highlight:: cpp
+.. _module-pw_log:
 
 ------
 pw_log
@@ -22,6 +18,7 @@
 .. code-block:: cpp
 
   #define PW_LOG_MODULE_NAME "BLE"
+
   #include "pw_log/log.h"
 
   int main() {
@@ -41,6 +38,7 @@
 
   #define PW_LOG_MODULE_NAME "BLE"
   #define PW_LOG_USE_ULTRA_SHORT_NAMES 1
+
   #include "pw_log/log.h"
 
   int main() {
@@ -140,36 +138,87 @@
 
   Shorthand for `PW_LOG(PW_LOG_DEFAULT_FLAGS, <level>, fmt, ...)`.
 
-Filtering logs
---------------
+Option macros
+-------------
+This module defines macros that can be overridden to control the behavior of
+``pw_log`` statements. To override these macros, add ``#define`` statements
+for them before including headers.
 
-``pw_log`` supports compile time filtering of logs through two mechanisms.
+The option macro definitions must be visibile to ``pw_log/log.h`` the first time
+it is included. To handle potential transitive includes, place these
+``#defines`` before all ``#include`` statements. This should only be done in
+source files, not headers. For example:
 
-1. Filter by level. Source files that define ``PW_LOG_LEVEL`` will display all
-   logs at or above the chosen level.
+  .. code-block:: cpp
+
+    // Set the pw_log option macros here, before ALL of the #includes.
+    #define PW_LOG_MODULE_NAME "Calibration"
+    #define PW_LOG_LEVEL PW_LOG_LEVEL_WARN
+
+    #include <array>
+    #include <random>
+
+    #include "devices/hal9000.h"
+    #include "pw_log/log.h"
+    #include "pw_rpc/server.h"
+
+    int MyFunction() {
+      PW_LOG_INFO("hello???");
+    }
+
+.. c:macro:: PW_LOG_MODULE_NAME
+
+  A string literal module name to use in logs. Log backends may attach this
+  name to log messages or use it for runtime filtering. Defaults to ``""``. The
+  ``PW_LOG_MODULE_NAME_DEFINED`` macro is set to ``1`` or ``0`` to indicate
+  whether ``PW_LOG_MODULE_NAME`` was overridden.
+
+.. c:macro:: PW_LOG_DEFAULT_FLAGS
+
+  Log flags to use for the ``PW_LOG_<level>`` macros. Different flags may be
+  applied when using the ``PW_LOG`` macro directly.
+
+  Log backends use flags to change how they handle individual log messages.
+  Potential uses include assigning logs priority or marking them as containing
+  personal information. Defaults to ``0``.
+
+.. c:macro:: PW_LOG_LEVEL
+
+   Filters logs by level. Source files that define ``PW_LOG_LEVEL`` will display
+   only logs at or above the chosen level. Log statements below this level will
+   be compiled out of optimized builds. Defaults to ``PW_LOG_LEVEL_DEBUG``.
 
    Example:
 
    .. code-block:: cpp
 
-     #include "pw_log/log.h"
-
      #define PW_LOG_LEVEL PW_LOG_LEVEL_INFO
 
+     #include "pw_log/log.h"
+
      void DoSomething() {
        PW_LOG_DEBUG("This won't be logged at all");
        PW_LOG_INFO("This is INFO level, and will display");
        PW_LOG_WARN("This is above INFO level, and will display");
      }
 
-2. Filter by arbitrary expression based on ``level`` and ``flags``. Source
-   files that define ``PW_LOG_ENABLE_IF(level, flags)`` will display if the
-   given expression returns true.
+.. c:function:: PW_LOG_ENABLE_IF(level, flags)
+
+   Filters logs by an arbitrary expression based on ``level`` and ``flags``.
+   Source files that define ``PW_LOG_ENABLE_IF(level, flags)`` will display if
+   the given expression evaluates true.
 
    Example:
 
    .. code-block:: cpp
 
+     // Pigweed's log facade will call this macro to decide to log or not. In
+     // this case, it will drop logs with the PII flag set if display of PII is
+     // not enabled for the application.
+     #define PW_LOG_ENABLE_IF(level, flags) \
+         (level >= PW_LOG_LEVEL_INFO && \
+          !((flags & MY_PRODUCT_PII_MASK) && MY_PRODUCT_LOG_PII_ENABLED)
+
      #include "pw_log/log.h"
 
      // This define might be supplied by the build system.
@@ -178,13 +227,6 @@
      // This is the PII mask bit selected by the application.
      #define MY_PRODUCT_PII_MASK (1 << 5)
 
-     // Pigweed's log facade will call this macro to decide to log or not. In
-     // this case, it will drop logs with the PII flag set if display of PII is
-     // not enabled for the application.
-     #define PW_LOG_ENABLE_IF(level, flags) \
-         (level >= PW_LOG_INFO && \
-          !((flags & MY_PRODUCT_PII_MASK) && MY_PRODUCT_LOG_PII_ENABLED)
-
      void DoSomethingWithSensitiveInfo() {
        PW_LOG_DEBUG("This won't be logged at all");
        PW_LOG_INFO("This is INFO level, and will display");
@@ -282,15 +324,15 @@
 porting server code to microcontrollers quickly, we do not advise embedded
 projects use that approach unless absolutely necessary.
 
-- See also :ref:`chapter-pw-log-tokenized` for details on leveraging Pigweed's
+- See also :ref:`module-pw_log_tokenized` for details on leveraging Pigweed's
   tokenizer module for logging.
-- See also :ref:`chapter-pw-tokenizer` for details on Pigweed's tokenizer,
+- See also :ref:`module-pw_tokenizer` for details on Pigweed's tokenizer,
   which is useful for more than just logging.
 
 Why does the facade use header redirection instead of C functions?
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 Without header redirection, it is not possible to do sophisticated macro
-transforms in the backkend. For example, to apply tokenization to log strings,
+transforms in the backend. For example, to apply tokenization to log strings,
 the backend must define the handling macros. Additionally, compile-time
 filtering by log level or flags is not possible without header redirection.
 While it may be possible to do the filtering in the facade, that would imply
diff --git a/pw_log/public/pw_log/levels.h b/pw_log/public/pw_log/levels.h
index c4670f7..6886b6b 100644
--- a/pw_log/public/pw_log/levels.h
+++ b/pw_log/public/pw_log/levels.h
@@ -13,13 +13,17 @@
 // the License.
 #pragma once
 
-// Standard log levels. These are compatible with the log levels from Python's
-// logging library, but this could be customized if desired by the backend.
+// Standard log levels. Values are limited to 3 bits, to fit within the protobuf
+// definition of LogEntry's line_level in pw_log_rpc. These levels correspond
+// with the log levels from Python's logging library, but have different values.
 //
 // clang-format off
-#define PW_LOG_LEVEL_DEBUG    10
-#define PW_LOG_LEVEL_INFO     20
-#define PW_LOG_LEVEL_WARN     30
-#define PW_LOG_LEVEL_ERROR    40
-#define PW_LOG_LEVEL_CRITICAL 50
+#define PW_LOG_LEVEL_DEBUG    1
+#define PW_LOG_LEVEL_INFO     2
+#define PW_LOG_LEVEL_WARN     3
+#define PW_LOG_LEVEL_ERROR    4
+#define PW_LOG_LEVEL_CRITICAL 5
+
+#define PW_LOG_LEVEL_BITMASK  7  // 0b111
+#define PW_LOG_LEVEL_BITWIDTH 3
 // clang-format on
diff --git a/pw_log/public/pw_log/log.h b/pw_log/public/pw_log/log.h
index 0fc5908..16a4679 100644
--- a/pw_log/public/pw_log/log.h
+++ b/pw_log/public/pw_log/log.h
@@ -11,21 +11,20 @@
 // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 // License for the specific language governing permissions and limitations under
 // the License.
-//==============================================================================
-//
+
 // This file describes Pigweed's public user-facing logging API.
 //
 // THIS PUBLIC API IS NOT STABLE OR COMPLETE!
 //
 // Key functionality is still missing:
 //
-// - API for controlling verbosity at compile time
 // - API for controlling verbosity at run time
 // - API for querying if logging is enabled for the given level or flags
 //
 #pragma once
 
 #include "pw_log/levels.h"
+#include "pw_log/options.h"
 
 // log_backend.h must ultimately resolve to a header that implements the macros
 // required by the logging facade, as described below.
@@ -35,6 +34,10 @@
 //   PW_LOG_MODULE_NAME
 //     - The module name the backend should use
 //
+//   PW_LOG_LEVEL
+//     - General log level setting. By default, logs below this level are
+//       excluded from the build.
+//
 // Outputs: Macros log_backend.h is expected to provide:
 //
 //   PW_LOG(level, flags, fmt, ...)
@@ -58,36 +61,6 @@
 //
 #include "pw_log_backend/log_backend.h"
 
-// Default: Module name
-#ifndef PW_LOG_MODULE_NAME
-#define PW_LOG_MODULE_NAME ""
-#endif  // PW_LOG_MODULE_NAME
-
-// Default: Flags
-// For log statements like LOG_INFO that don't have an explicit argument, this
-// is used for the flags value.
-#ifndef PW_LOG_DEFAULT_FLAGS
-#define PW_LOG_DEFAULT_FLAGS 0
-#endif  // PW_LOG_DEFAULT_FLAGS
-
-// Default: Log level filtering
-//
-// All log statements have a level, and this define is the default filtering.
-// This is compile-time filtering if the level is a constant.
-//
-// TODO(pwbug/17): Convert this to the config system when available.
-#ifndef PW_LOG_LEVEL
-#define PW_LOG_LEVEL PW_LOG_LEVEL_DEBUG
-#endif  // PW_LOG_LEVEL
-
-// Default: Log enabled expression
-//
-// This expression determines whether or not the statement is enabled and
-// should be passed to the backend.
-#ifndef PW_LOG_ENABLE_IF
-#define PW_LOG_ENABLE_IF(level, flags) ((level) >= PW_LOG_LEVEL)
-#endif  // PW_LOG_ENABLE_IF
-
 #ifndef PW_LOG
 #define PW_LOG(level, flags, message, ...)               \
   do {                                                   \
@@ -125,6 +98,24 @@
   PW_LOG(PW_LOG_LEVEL_CRITICAL, PW_LOG_DEFAULT_FLAGS, message, __VA_ARGS__)
 #endif  // PW_LOG_CRITICAL
 
+// Default: Number of bits available for the log level
+//
+// All log statements have a level, and this define is the number of bits
+// available for the level. Some backends restrict this for better efficiency.
+// By default, pick a restricted but large enough value to work for most cases.
+#ifndef PW_LOG_LEVEL_BITS
+#define PW_LOG_LEVEL_BITS 6
+#endif  // PW_LOG_LEVEL_BITS
+
+// Default: Number of bits available for the log flags
+//
+// All log statements have a flags field, and this define is the number of bits
+// available for the flags. Some backends restrict this for better efficiency.
+// By default, pick a restricted but large enough value to work for most cases.
+#ifndef PW_LOG_FLAG_BITS
+#define PW_LOG_FLAG_BITS 10
+#endif  // PW_LOG_FLAG_BITS
+
 // Define short, usable names if requested.
 // TODO(pwbug/17): Convert this to the config system when available.
 #ifndef PW_LOG_USE_SHORT_NAMES
diff --git a/pw_log/public/pw_log/options.h b/pw_log/public/pw_log/options.h
new file mode 100644
index 0000000..3bd9c85
--- /dev/null
+++ b/pw_log/public/pw_log/options.h
@@ -0,0 +1,69 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// This file defines macros used to control the behavior of pw_log statements.
+// Files that use pw_log may define these macros BEFORE any headers are
+// #included to customize pw_log.
+//
+// For example, the following sets the log module name to "Foobar" and the
+// minimum log level to WARN:
+//
+//   #define PW_LOG_MODULE_NAME "Foobar"
+//   #define PW_LOG_LEVEL PW_LOG_LEVEL_WARN
+//
+//   #include "foo/bar.h"
+//   #include "pw_log/log.h"
+//
+// Users of pw_log should not include this header directly; include
+// "pw_log/log.h" instead. This header is separate from "pw_log/log.h" to avoid
+// circular dependencies when implementing the pw_log facade.
+#pragma once
+
+// Default: Module name
+//
+// An empty string is used for the module name if it is not set. The
+// PW_LOG_MODULE_NAME_DEFINED macro is set to 1 or 0 to allow pw_log backends to
+// behave differently if the module name is defined. For example, a backend
+// might prefix the format string with PW_LOG_MODULE_NAME ": ", but only if the
+// module name is provided.
+#ifdef PW_LOG_MODULE_NAME
+#define PW_LOG_MODULE_NAME_DEFINED 1
+#else
+#define PW_LOG_MODULE_NAME ""
+#define PW_LOG_MODULE_NAME_DEFINED 0
+#endif  // PW_LOG_MODULE_NAME
+
+// Default: Flags
+//
+// For log statements like LOG_INFO that don't have an explicit argument, this
+// is used for the flags value.
+#ifndef PW_LOG_DEFAULT_FLAGS
+#define PW_LOG_DEFAULT_FLAGS 0
+#endif  // PW_LOG_DEFAULT_FLAGS
+
+// Default: Log level filtering
+//
+// All log statements have a level, and this define is the default filtering.
+// This is compile-time filtering if the level is a constant.
+#ifndef PW_LOG_LEVEL
+#define PW_LOG_LEVEL PW_LOG_LEVEL_DEBUG
+#endif  // PW_LOG_LEVEL
+
+// Default: Log enabled expression
+//
+// This expression determines whether or not the statement is enabled and
+// should be passed to the backend.
+#ifndef PW_LOG_ENABLE_IF
+#define PW_LOG_ENABLE_IF(level, flags) ((level) >= PW_LOG_LEVEL)
+#endif  // PW_LOG_ENABLE_IF
diff --git a/pw_log_basic/BUILD b/pw_log_basic/BUILD
index 4b877e7..2c4598d 100644
--- a/pw_log_basic/BUILD
+++ b/pw_log_basic/BUILD
@@ -40,6 +40,7 @@
     name = "pw_log_basic",
     srcs = [
         "log_basic.cc",
+        "pw_log_basic_private/config.h",
     ],
     deps = [
         ":headers",
diff --git a/pw_log_basic/BUILD.gn b/pw_log_basic/BUILD.gn
index 84fad72..5269f09 100644
--- a/pw_log_basic/BUILD.gn
+++ b/pw_log_basic/BUILD.gn
@@ -12,11 +12,19 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
+import("$dir_pw_build/module_config.gni")
 import("$dir_pw_build/target_types.gni")
 import("$dir_pw_docgen/docs.gni")
+
+declare_args() {
+  # The build target that overrides the default configuration options for this
+  # module. This should point to a source set that provides defines through a
+  # public config (which may -include a file or add defines directly).
+  pw_log_basic_CONFIG = pw_build_DEFAULT_MODULE_CONFIG
+}
+
 config("default_config") {
   include_dirs = [ "public" ]
 }
@@ -41,6 +49,7 @@
     "$dir_pw_log:facade",
     dir_pw_string,
     dir_pw_sys_io,
+    pw_log_basic_CONFIG,
   ]
   public = [ "public/pw_log_basic/log_basic.h" ]
 
@@ -51,7 +60,10 @@
     defines += [ "PW_EMOJI=1" ]
   }
 
-  sources = [ "log_basic.cc" ]
+  sources = [
+    "log_basic.cc",
+    "pw_log_basic_private/config.h",
+  ]
 }
 
 pw_doc_group("docs") {
diff --git a/pw_log_basic/CMakeLists.txt b/pw_log_basic/CMakeLists.txt
index 5c66fe0..3cf0f74 100644
--- a/pw_log_basic/CMakeLists.txt
+++ b/pw_log_basic/CMakeLists.txt
@@ -12,6 +12,8 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+
 pw_auto_add_simple_module(pw_log_basic
   IMPLEMENTS_FACADE
     pw_log
@@ -19,9 +21,3 @@
     pw_string
     pw_sys_io
 )
-
-target_include_directories(pw_log_basic PUBLIC public_overrides)
-
-# TODO(hepler): Declare pw_log_basic as the pw_log backend for now.
-add_library(pw_log.backend INTERFACE)
-target_link_libraries(pw_log.backend INTERFACE pw_log_basic)
diff --git a/pw_log_basic/docs.rst b/pw_log_basic/docs.rst
index 2b8ca41..985ec50 100644
--- a/pw_log_basic/docs.rst
+++ b/pw_log_basic/docs.rst
@@ -1,17 +1,22 @@
-.. _chapter-pw-log-basic:
-
-.. default-domain:: cpp
-
-.. highlight:: sh
+.. _module-pw_log_basic:
 
 ------------
 pw_log_basic
 ------------
-``pw_log_basic`` is a ``pw_log backend`` that sends logs over ``pw_sys_io``. The
-destination of ``pw_sys_io`` depends on the ``pw_sys_io`` backend in use. This
-is controlled by the ``dir_pw_sys_io_backend`` variable in a target's
-``target_config.gni``.
-information.
+``pw_log_basic`` is a ``pw_log backend`` that sends logs over ``pw_sys_io`` by
+default. The destination of ``pw_sys_io`` depends on the ``pw_sys_io`` backend
+in use. This is controlled by the ``dir_pw_sys_io_backend`` variable in a
+target's ``target_config.gni``.
+
+The log output may be changed from ``pw_sys_io`` to an arbitrary function by
+calling ``pw::log_basic::SetOutput``.
+
+.. cpp:namespace:: pw::log_basic
+
+.. cpp:function:: void SetOutput(void (\*log_output)(std::string_view))
+
+  Set the log output function, which defaults ``pw_sys_io::WriteLine``. This
+  function is called with each formatted log message.
 
 This module employs an internal buffer for formatting log strings, and currently
 has a fixed size of 150 bytes. Any final log statements that are larger than
diff --git a/pw_log_basic/log_basic.cc b/pw_log_basic/log_basic.cc
index 329f848..e480614 100644
--- a/pw_log_basic/log_basic.cc
+++ b/pw_log_basic/log_basic.cc
@@ -19,6 +19,7 @@
 #include <cstring>
 
 #include "pw_log/levels.h"
+#include "pw_log_basic_private/config.h"
 #include "pw_string/string_builder.h"
 #include "pw_sys_io/sys_io.h"
 
@@ -37,16 +38,7 @@
 #define RESET     "\033[0m"
 // clang-format on
 
-#ifndef PW_EMOJI
-#define PW_EMOJI 0
-#endif  // PW_EMOJI
-
-// TODO(pwbug/17): Expose these through the config system.
-#define PW_LOG_SHOW_FILENAME 0
-#define PW_LOG_SHOW_FUNCTION 0
-#define PW_LOG_SHOW_FLAG 0
-#define PW_LOG_SHOW_MODULE 0
-
+namespace pw::log_basic {
 namespace {
 
 const char* LogLevelToLogLevelName(int level) {
@@ -91,6 +83,10 @@
 }
 #endif  // PW_LOG_SHOW_FILENAME
 
+void (*write_log)(std::string_view) = [](std::string_view log) {
+  sys_io::WriteLine(log);
+};
+
 }  // namespace
 
 // This is a fully loaded, inefficient-at-the-callsite, log implementation.
@@ -151,5 +147,11 @@
   va_end(args);
 
   // All done; flush the log.
-  pw::sys_io::WriteLine(buffer.view());
+  write_log(buffer);
 }
+
+void SetOutput(void (*log_output)(std::string_view log)) {
+  write_log = log_output;
+}
+
+}  // namespace pw::log_basic
diff --git a/pw_log_basic/public/pw_log_basic/log_basic.h b/pw_log_basic/public/pw_log_basic/log_basic.h
index 6ebb4e1..d16ef0b 100644
--- a/pw_log_basic/public/pw_log_basic/log_basic.h
+++ b/pw_log_basic/public/pw_log_basic/log_basic.h
@@ -13,8 +13,8 @@
 // the License.
 #pragma once
 
+#include "pw_preprocessor/arguments.h"
 #include "pw_preprocessor/compiler.h"
-#include "pw_preprocessor/macro_arg_count.h"
 #include "pw_preprocessor/util.h"
 
 PW_EXTERN_C_START
@@ -49,3 +49,17 @@
            __func__,                              \
            message PW_COMMA_ARGS(__VA_ARGS__));   \
   } while (0)
+
+#ifdef __cplusplus
+
+#include <string_view>
+
+namespace pw::log_basic {
+
+// Sets the function to use to send log messages. Defaults to
+// pw::sys_io::WriteLine.
+void SetOutput(void (*log_output)(std::string_view log));
+
+}  // namespace pw::log_basic
+
+#endif  // __cplusplus
diff --git a/pw_log_basic/pw_log_basic_private/config.h b/pw_log_basic/pw_log_basic_private/config.h
new file mode 100644
index 0000000..6bb9438
--- /dev/null
+++ b/pw_log_basic/pw_log_basic_private/config.h
@@ -0,0 +1,53 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+// Replaces log levels and flag presence indicator with emoji.
+#ifndef PW_EMOJI
+#define PW_EMOJI 0
+#endif  // PW_EMOJI
+
+// With all the following flags enabled, log messages look like this:
+//
+// clang-format off
+//  my_file.cc                    :  42 |                Foo | TST | INF  Hello, world!
+//  buggy.cc                      :2145 |    ReadBuggyBuffer |     * ERR  No, BAD!
+//
+// With emoji:
+//  my_file.cc                    :  42 |                Foo | TST    ℹ️  Hello, world!
+//  buggy.cc                      :2145 |    ReadBuggyBuffer |     🚩 ❌  No, BAD!
+// clang-format on
+
+// Prints the name of the file that emitted the log message.
+#ifndef PW_LOG_SHOW_FILENAME
+#define PW_LOG_SHOW_FILENAME 0
+#endif  // PW_LOG_SHOW_FILENAME
+
+// Prints the name of the function that emitted the log message.
+#ifndef PW_LOG_SHOW_FUNCTION
+#define PW_LOG_SHOW_FUNCTION 0
+#endif  // PW_LOG_SHOW_FUNCTION
+
+// Prints an indicator for whether or not there are any active flags for a given
+// log statement.
+#ifndef PW_LOG_SHOW_FLAG
+#define PW_LOG_SHOW_FLAG 0
+#endif  // PW_LOG_SHOW_FLAG
+
+// Prints the module name associated with a log statement. This is provided by
+// defining PW_LOG_MODULE_NAME inside module source files; it is not implied by
+// module structure or file path magic.
+#ifndef PW_LOG_SHOW_MODULE
+#define PW_LOG_SHOW_MODULE 0
+#endif  // PW_LOG_SHOW_MODULE
diff --git a/pw_log_null/BUILD b/pw_log_null/BUILD
new file mode 100644
index 0000000..f87a328
--- /dev/null
+++ b/pw_log_null/BUILD
@@ -0,0 +1,59 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+load(
+    "//pw_build:pigweed.bzl",
+    "pw_cc_library",
+    "pw_cc_test",
+)
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])  # Apache License 2.0
+
+pw_cc_library(
+    name = "headers",
+    hdrs = [
+        "public/pw_log_null/log_null.h",
+        "public_overrides/pw_log_backend/log_backend.h",
+    ],
+    includes = [
+        "public",
+        "public_overrides",
+    ],
+    deps = [
+        "//pw_preprocessor",
+    ],
+)
+
+pw_cc_library(
+    name = "pw_log_null",
+    srcs = [
+        "log_null.cc",
+    ],
+    deps = [
+        "//pw_log:facade",
+        "//pw_log_null:headers",
+        "//pw_string",
+        "//pw_sys_io",
+    ],
+)
+
+pw_cc_library(
+    name = "test",
+    srcs = [
+        "test.cc",
+        "test_c.c",
+    ],
+)
diff --git a/pw_rpc/test_impl/BUILD.gn b/pw_log_null/BUILD.gn
similarity index 62%
copy from pw_rpc/test_impl/BUILD.gn
copy to pw_log_null/BUILD.gn
index 68f88c1..48a25f8 100644
--- a/pw_rpc/test_impl/BUILD.gn
+++ b/pw_log_null/BUILD.gn
@@ -12,19 +12,39 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/target_types.gni")
+import("$dir_pw_docgen/docs.gni")
 import("$dir_pw_unit_test/test.gni")
+
 config("config") {
-  include_dirs = [ "public_overrides" ]
+  include_dirs = [
+    "public",
+    "public_overrides",
+  ]
   visibility = [ ":*" ]
 }
 
-pw_source_set("test_impl") {
+pw_source_set("pw_log_null") {
   public_configs = [ ":config" ]
-  public = [ "public_overrides/pw_rpc/internal/method.h" ]
-  public_deps = [ "../:server_library_deps" ]
-  visibility = [ "..:*" ]
+  public = [ "public_overrides/pw_log_backend/log_backend.h" ]
+  sources = [ "public/pw_log_null/log_null.h" ]
+  friend = [ ":test" ]
+}
+
+pw_doc_group("docs") {
+  sources = [ "docs.rst" ]
+}
+
+pw_test_group("tests") {
+  tests = [ ":test" ]
+}
+
+pw_test("test") {
+  sources = [
+    "test.cc",
+    "test_c.c",
+  ]
+  deps = [ ":pw_log_null" ]
 }
diff --git a/pw_protobuf_compiler/nanopb.gni b/pw_log_null/CMakeLists.txt
similarity index 78%
copy from pw_protobuf_compiler/nanopb.gni
copy to pw_log_null/CMakeLists.txt
index 3c3be32..1f74b8d 100644
--- a/pw_protobuf_compiler/nanopb.gni
+++ b/pw_log_null/CMakeLists.txt
@@ -12,8 +12,11 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-declare_args() {
-  # If compiling protos for nanopb, this variable is set to the path to the
-  # nanopb installation.
-  dir_pw_third_party_nanopb = ""
-}
+include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+
+pw_auto_add_simple_module(pw_log_null
+  IMPLEMENTS_FACADE
+    pw_log
+  PUBLIC_DEPS
+    pw_preprocessor
+)
diff --git a/pw_log_null/docs.rst b/pw_log_null/docs.rst
new file mode 100644
index 0000000..fa7466f
--- /dev/null
+++ b/pw_log_null/docs.rst
@@ -0,0 +1,19 @@
+.. _module-pw_log_null:
+
+-----------
+pw_log_null
+-----------
+``pw_log_null`` is a ``pw_log backend`` that ignores all ``pw_log`` statements.
+The backend implements ``PW_LOG`` with an empty inline function. Using an empty
+function ensures that the arguments are evaluated and their types are correct.
+Since the function is inline in the header, the compiler will optimize out the
+function call.
+
+This backend can be used to completely disable ``pw_log``, which may be helpful
+in certain development situations (e.g. to avoid circular dependencies).
+
+.. tip::
+  If you are concerned about the resource demands of logging, try tokenizing
+  logs with :ref:`module-pw_tokenizer` and :ref:`module-pw_log_tokenized`
+  instead of disabling logs completely. Tokenized logs provide exactly the
+  same information as plain text logs but use dramatically fewer resources.
diff --git a/pw_log_null/public/pw_log_null/log_null.h b/pw_log_null/public/pw_log_null/log_null.h
new file mode 100644
index 0000000..054d6d0
--- /dev/null
+++ b/pw_log_null/public/pw_log_null/log_null.h
@@ -0,0 +1,56 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include "pw_preprocessor/arguments.h"
+#include "pw_preprocessor/compiler.h"
+#include "pw_preprocessor/util.h"
+
+PW_EXTERN_C_START
+
+// Empty function for compiling out log statements. Since the function is empty
+// and inline, it should be completely compiled out. This function accomplishes
+// following:
+//
+//   - Uses the arguments to PW_LOG, which avoids "unused variable" warnings.
+//   - Executes expressions passed to PW_LOG, so that the behavior is consistent
+//     between this null backend and a backend that actually logs.
+//   - Checks the printf-style format string arguments to PW_LOG.
+//
+// For compatibility with C and the printf compiler attribute, the declaration
+// and definition must be separate and both marked inline.
+static inline void pw_log_Ignored(int level,
+                                  unsigned int flags,
+                                  const char* module_name,
+                                  const char* message,
+                                  ...) PW_PRINTF_FORMAT(4, 5);
+
+static inline void pw_log_Ignored(int level,
+                                  unsigned int flags,
+                                  const char* module_name,
+                                  const char* message,
+                                  ...) {
+  PW_UNUSED(level);
+  PW_UNUSED(flags);
+  PW_UNUSED(module_name);
+  PW_UNUSED(message);
+}
+
+PW_EXTERN_C_END
+
+#define PW_LOG(level, flags, message, ...) \
+  pw_log_Ignored((level),                  \
+                 (flags),                  \
+                 PW_LOG_MODULE_NAME,       \
+                 message PW_COMMA_ARGS(__VA_ARGS__))
diff --git a/pw_sys_io_baremetal_lm3s6965evb/early_boot.c b/pw_log_null/public_overrides/pw_log_backend/log_backend.h
similarity index 88%
rename from pw_sys_io_baremetal_lm3s6965evb/early_boot.c
rename to pw_log_null/public_overrides/pw_log_backend/log_backend.h
index 1670b7d..3822a51 100644
--- a/pw_sys_io_baremetal_lm3s6965evb/early_boot.c
+++ b/pw_log_null/public_overrides/pw_log_backend/log_backend.h
@@ -11,7 +11,6 @@
 // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 // License for the specific language governing permissions and limitations under
 // the License.
+#pragma once
 
-#include "pw_boot_armv7m/boot.h"
-
-void pw_PreStaticConstructorInit() {}
\ No newline at end of file
+#include "pw_log_null/log_null.h"
diff --git a/pw_log_null/test.cc b/pw_log_null/test.cc
new file mode 100644
index 0000000..6c656be
--- /dev/null
+++ b/pw_log_null/test.cc
@@ -0,0 +1,49 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "gtest/gtest.h"
+#include "pw_log_null/log_null.h"
+
+#define PW_LOG_MODULE_NAME "this test!"
+
+extern "C" bool CTest();
+
+namespace {
+
+TEST(LogNull, NoArguments) {
+  PW_LOG(1, 2, "3");
+  PW_LOG(1, 2, "whoa");
+}
+
+TEST(LogNull, WithArguments) {
+  PW_LOG(1, 2, "%s", "hello");
+  PW_LOG(1, 2, "%d + %s == %p", 1, "two", nullptr);
+}
+
+TEST(LogNull, ExpressionsAreEvaluated) {
+  static int global;
+
+  global = 0;
+  bool local = true;
+
+  PW_LOG(1, 2, "You are number%s %d!", (local = false) ? "" : " not", []() {
+    global = 1;
+    return global;
+  }());
+
+  EXPECT_EQ(1, global);
+  EXPECT_FALSE(local);
+}
+
+}  // namespace
diff --git a/pw_log_null/test_c.c b/pw_log_null/test_c.c
new file mode 100644
index 0000000..4143f63
--- /dev/null
+++ b/pw_log_null/test_c.c
@@ -0,0 +1,42 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include <stdbool.h>
+#include <stddef.h>
+
+#include "pw_log_null/log_null.h"
+
+#define PW_LOG_MODULE_NAME "c test!"
+
+static int global;
+
+static int IncrementGlobal(void) { return ++global; }
+
+bool CTest() {
+  PW_LOG(1, 2, "3");
+  PW_LOG(1, 2, "whoa");
+  PW_LOG(1, 2, "%s", "hello");
+  PW_LOG(1, 2, "%d + %s == %p", 1, "two", NULL);
+
+  global = 0;
+  bool local = true;
+
+  PW_LOG(1,
+         2,
+         "You are number%s %d!",
+         (local = false) ? "" : " not",
+         IncrementGlobal());
+
+  return global == 1 && !local;
+}
diff --git a/pw_log_rpc/BUILD b/pw_log_rpc/BUILD
new file mode 100644
index 0000000..17c9649
--- /dev/null
+++ b/pw_log_rpc/BUILD
@@ -0,0 +1,72 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+load(
+    "//pw_build:pigweed.bzl",
+    "pw_cc_library",
+    "pw_cc_test",
+)
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])  # Apache License 2.0
+
+pw_cc_library(
+    name = "pw_logs",
+    srcs = [ "logs_rpc.cc" ],
+    includes = [ "public" ],
+    deps = [
+        "//pw_bytes",
+        "//pw_result",
+        "//pw_ring_buffer",
+        "//pw_status",
+    ],
+    hdrs = [ "public/pw_log_rpc/logs_rpc.h" ]
+)
+
+pw_cc_library(
+    name = "pw_log_queue",
+    srcs = [ "log_queue.cc" ],
+    includes = [ "public" ],
+    deps = [
+        "//pw_bytes",
+        "//pw_log",
+        "//pw_result",
+        "//pw_ring_buffer",
+        "//pw_status",
+    ],
+    hdrs = [ "public/pw_log_rpc/log_queue.h" ]
+)
+
+pw_cc_test(
+    name = "log_queue_test",
+    srcs = [
+        "log_queue_test.cc",
+    ],
+    deps = [
+        "//pw_preprocessor",
+        "//pw_unit_test",
+    ],
+)
+
+pw_cc_test(
+    name = "logs_rpc_test",
+    srcs = [
+        "logs_rpc_test.cc",
+    ],
+    deps = [
+        "//pw_preprocessor",
+        "//pw_unit_test",
+    ],
+)
diff --git a/pw_log_rpc/BUILD.gn b/pw_log_rpc/BUILD.gn
new file mode 100644
index 0000000..2f308a0
--- /dev/null
+++ b/pw_log_rpc/BUILD.gn
@@ -0,0 +1,82 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+import("$dir_pw_docgen/docs.gni")
+import("$dir_pw_protobuf_compiler/proto.gni")
+import("$dir_pw_unit_test/test.gni")
+
+config("default_config") {
+  include_dirs = [ "public" ]
+  visibility = [ ":*" ]
+}
+
+pw_source_set("logs") {
+  public_configs = [ ":default_config" ]
+  public = [ "public/pw_log_rpc/logs_rpc.h" ]
+  sources = [ "logs_rpc.cc" ]
+  public_deps = [
+    ":log_queue",
+    ":protos.pwpb",
+    ":protos.raw_rpc",
+  ]
+}
+
+pw_source_set("log_queue") {
+  public_configs = [ ":default_config" ]
+  public = [ "public/pw_log_rpc/log_queue.h" ]
+  public_deps = [
+    "$dir_pw_bytes",
+    "$dir_pw_log",
+    "$dir_pw_result",
+    "$dir_pw_ring_buffer",
+    "$dir_pw_status",
+  ]
+  sources = [ "log_queue.cc" ]
+  deps = [ ":protos.pwpb" ]
+}
+
+pw_test("logs_rpc_test") {
+  deps = [
+    ":logs",
+    "$dir_pw_rpc/raw:test_method_context",
+  ]
+  sources = [ "logs_rpc_test.cc" ]
+}
+
+pw_proto_library("protos") {
+  sources = [ "pw_log_proto/log.proto" ]
+}
+
+pw_doc_group("docs") {
+  sources = [ "docs.rst" ]
+}
+
+pw_test("log_queue_test") {
+  sources = [ "log_queue_test.cc" ]
+  deps = [
+    ":log_queue",
+    ":protos.pwpb",
+    dir_pw_protobuf,
+  ]
+}
+
+pw_test_group("tests") {
+  tests = [
+    ":log_queue_test",
+    ":logs_rpc_test",
+  ]
+}
diff --git a/pw_log_rpc/docs.rst b/pw_log_rpc/docs.rst
new file mode 100644
index 0000000..ff8b466
--- /dev/null
+++ b/pw_log_rpc/docs.rst
@@ -0,0 +1,7 @@
+.. _module-pw_log_rpc:
+
+----------
+pw_log_rpc
+----------
+This is an RPC-based logging backend for Pigweed. It is not ready for use, and
+is under construction.
diff --git a/pw_log_rpc/log_queue.cc b/pw_log_rpc/log_queue.cc
new file mode 100644
index 0000000..a17cedf
--- /dev/null
+++ b/pw_log_rpc/log_queue.cc
@@ -0,0 +1,122 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_log_rpc/log_queue.h"
+
+#include "pw_log/levels.h"
+#include "pw_log_proto/log.pwpb.h"
+#include "pw_protobuf/wire_format.h"
+#include "pw_status/try.h"
+
+namespace pw::log_rpc {
+namespace {
+
+using pw::protobuf::WireType;
+constexpr std::byte kLogKey = static_cast<std::byte>(pw::protobuf::MakeKey(
+    static_cast<uint32_t>(pw::log::LogEntries::Fields::ENTRIES),
+    WireType::kDelimited));
+
+}  // namespace
+
+Status LogQueue::PushTokenizedMessage(ConstByteSpan message,
+                                      uint32_t flags,
+                                      uint32_t level,
+                                      uint32_t line,
+                                      uint32_t thread,
+                                      int64_t timestamp) {
+  pw::protobuf::NestedEncoder nested_encoder(encode_buffer_);
+  pw::log::LogEntry::Encoder encoder(&nested_encoder);
+  Status status;
+
+  encoder.WriteMessageTokenized(message);
+  encoder.WriteLineLevel(
+      (level & PW_LOG_LEVEL_BITMASK) |
+      ((line << PW_LOG_LEVEL_BITWIDTH) & ~PW_LOG_LEVEL_BITMASK));
+  encoder.WriteFlags(flags);
+  encoder.WriteThreadTokenized(thread);
+
+  // TODO(prashanthsw): Add support for delta encoding of the timestamp.
+  encoder.WriteTimestamp(timestamp);
+
+  if (dropped_entries_ > 0) {
+    encoder.WriteDropped(dropped_entries_);
+  }
+
+  ConstByteSpan log_entry;
+  status = nested_encoder.Encode(&log_entry);
+  if (!status.ok() || log_entry.size_bytes() > max_log_entry_size_) {
+    // If an encoding failure occurs or the constructed log entry is larger
+    // than the configured max size, map the error to INTERNAL. If the
+    // underlying allocation of this encode buffer or the nested encoding
+    // sequencing are at fault, they are not the caller's responsibility. If
+    // the log entry is larger than the max allowed size, the log is dropped
+    // intentionally, and it is expected that the caller accepts this
+    // possibility.
+    status = PW_STATUS_INTERNAL;
+  } else {
+    // Try to push back the encoded log entry.
+    status = ring_buffer_.TryPushBack(log_entry, std::byte(kLogKey));
+  }
+
+  if (!status.ok()) {
+    // The ring buffer may hit the RESOURCE_EXHAUSTED state, causing us
+    // to drop packets. However, this check captures all failures from
+    // Encode and TryPushBack, as any failure here causes packet drop.
+    dropped_entries_++;
+    latest_dropped_timestamp_ = timestamp;
+    return status;
+  }
+
+  dropped_entries_ = 0;
+  return Status::Ok();
+}
+
+Result<LogEntries> LogQueue::Pop(LogEntriesBuffer entry_buffer) {
+  size_t ring_buffer_entry_size = 0;
+  PW_TRY(pop_status_for_test_);
+  // The caller must provide a buffer that is at minimum max_log_entry_size, to
+  // ensure that the front entry of the ring buffer can be popped.
+  PW_DCHECK_UINT_GE(entry_buffer.size_bytes(), max_log_entry_size_);
+  PW_TRY(ring_buffer_.PeekFrontWithPreamble(entry_buffer,
+                                            &ring_buffer_entry_size));
+  PW_DCHECK_OK(ring_buffer_.PopFront());
+
+  return LogEntries{
+      .entries = ConstByteSpan(entry_buffer.first(ring_buffer_entry_size)),
+      .entry_count = 1};
+}
+
+LogEntries LogQueue::PopMultiple(LogEntriesBuffer entries_buffer) {
+  size_t offset = 0;
+  size_t entry_count = 0;
+
+  // The caller must provide a buffer that is at minimum max_log_entry_size, to
+  // ensure that the front entry of the ring buffer can be popped.
+  PW_DCHECK_UINT_GE(entries_buffer.size_bytes(), max_log_entry_size_);
+
+  while (ring_buffer_.EntryCount() > 0 &&
+         (entries_buffer.size_bytes() - offset) > max_log_entry_size_) {
+    const Result<LogEntries> result = Pop(entries_buffer.subspan(offset));
+    if (!result.ok()) {
+      break;
+    }
+    offset += result.value().entries.size_bytes();
+    entry_count += result.value().entry_count;
+  }
+
+  return LogEntries{.entries = ConstByteSpan(entries_buffer.first(offset)),
+                    .entry_count = entry_count};
+}
+
+}  // namespace pw::log_rpc
diff --git a/pw_log_rpc/log_queue_test.cc b/pw_log_rpc/log_queue_test.cc
new file mode 100644
index 0000000..f6a499a
--- /dev/null
+++ b/pw_log_rpc/log_queue_test.cc
@@ -0,0 +1,235 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_log_rpc/log_queue.h"
+
+#include "gtest/gtest.h"
+#include "pw_log/levels.h"
+#include "pw_log_proto/log.pwpb.h"
+#include "pw_protobuf/decoder.h"
+
+namespace pw::log_rpc {
+namespace {
+
+constexpr size_t kEncodeBufferSize = 512;
+
+constexpr const char kTokenizedMessage[] = "msg_token";
+constexpr uint32_t kFlags = 0xF;
+constexpr uint32_t kLevel = 0b010;
+constexpr uint32_t kLine = 0b101011000;
+constexpr uint32_t kTokenizedThread = 0xF;
+constexpr int64_t kTimestamp = 0;
+
+constexpr size_t kLogBufferSize = kEncodeBufferSize * 3;
+
+void VerifyLogEntry(pw::protobuf::Decoder& log_decoder,
+                    const char* expected_tokenized_message,
+                    const uint32_t expected_flags,
+                    const uint32_t expected_level,
+                    const uint32_t expected_line,
+                    const uint32_t expected_tokenized_thread,
+                    const int64_t expected_timestamp) {
+  ConstByteSpan log_entry_message;
+  EXPECT_TRUE(log_decoder.Next().ok());  // preamble
+  EXPECT_EQ(1U, log_decoder.FieldNumber());
+  EXPECT_TRUE(log_decoder.ReadBytes(&log_entry_message).ok());
+
+  pw::protobuf::Decoder entry_decoder(log_entry_message);
+  ConstByteSpan tokenized_message;
+  EXPECT_TRUE(entry_decoder.Next().ok());  // tokenized_message
+  EXPECT_EQ(1U, entry_decoder.FieldNumber());
+  EXPECT_TRUE(entry_decoder.ReadBytes(&tokenized_message).ok());
+  EXPECT_TRUE(std::memcmp(tokenized_message.begin(),
+                          (const void*)expected_tokenized_message,
+                          tokenized_message.size()) == 0);
+
+  uint32_t line_level;
+  EXPECT_TRUE(entry_decoder.Next().ok());  // line_level
+  EXPECT_EQ(2U, entry_decoder.FieldNumber());
+  EXPECT_TRUE(entry_decoder.ReadUint32(&line_level).ok());
+  EXPECT_EQ(expected_level, line_level & PW_LOG_LEVEL_BITMASK);
+  EXPECT_EQ(expected_line,
+            (line_level & ~PW_LOG_LEVEL_BITMASK) >> PW_LOG_LEVEL_BITWIDTH);
+
+  uint32_t flags;
+  EXPECT_TRUE(entry_decoder.Next().ok());  // flags
+  EXPECT_EQ(3U, entry_decoder.FieldNumber());
+  EXPECT_TRUE(entry_decoder.ReadUint32(&flags).ok());
+  EXPECT_EQ(expected_flags, flags);
+
+  uint32_t tokenized_thread;
+  EXPECT_TRUE(entry_decoder.Next().ok());  // tokenized_thread
+  EXPECT_EQ(4U, entry_decoder.FieldNumber());
+  EXPECT_TRUE(entry_decoder.ReadUint32(&tokenized_thread).ok());
+  EXPECT_EQ(expected_tokenized_thread, tokenized_thread);
+
+  int64_t timestamp;
+  EXPECT_TRUE(entry_decoder.Next().ok());  // timestamp
+  EXPECT_EQ(5U, entry_decoder.FieldNumber());
+  EXPECT_TRUE(entry_decoder.ReadInt64(&timestamp).ok());
+  EXPECT_EQ(expected_timestamp, timestamp);
+}
+
+}  // namespace
+
+TEST(LogQueue, SinglePushPopTokenizedMessage) {
+  std::byte log_buffer[kLogBufferSize];
+  LogQueueWithEncodeBuffer<kEncodeBufferSize> log_queue(log_buffer);
+
+  EXPECT_EQ(Status::OK,
+            log_queue.PushTokenizedMessage(
+                std::as_bytes(std::span(kTokenizedMessage)),
+                kFlags,
+                kLevel,
+                kLine,
+                kTokenizedThread,
+                kTimestamp));
+
+  std::byte log_entry[kEncodeBufferSize];
+  Result<LogEntries> pop_result = log_queue.Pop(std::span(log_entry));
+  EXPECT_TRUE(pop_result.ok());
+
+  pw::protobuf::Decoder log_decoder(pop_result.value().entries);
+  EXPECT_EQ(pop_result.value().entry_count, 1U);
+  VerifyLogEntry(log_decoder,
+                 kTokenizedMessage,
+                 kFlags,
+                 kLevel,
+                 kLine,
+                 kTokenizedThread,
+                 kTimestamp);
+}
+
+TEST(LogQueue, MultiplePushPopTokenizedMessage) {
+  constexpr size_t kEntryCount = 3;
+
+  std::byte log_buffer[1024];
+  LogQueueWithEncodeBuffer<kEncodeBufferSize> log_queue(log_buffer);
+
+  for (size_t i = 0; i < kEntryCount; i++) {
+    EXPECT_EQ(Status::OK,
+              log_queue.PushTokenizedMessage(
+                  std::as_bytes(std::span(kTokenizedMessage)),
+                  kFlags,
+                  kLevel,
+                  kLine + (i << 3),
+                  kTokenizedThread,
+                  kTimestamp + i));
+  }
+
+  std::byte log_entry[kEncodeBufferSize];
+  for (size_t i = 0; i < kEntryCount; i++) {
+    Result<LogEntries> pop_result = log_queue.Pop(std::span(log_entry));
+    EXPECT_TRUE(pop_result.ok());
+
+    pw::protobuf::Decoder log_decoder(pop_result.value().entries);
+    EXPECT_EQ(pop_result.value().entry_count, 1U);
+    VerifyLogEntry(log_decoder,
+                   kTokenizedMessage,
+                   kFlags,
+                   kLevel,
+                   kLine + (i << 3),
+                   kTokenizedThread,
+                   kTimestamp + i);
+  }
+}
+
+TEST(LogQueue, PopMultiple) {
+  constexpr size_t kEntryCount = 3;
+
+  std::byte log_buffer[kLogBufferSize];
+  LogQueueWithEncodeBuffer<kEncodeBufferSize> log_queue(log_buffer);
+
+  for (size_t i = 0; i < kEntryCount; i++) {
+    EXPECT_EQ(Status::OK,
+              log_queue.PushTokenizedMessage(
+                  std::as_bytes(std::span(kTokenizedMessage)),
+                  kFlags,
+                  kLevel,
+                  kLine + (i << 3),
+                  kTokenizedThread,
+                  kTimestamp + i));
+  }
+
+  std::byte log_entries[kLogBufferSize];
+  Result<LogEntries> pop_result = log_queue.PopMultiple(log_entries);
+  EXPECT_TRUE(pop_result.ok());
+
+  pw::protobuf::Decoder log_decoder(pop_result.value().entries);
+  EXPECT_EQ(pop_result.value().entry_count, kEntryCount);
+  for (size_t i = 0; i < kEntryCount; i++) {
+    VerifyLogEntry(log_decoder,
+                   kTokenizedMessage,
+                   kFlags,
+                   kLevel,
+                   kLine + (i << 3),
+                   kTokenizedThread,
+                   kTimestamp + i);
+  }
+}
+
+TEST(LogQueue, TooSmallEncodeBuffer) {
+  constexpr size_t kSmallBuffer = 1;
+
+  std::byte log_buffer[kLogBufferSize];
+  LogQueueWithEncodeBuffer<kSmallBuffer> log_queue(log_buffer);
+  EXPECT_EQ(Status::INTERNAL,
+            log_queue.PushTokenizedMessage(
+                std::as_bytes(std::span(kTokenizedMessage)),
+                kFlags,
+                kLevel,
+                kLine,
+                kTokenizedThread,
+                kTimestamp));
+}
+
+TEST(LogQueue, TooSmallLogBuffer) {
+  constexpr size_t kSmallerThanPreamble = 1;
+  constexpr size_t kEntryCount = 100;
+
+  // Expect OUT_OF_RANGE when the buffer is smaller than a preamble.
+  std::byte log_buffer[kLogBufferSize];
+  LogQueueWithEncodeBuffer<kEncodeBufferSize> log_queue_small(
+      std::span(log_buffer, kSmallerThanPreamble));
+  EXPECT_EQ(Status::OUT_OF_RANGE,
+            log_queue_small.PushTokenizedMessage(
+                std::as_bytes(std::span(kTokenizedMessage)),
+                kFlags,
+                kLevel,
+                kLine,
+                kTokenizedThread,
+                kTimestamp));
+
+  // Expect RESOURCE_EXHAUSTED when there's not enough space for the chunk.
+  LogQueueWithEncodeBuffer<kEncodeBufferSize> log_queue_medium(log_buffer);
+  for (size_t i = 0; i < kEntryCount; i++) {
+    log_queue_medium.PushTokenizedMessage(
+        std::as_bytes(std::span(kTokenizedMessage)),
+        kFlags,
+        kLevel,
+        kLine,
+        kTokenizedThread,
+        kTimestamp);
+  }
+  EXPECT_EQ(Status::RESOURCE_EXHAUSTED,
+            log_queue_medium.PushTokenizedMessage(
+                std::as_bytes(std::span(kTokenizedMessage)),
+                kFlags,
+                kLevel,
+                kLine,
+                kTokenizedThread,
+                kTimestamp));
+}
+
+}  // namespace pw::log_rpc
diff --git a/pw_log_rpc/logs_rpc.cc b/pw_log_rpc/logs_rpc.cc
new file mode 100644
index 0000000..ada5470
--- /dev/null
+++ b/pw_log_rpc/logs_rpc.cc
@@ -0,0 +1,75 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_log_rpc/logs_rpc.h"
+
+#include "pw_log/log.h"
+#include "pw_log_proto/log.pwpb.h"
+#include "pw_status/try.h"
+
+namespace pw::log_rpc {
+namespace {
+
+Result<ConstByteSpan> GenerateDroppedEntryMessage(ByteSpan encode_buffer,
+                                                  size_t dropped_entries) {
+  pw::protobuf::NestedEncoder nested_encoder(encode_buffer);
+  pw::log::LogEntry::Encoder encoder(&nested_encoder);
+  encoder.WriteDropped(dropped_entries);
+  return nested_encoder.Encode();
+}
+
+}  // namespace
+
+void Logs::Get(ServerContext&, ConstByteSpan, rpc::RawServerWriter& writer) {
+  response_writer_ = std::move(writer);
+}
+
+Status Logs::Flush() {
+  // If the response writer was not initialized or has since been closed,
+  // ignore the flush operation.
+  if (!response_writer_.open()) {
+    return Status::Ok();
+  }
+
+  // If previous calls to flush resulted in dropped entries, generate a
+  // dropped entry message and write it before further log messages.
+  if (dropped_entries_ > 0) {
+    ByteSpan payload = response_writer_.PayloadBuffer();
+    Result dropped_log = GenerateDroppedEntryMessage(payload, dropped_entries_);
+    PW_TRY(dropped_log.status());
+    PW_TRY(response_writer_.Write(dropped_log.value()));
+    dropped_entries_ = 0;
+  }
+
+  // Write logs to the response writer. An important limitation of this
+  // implementation is that if this RPC call fails, the logs are lost -
+  // a subsequent call to the RPC will produce a drop count message.
+  ByteSpan payload = response_writer_.PayloadBuffer();
+  Result possible_logs = log_queue_.PopMultiple(payload);
+  PW_TRY(possible_logs.status());
+  if (possible_logs.value().entry_count == 0) {
+    return Status::Ok();
+  }
+
+  Status status = response_writer_.Write(possible_logs.value().entries);
+  if (!status.ok()) {
+    // On a failure to send logs, track the dropped entries.
+    dropped_entries_ = possible_logs.value().entry_count;
+    return status;
+  }
+
+  return Status::Ok();
+}
+
+}  // namespace pw::log_rpc
diff --git a/pw_log_rpc/logs_rpc_test.cc b/pw_log_rpc/logs_rpc_test.cc
new file mode 100644
index 0000000..50541bb
--- /dev/null
+++ b/pw_log_rpc/logs_rpc_test.cc
@@ -0,0 +1,132 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_log_rpc/logs_rpc.h"
+
+#include "gtest/gtest.h"
+#include "pw_log/log.h"
+#include "pw_rpc/raw_test_method_context.h"
+
+namespace pw::log_rpc {
+namespace {
+
+#define LOGS_METHOD_CONTEXT PW_RAW_TEST_METHOD_CONTEXT(Logs, Get)
+
+constexpr size_t kEncodeBufferSize = 128;
+constexpr size_t kLogBufferSize = 4096;
+
+class LogQueueTester : public LogQueueWithEncodeBuffer<kLogBufferSize> {
+ public:
+  LogQueueTester(ByteSpan log_queue)
+      : LogQueueWithEncodeBuffer<kLogBufferSize>(log_queue) {}
+
+  void SetPopStatus(Status error_status) {
+    pop_status_for_test_ = error_status;
+  }
+};
+
+class LogsService : public ::testing::Test {
+ public:
+  LogsService() : log_queue_(log_queue_buffer_) {}
+
+ protected:
+  void AddLogs(const size_t log_count = 1) {
+    constexpr char kTokenizedMessage[] = "message";
+    for (size_t i = 0; i < log_count; i++) {
+      EXPECT_EQ(
+          Status::Ok(),
+          log_queue_.PushTokenizedMessage(
+              std::as_bytes(std::span(kTokenizedMessage)), 0, 0, 0, 0, 0));
+    }
+  }
+
+  static Logs& GetLogs(LOGS_METHOD_CONTEXT& context) {
+    return (Logs&)(context.service());
+  }
+
+  std::array<std::byte, kEncodeBufferSize> log_queue_buffer_;
+  LogQueueWithEncodeBuffer<kLogBufferSize> log_queue_;
+};
+
+TEST_F(LogsService, Get) {
+  constexpr size_t kLogEntryCount = 3;
+  std::array<std::byte, 1> rpc_buffer;
+  LOGS_METHOD_CONTEXT context(log_queue_);
+
+  context.call(rpc_buffer);
+
+  // Flush all logs from the buffer, then close the RPC.
+  AddLogs(kLogEntryCount);
+  GetLogs(context).Flush();
+  GetLogs(context).Finish();
+
+  EXPECT_TRUE(context.done());
+  EXPECT_EQ(Status::Ok(), context.status());
+
+  // Although |kLogEntryCount| messages were in the queue, they are batched
+  // before being written to the client, so there is only one response.
+  EXPECT_EQ(1U, context.total_responses());
+}
+
+TEST_F(LogsService, GetMultiple) {
+  constexpr size_t kLogEntryCount = 1;
+  constexpr size_t kFlushCount = 3;
+  std::array<std::byte, 1> rpc_buffer;
+  LOGS_METHOD_CONTEXT context(log_queue_);
+
+  context.call(rpc_buffer);
+
+  for (size_t i = 0; i < kFlushCount; i++) {
+    AddLogs(kLogEntryCount);
+    GetLogs(context).Flush();
+  }
+  GetLogs(context).Finish();
+
+  EXPECT_TRUE(context.done());
+  EXPECT_EQ(Status::Ok(), context.status());
+  EXPECT_EQ(kFlushCount, context.total_responses());
+}
+
+TEST_F(LogsService, NoEntriesOnEmptyQueue) {
+  std::array<std::byte, 1> rpc_buffer;
+  LOGS_METHOD_CONTEXT context(log_queue_);
+
+  // Invoking flush with no logs in the queue should behave like a no-op.
+  context.call(rpc_buffer);
+  GetLogs(context).Flush();
+  GetLogs(context).Finish();
+
+  EXPECT_TRUE(context.done());
+  EXPECT_EQ(Status::Ok(), context.status());
+  EXPECT_EQ(0U, context.total_responses());
+}
+
+TEST_F(LogsService, QueueError) {
+  std::array<std::byte, 1> rpc_buffer;
+  LogQueueTester log_queue_tester(log_queue_buffer_);
+  LOGS_METHOD_CONTEXT context(log_queue_tester);
+
+  // Generate failure on log queue.
+  log_queue_tester.SetPopStatus(Status::Internal());
+  context.call(rpc_buffer);
+  GetLogs(context).Flush();
+  GetLogs(context).Finish();
+
+  EXPECT_TRUE(context.done());
+  EXPECT_EQ(Status::Ok(), context.status());
+  EXPECT_EQ(0U, context.total_responses());
+}
+
+}  // namespace
+}  // namespace pw::log_rpc
diff --git a/pw_log_rpc/public/pw_log_rpc/log_queue.h b/pw_log_rpc/public/pw_log_rpc/log_queue.h
new file mode 100644
index 0000000..450d0fa
--- /dev/null
+++ b/pw_log_rpc/public/pw_log_rpc/log_queue.h
@@ -0,0 +1,134 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#pragma once
+
+#include "pw_bytes/span.h"
+#include "pw_result/result.h"
+#include "pw_ring_buffer/prefixed_entry_ring_buffer.h"
+#include "pw_status/status.h"
+#include "pw_status/status_with_size.h"
+
+// LogQueue is a ring-buffer queue of log messages. LogQueue is backed by
+// a caller-provided byte array and stores its messages in the format
+// dictated by the pw_log log.proto format.
+//
+// Logs can be returned as a repeated proto message and the output of this
+// class can be directly fed into an RPC stream.
+//
+// Push logs:
+// 0) Create LogQueue instance.
+// 1) LogQueue::PushTokenizedMessage().
+//
+// Pop logs:
+// 0) Use existing LogQueue instance.
+// 1) For single entries, LogQueue::Pop().
+// 2) For multiple entries, LogQueue::PopMultiple().
+namespace pw::log_rpc {
+namespace {
+constexpr size_t kLogEntryMaxSize = 100;
+}  // namespace
+
+using LogEntriesBuffer = ByteSpan;
+
+struct LogEntries {
+  // A buffer containing an encoded protobuf of type pw.log.LogEntries.
+  ConstByteSpan entries;
+  size_t entry_count;
+};
+
+class LogQueue {
+ public:
+  // Constructs a LogQueue. Callers can optionally supply a maximum log entry
+  // size, which limits the size of messages that can be pushed into this log
+  // queue. When such an entry arrives, the queue increments its drop counter.
+  // Calls to Pop and PopMultiple should be provided a buffer of at least the
+  // configured max size.
+  LogQueue(ByteSpan log_buffer,
+           ByteSpan encode_buffer,
+           size_t max_log_entry_size = kLogEntryMaxSize)
+      : pop_status_for_test_(Status::Ok()),
+        max_log_entry_size_(max_log_entry_size),
+        encode_buffer_(encode_buffer),
+        ring_buffer_(true) {
+    ring_buffer_.SetBuffer(log_buffer);
+  }
+
+  LogQueue(const LogQueue&) = delete;
+  LogQueue& operator=(const LogQueue&) = delete;
+  LogQueue(LogQueue&&) = delete;
+  LogQueue& operator=(LogQueue&&) = delete;
+
+  // Construct a LogEntry proto message and push it into the ring buffer.
+  // Returns:
+  //
+  //  OK - success.
+  //  FAILED_PRECONDITION - Failed when encoding the proto message.
+  //  RESOURCE_EXHAUSTED - Not enough space in the buffer to write the entry.
+  Status PushTokenizedMessage(ConstByteSpan message,
+                              uint32_t flags,
+                              uint32_t level,
+                              uint32_t line,
+                              uint32_t thread,
+                              int64_t timestamp);
+
+  // Pop the oldest LogEntry from the queue into the provided buffer.
+  // On success, the size is the length of the entry; on failure, the size is 0.
+  // For now, batching is not supported. This will always use a single absolute
+  // timestamp, and will not use delta encoding.
+  //
+  // Returns:
+  //
+  //  OK - success.
+  //  OUT_OF_RANGE - No entries in queue to read.
+  //  RESOURCE_EXHAUSTED - The destination data std::span was smaller than
+  //  the data size of the data chunk being read. Available destination
+  //  bytes were filled; the remaining bytes of the data chunk were
+  //  ignored.
+  Result<LogEntries> Pop(LogEntriesBuffer entry_buffer);
+
+  // Pop entries from the queue into the provided buffer. The provided buffer is
+  // filled until there is insufficient space for the next log entry.
+  // Returns:
+  //
+  // LogEntries - contains an encoded protobuf byte span of pw.log.LogEntries.
+  LogEntries PopMultiple(LogEntriesBuffer entries_buffer);
+
+ protected:
+  friend class LogQueueTester;
+  // For testing, status to return on calls to Pop.
+  Status pop_status_for_test_;
+
+ private:
+  const size_t max_log_entry_size_;
+  size_t dropped_entries_;
+  int64_t latest_dropped_timestamp_;
+
+  ByteSpan encode_buffer_;
+  pw::ring_buffer::PrefixedEntryRingBuffer ring_buffer_{true};
+};
+
+// LogQueueWithEncodeBuffer is a LogQueue where the internal encode buffer is
+// created and managed by this class.
+template <size_t kEncodeBufferSize>
+class LogQueueWithEncodeBuffer : public LogQueue {
+ public:
+  LogQueueWithEncodeBuffer(ByteSpan log_buffer)
+      : LogQueue(log_buffer, encode_buffer_) {}
+
+ private:
+  std::byte encode_buffer_[kEncodeBufferSize];
+};
+
+}  // namespace pw::log_rpc
diff --git a/pw_log_rpc/public/pw_log_rpc/logs_rpc.h b/pw_log_rpc/public/pw_log_rpc/logs_rpc.h
new file mode 100644
index 0000000..2bd354f
--- /dev/null
+++ b/pw_log_rpc/public/pw_log_rpc/logs_rpc.h
@@ -0,0 +1,52 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#pragma once
+
+#include "pw_log/log.h"
+#include "pw_log_proto/log.raw_rpc.pb.h"
+#include "pw_log_rpc/log_queue.h"
+
+namespace pw::log_rpc {
+
+// The Logs RPC service will send logs when requested by Get(). For now, Get()
+// requests result in a stream of responses, containing all log entries from
+// the attached log queue.
+//
+// The Get() method will return logs in the current queue immediately, but
+// someone else is responsible for pumping the log queue using Flush().
+class Logs final : public pw::log::generated::Logs<Logs> {
+ public:
+  Logs(LogQueue& log_queue) : log_queue_(log_queue), dropped_entries_(0) {}
+
+  // RPC API for the Logs that produces a log stream. This method will
+  // return immediately, another class must call Flush() to push logs from
+  // the queue to this stream.
+  void Get(ServerContext&, ConstByteSpan, rpc::RawServerWriter& writer);
+
+  // Interface for the owner of the service instance to flush all existing
+  // logs to the writer, if one is attached.
+  Status Flush();
+
+  // Interface for the owner of the service instance to close the RPC, if
+  // one is attached.
+  void Finish() { response_writer_.Finish(); }
+
+ private:
+  LogQueue& log_queue_;
+  rpc::RawServerWriter response_writer_;
+  size_t dropped_entries_;
+};
+
+}  // namespace pw::log_rpc
diff --git a/pw_log_rpc/pw_log_proto/log.proto b/pw_log_rpc/pw_log_proto/log.proto
new file mode 100644
index 0000000..2e38f42
--- /dev/null
+++ b/pw_log_rpc/pw_log_proto/log.proto
@@ -0,0 +1,165 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+syntax = "proto2";
+
+package pw.log;
+
+option java_package = "pw.rpc.proto";
+option java_outer_classname = "Log";
+
+// A log with a tokenized message, a string message, or dropped indicator.  A
+// message can be one of three types:
+//
+//  1. A tokenized log message (recommended for production)
+//  2. A non-tokenized log message (good for development)
+//  3. A "log missed" tombstone, indicating that some logs were dropped
+//
+// Size analysis:
+//
+// For tokenized log messages in the common case; including the proto tag for
+// the field (so adding the fields gives the total proto message size):
+//
+//  - message_tokenized  - 6-12 bytes, depending on # and value of arguments
+//  - line_level         - 3 bytes; 4 bytes if line > 2048 (uncommon)
+//  - timestamp          - 3 bytes; assuming delta encoding
+//  - thread_tokenized   - 3 bytes
+//
+// Total:
+//
+//    6-12 bytes - log
+//    9-15 bytes - log + level + line
+//   12-18 bytes - log + level + line + timestamp
+//   15-21 bytes - log + level + line + timestamp + task
+//
+// An analysis of a project's log token database revealed the following
+// distribution of the number of arguments to log messages:
+//
+//   # args   # messages
+//     0         2,700
+//     1         2,400
+//     2         1,200
+//     3+        1,000
+//
+// Note: The below proto makes some compromises compared to what one might
+// expect for a "clean" proto design, in order to shave bytes off of the
+// messages. It is critical that the log messages are as small as possible to
+// enable storing more logs in limited memory. This is why, for example, there
+// is no separate "DroppedLog" type, or a "TokenizedLog" and "StringLog", which
+// would add at least 2 extra bytes per message.
+// Note: Time-related fields will likely support specifying the time as a ratio
+// (period) and an absolute time separate from the current delta fields.
+message LogEntry {
+  // The tokenized log message. Internally, the format has a 32-bit token
+  // followed by the arguments for that message. The unformatted log string
+  // corresponding to the token in the token database must follow this format:
+  //
+  //   file|module|message
+  //
+  // For example:
+  //
+  //   ../boot/bluetooth.cc|BOOT|Bluetooth is on the fritz; error code: %d
+  //
+  // Note: The level and flags are not included since level and flags are
+  // runtime values and so cannot be tokenized.
+  //
+  // Size analysis:
+  //
+  //   tag+wire = 1 byte
+  //   size     = 1 byte; payload will almost always be < 127 bytes
+  //   payload  = N bytes; typically 4-10 in practice
+  //
+  // Total: 2 + N ~= 6-12 bytes
+  optional bytes message_tokenized = 1;
+
+  // Packed log level and line number. Structure:
+  //
+  //   Level: Bottom 3 bits; level = line_level & 0x7
+  //   Line: Remaining bits; line = (line_level >> 3)
+  //
+  // Note: This packing saves two bytes per log message in most cases compared
+  // to having line and level separately; and is zero-cost if the log backend
+  // omits the line number.
+  optional uint32 line_level = 2;
+
+  // Some log messages have flags to indicate for example assert or PII. The
+  // particular flags are product- and implementation-dependent. When no flags
+  // are present, the field is omitted entirely.
+  optional uint32 flags = 3;
+
+  // The task or thread that created the log message.
+  //
+  // In practice, the task token and tag should be just 3 bytes, since a 14 bit
+  // token for the task name should be enough.
+  optional uint32 thread_tokenized = 4;
+
+  // Timestamp. Note: The units here are TBD and will likely require a separate
+  // mechanism to indicate units. This field is likely to change as we figure
+  // out the right strategy for timestamps in Pigweed. This is a variable-sized
+  // integer to enable scaling this up to a uint64 later on without impacting
+  // the wire format.
+  optional int64 timestamp = 5;
+
+  // Time since the last entry. Generally, one of timestamp or this field will
+  // be specified. This enables delta encoding when batching entries together.
+  //
+  // Size analysis for this field including tag and varint:
+  //
+  //           < 127 ms gap == 127 ms      ==  7 bits == 2 bytes
+  //        < 16,000 ms gap ==  16 seconds == 14 bits == 3 bytes
+  //     < 2,000,000 ms gap ==  35 minutes == 21 bits == 4 bytes
+  //   < 300,000,000 ms gap ==  74 hours   == 28 bits == 5 bytes
+  //
+  // Log bursts will thus consume just 2 bytes (tag + up to 127ms delta) for
+  // the timestamp, which is a good improvement over timestamp in many cases.
+  // Note: The units of this field are TBD and will likely require a separate
+  // mechanism to indicate units. The calculations above assume milliseconds
+  // and may change if the units differ.
+  optional int64 elapsed_time_since_last_entry = 6;
+
+  // Fully formatted textual log message.
+  optional string message_string = 16;
+
+  // For non-tokenized logging, the file name.
+  optional string file = 17;
+
+  // String representation of the task that created the log message.
+  optional string thread_string = 18;
+
+  // When the log buffers are full but more logs come in, the logs are counted
+// and a special log message is emitted with only counts for the number of
+  // messages dropped. The timestamp indicates the time that the "missed logs"
+  // message was inserted into the queue.
+  //
+  // Missed logs messages will only have one of the timestamp fields and these
+  // counters specified.
+  optional uint32 dropped = 19;
+  optional uint32 dropped_warning_or_above = 20;
+
+  // Some messages are associated with trace events, which may carry additional
+  // contextual data. This is a tuple of a data format string which could be
+  // used by the decoder to identify the data (e.g. printf-style tokens) and the
+  // data itself in bytes.
+  optional string data_format_string = 21;
+  optional bytes data = 22;
+}
+
+message LogRequest {}
+message LogEntries {
+  repeated LogEntry entries = 1;
+}
+
+service Logs {
+  rpc Get(LogRequest) returns (stream LogEntries) {}
+}
diff --git a/pw_log_tokenized/BUILD b/pw_log_tokenized/BUILD
index 6f78805..c1f5d4a 100644
--- a/pw_log_tokenized/BUILD
+++ b/pw_log_tokenized/BUILD
@@ -45,6 +45,18 @@
     ],
 )
 
+pw_cc_library(
+    name = "base64_over_hdlc",
+    srcs = ["base64_over_hdlc.cc"],
+    hdrs = ["public/pw_log_tokenized/base64_over_hdlc.h"],
+    includes = ["public"],
+    deps = [
+        "//pw_hdlc_lite:encoder",
+        "//pw_tokenizer:base64",
+        "//pw_tokenizer:global_handler_with_payload.facade",
+    ],
+)
+
 pw_cc_test(
     name = "test",
     srcs = [
diff --git a/pw_log_tokenized/BUILD.gn b/pw_log_tokenized/BUILD.gn
index 8e72bf0..d8df960 100644
--- a/pw_log_tokenized/BUILD.gn
+++ b/pw_log_tokenized/BUILD.gn
@@ -12,14 +12,14 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/target_types.gni")
 import("$dir_pw_docgen/docs.gni")
 import("$dir_pw_tokenizer/backend.gni")
 import("$dir_pw_unit_test/test.gni")
-config("default_config") {
+
+config("public_includes") {
   include_dirs = [ "public" ]
   visibility = [ ":*" ]
 }
@@ -33,7 +33,7 @@
 # backend.
 #
 # This target depends on the pw_tokenizer facade target
-# (dir_pw_tokenizer:global_handler_with_payload_facade) to avoid circular
+# (dir_pw_tokenizer:global_handler_with_payload.facade) to avoid circular
 # dependencies. The dependency graph for pw_log_tokenized is the following:
 #
 #   pw_log:facade <---   :pw_log_tokenized
@@ -42,10 +42,10 @@
 #   -> pw_log -> :log_backend --> pw_tokenizer:global_handler_with_payload
 #
 pw_source_set("pw_log_tokenized") {
-  public_configs = [ ":default_config" ]
+  public_configs = [ ":public_includes" ]
   public_deps = [
     "$dir_pw_log:facade",
-    "$dir_pw_tokenizer:global_handler_with_payload_facade",
+    "$dir_pw_tokenizer:global_handler_with_payload.facade",
     dir_pw_preprocessor,
   ]
   public = [ "public/pw_log_tokenized/log_tokenized.h" ]
@@ -59,6 +59,19 @@
   deps = [ "$dir_pw_tokenizer:global_handler_with_payload" ]
 }
 
+# This target provides a backend for pw_tokenizer that encodes tokenized logs as
+# Base64, encodes them into HDLC frames, and writes them over sys_io.
+pw_source_set("base64_over_hdlc") {
+  public_configs = [ ":public_includes" ]
+  public = [ "public/pw_log_tokenized/base64_over_hdlc.h" ]
+  sources = [ "base64_over_hdlc.cc" ]
+  deps = [
+    "$dir_pw_hdlc_lite:encoder",
+    "$dir_pw_tokenizer:base64",
+    "$dir_pw_tokenizer:global_handler_with_payload.facade",
+  ]
+}
+
 pw_test_group("tests") {
   tests = [ ":test" ]
 }
diff --git a/pw_log_tokenized/CMakeLists.txt b/pw_log_tokenized/CMakeLists.txt
index 35aaf65..1c91a64 100644
--- a/pw_log_tokenized/CMakeLists.txt
+++ b/pw_log_tokenized/CMakeLists.txt
@@ -12,11 +12,11 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+
 pw_auto_add_simple_module(pw_log_tokenized
   IMPLEMENTS_FACADE
     pw_log
   PUBLIC_DEPS
     pw_tokenizer
 )
-
-target_include_directories(pw_log_tokenized PUBLIC public_overrides)
diff --git a/pw_log_tokenized/base64_over_hdlc.cc b/pw_log_tokenized/base64_over_hdlc.cc
new file mode 100644
index 0000000..b8c7bdf
--- /dev/null
+++ b/pw_log_tokenized/base64_over_hdlc.cc
@@ -0,0 +1,52 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// This function serves as a backend for pw_tokenizer / pw_log_tokenized that
+// encodes tokenized logs as Base64 and writes them using HDLC.
+
+#include "pw_log_tokenized/base64_over_hdlc.h"
+
+#include <span>
+
+#include "pw_hdlc_lite/encoder.h"
+#include "pw_hdlc_lite/sys_io_stream.h"
+#include "pw_tokenizer/base64.h"
+#include "pw_tokenizer/tokenize_to_global_handler_with_payload.h"
+
+namespace pw::log_tokenized {
+namespace {
+
+stream::SysIoWriter writer;
+
+}  // namespace
+
+// Base64-encodes tokenized logs and writes them to pw::sys_io as HDLC frames.
+extern "C" void pw_tokenizer_HandleEncodedMessageWithPayload(
+    pw_tokenizer_Payload,  // TODO(hepler): Use the metadata for filtering.
+    const uint8_t log_buffer[],
+    size_t size_bytes) {
+  // Encode the tokenized message as Base64.
+  char base64_buffer[tokenizer::kDefaultBase64EncodedBufferSize];
+  const size_t base64_bytes = tokenizer::PrefixedBase64Encode(
+      std::span(log_buffer, size_bytes), base64_buffer);
+  base64_buffer[base64_bytes] = '\0';
+
+  // HDLC-encode the Base64 string via a SysIoWriter.
+  hdlc_lite::WriteInformationFrame(
+      PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS,
+      std::as_bytes(std::span(base64_buffer, base64_bytes)),
+      writer);
+}
+
+}  // namespace pw::log_tokenized
diff --git a/pw_log_tokenized/docs.rst b/pw_log_tokenized/docs.rst
index 4632af6..1c30ee7 100644
--- a/pw_log_tokenized/docs.rst
+++ b/pw_log_tokenized/docs.rst
@@ -1,15 +1,11 @@
-.. default-domain:: cpp
-
-.. highlight:: sh
-
-.. _chapter-pw-log-tokenized:
+.. _module-pw_log_tokenized:
 
 ----------------
 pw_log_tokenized
 ----------------
 ``pw_log_tokenized`` is a ``pw_log`` backend that tokenizes log messages using
 the ``pw_tokenizer`` module. Log messages are tokenized and passed to the
-``pw_TokenizerHandleEncodedMessageWithPayload`` function. For maximum
+``pw_tokenizer_HandleEncodedMessageWithPayload`` function. For maximum
 efficiency, the log level, 16-bit tokenized module name, and flags bits are
 passed through the payload argument.
 
@@ -17,8 +13,8 @@
 
 .. code-block:: cpp
 
-   extern "C" void pw_TokenizerHandleEncodedMessageWithPayload(
-       pw_TokenizerPayload payload, const uint8_t message[], size_t size) {
+   extern "C" void pw_tokenizer_HandleEncodedMessageWithPayload(
+       pw_tokenizer_Payload payload, const uint8_t message[], size_t size) {
      // The metadata object provides the log level, module token, and flags.
      // These values can be recorded and used for runtime filtering.
      pw::log_tokenized::Metadata metadata(payload);
@@ -42,7 +38,7 @@
 ``log_backend``. The ``pw_log_tokenized`` target provides the
 ``pw_log_tokenized/log_tokenized.h`` header. The ``log_backend`` target
 implements the backend for the ``pw_log`` facade. ``pw_log_tokenized`` invokes
-the ``pw_tokenizer:global_handler_with_facade`` facade, which must be
+the ``pw_tokenizer:global_handler_with_payload`` facade, which must be
 implemented by the user of ``pw_log_tokenized``.
 
 .. note::
diff --git a/pw_sys_io_baremetal_lm3s6965evb/early_boot.c b/pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h
similarity index 71%
copy from pw_sys_io_baremetal_lm3s6965evb/early_boot.c
copy to pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h
index 1670b7d..af31532 100644
--- a/pw_sys_io_baremetal_lm3s6965evb/early_boot.c
+++ b/pw_log_tokenized/public/pw_log_tokenized/base64_over_hdlc.h
@@ -11,7 +11,9 @@
 // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 // License for the specific language governing permissions and limitations under
 // the License.
+#pragma once
 
-#include "pw_boot_armv7m/boot.h"
-
-void pw_PreStaticConstructorInit() {}
\ No newline at end of file
+// The HDLC address to which to write Base64-encoded tokenized logs.
+#ifndef PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS
+#define PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS 1
+#endif  // PW_LOG_TOKENIZED_BASE64_LOG_HDLC_ADDRESS
diff --git a/pw_log_tokenized/public/pw_log_tokenized/log_tokenized.h b/pw_log_tokenized/public/pw_log_tokenized/log_tokenized.h
index 9a8f053..1589e88 100644
--- a/pw_log_tokenized/public/pw_log_tokenized/log_tokenized.h
+++ b/pw_log_tokenized/public/pw_log_tokenized/log_tokenized.h
@@ -16,19 +16,20 @@
 #include <assert.h>
 #include <stdint.h>
 
-#include "pw_preprocessor/util.h"
+#include "pw_log/options.h"
+#include "pw_preprocessor/concat.h"
 #include "pw_tokenizer/tokenize_to_global_handler_with_payload.h"
 
 // This macro implements PW_LOG, using
 // PW_TOKENIZE_TO_GLOBAL_HANDLER_WITH_PAYLOAD. The log level, module token, and
 // flags are packed into the payload argument.
 //
-// To use this macro, implement pw_TokenizerHandleEncodedMessageWithPayload,
+// To use this macro, implement pw_tokenizer_HandleEncodedMessageWithPayload,
 // which is defined in pw_tokenizer/tokenize.h. The log metadata can be accessed
 // using pw::log_tokenized::Metadata. For example:
 //
-//   extern "C" void pw_TokenizerHandleEncodedMessageWithPayload(
-//       pw_TokenizerPayload payload, const uint8_t data[], size_t size) {
+//   extern "C" void pw_tokenizer_HandleEncodedMessageWithPayload(
+//       pw_tokenizer_Payload payload, const uint8_t data[], size_t size) {
 //     pw::log_tokenized::Metadata metadata(payload);
 //
 //     if (metadata.level() >= kLogLevel && ModuleEnabled(metadata.module())) {
@@ -36,19 +37,31 @@
 //     }
 //   }
 //
-// TODO(hepler): Pack the hashed version of the module name into the payload.
-#define PW_LOG_TOKENIZED_TO_GLOBAL_HANDLER_WITH_PAYLOAD(              \
-    level, flags, message, ...)                                       \
-  PW_TOKENIZE_TO_GLOBAL_HANDLER_WITH_PAYLOAD(                         \
-      ((uintptr_t)(level) | 0u /* TODO(hepler): add module token */ | \
-       ((uintptr_t)(flags) << (_PW_LOG_TOKENIZED_LEVEL_BITS +         \
-                               _PW_LOG_TOKENIZED_MODULE_BITS))),      \
-      message,                                                        \
-      __VA_ARGS__)
+#define PW_LOG_TOKENIZED_TO_GLOBAL_HANDLER_WITH_PAYLOAD(                       \
+    level, flags, message, ...)                                                \
+  do {                                                                         \
+    _PW_TOKENIZER_CONST uintptr_t _pw_log_module_token =                       \
+        PW_TOKENIZE_STRING_DOMAIN("log_module_names", PW_LOG_MODULE_NAME);     \
+    PW_TOKENIZE_TO_GLOBAL_HANDLER_WITH_PAYLOAD(                                \
+        ((uintptr_t)(level) |                                                  \
+         ((_pw_log_module_token &                                              \
+           ((1u << _PW_LOG_TOKENIZED_MODULE_BITS) - 1u))                       \
+          << _PW_LOG_TOKENIZED_LEVEL_BITS) |                                   \
+         ((uintptr_t)(flags)                                                   \
+          << (_PW_LOG_TOKENIZED_LEVEL_BITS + _PW_LOG_TOKENIZED_MODULE_BITS))), \
+        PW_LOG_TOKENIZED_FORMAT_STRING(message),                               \
+        __VA_ARGS__);                                                          \
+  } while (0)
 
-// By default, log format strings include PW_LOG_MODULE_NAME.
+// By default, log format strings include the PW_LOG_MODULE_NAME, if defined.
 #ifndef PW_LOG_TOKENIZED_FORMAT_STRING
-#define PW_LOG_TOKENIZED_FORMAT_STRING(string) PW_LOG_MODULE_NAME ": " string
+
+#define PW_LOG_TOKENIZED_FORMAT_STRING(string) \
+  PW_CONCAT(_PW_LOG_TOKENIZED_FMT_, PW_LOG_MODULE_NAME_DEFINED)(string)
+
+#define _PW_LOG_TOKENIZED_FMT_0(string) string
+#define _PW_LOG_TOKENIZED_FMT_1(string) PW_LOG_MODULE_NAME " " string
+
 #endif  // PW_LOG_TOKENIZED_FORMAT_STRING
 
 // The log level, module token, and flag bits are packed into the tokenizer's
diff --git a/pw_log_tokenized/public_overrides/pw_log_backend/log_backend.h b/pw_log_tokenized/public_overrides/pw_log_backend/log_backend.h
index b6035d9..02361b2 100644
--- a/pw_log_tokenized/public_overrides/pw_log_backend/log_backend.h
+++ b/pw_log_tokenized/public_overrides/pw_log_backend/log_backend.h
@@ -19,3 +19,6 @@
 #include "pw_log_tokenized/log_tokenized.h"
 
 #define PW_HANDLE_LOG PW_LOG_TOKENIZED_TO_GLOBAL_HANDLER_WITH_PAYLOAD
+
+#define PW_LOG_LEVEL_BITS _PW_LOG_TOKENIZED_LEVEL_BITS
+#define PW_LOG_FLAG_BITS _PW_LOG_TOKENIZED_FLAG_BITS
diff --git a/pw_log_tokenized/test.cc b/pw_log_tokenized/test.cc
index 36c787c..6b5f976 100644
--- a/pw_log_tokenized/test.cc
+++ b/pw_log_tokenized/test.cc
@@ -12,11 +12,11 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+#define PW_LOG_MODULE_NAME "This is the log module name!"
+
 #include <cstring>
 #include <string_view>
 
-#define PW_LOG_MODULE_NAME "test"
-
 #include "gtest/gtest.h"
 #include "pw_log_tokenized/log_tokenized.h"
 
@@ -26,13 +26,15 @@
 Metadata metadata(0);
 size_t encoded_data_size = 0;
 
-extern "C" void pw_TokenizerHandleEncodedMessageWithPayload(
-    pw_TokenizerPayload payload, const uint8_t[], size_t size) {
+extern "C" void pw_tokenizer_HandleEncodedMessageWithPayload(
+    pw_tokenizer_Payload payload, const uint8_t[], size_t size) {
   metadata = payload;
   encoded_data_size = size;
 }
 
-extern "C" void pw_TokenizerHandleEncodedMessage(const uint8_t[], size_t) {}
+constexpr uintptr_t kModuleToken =
+    PW_TOKENIZER_STRING_TOKEN(PW_LOG_MODULE_NAME) &
+    ((1u << _PW_LOG_TOKENIZED_MODULE_BITS) - 1);
 
 constexpr Metadata test1 = Metadata::Set<0, 0, 0>();
 static_assert(test1.level() == 0);
@@ -53,6 +55,7 @@
   PW_LOG_TOKENIZED_TO_GLOBAL_HANDLER_WITH_PAYLOAD(0, 0, "hello");
   EXPECT_EQ(metadata.level(), 0u);
   EXPECT_EQ(metadata.flags(), 0u);
+  EXPECT_EQ(metadata.module(), kModuleToken);
   EXPECT_EQ(encoded_data_size, 4u /* token */);
 }
 
@@ -60,6 +63,7 @@
   PW_LOG_TOKENIZED_TO_GLOBAL_HANDLER_WITH_PAYLOAD(55, 36, "hello%s", "?");
   EXPECT_EQ(metadata.level(), 55u);
   EXPECT_EQ(metadata.flags(), 36u);
+  EXPECT_EQ(metadata.module(), kModuleToken);
   EXPECT_EQ(encoded_data_size, 4u /* token */ + 2u /* encoded string */);
 }
 
@@ -67,6 +71,7 @@
   PW_LOG_TOKENIZED_TO_GLOBAL_HANDLER_WITH_PAYLOAD(63, 1023, "hello %d", 1);
   EXPECT_EQ(metadata.level(), 63u);
   EXPECT_EQ(metadata.flags(), 1023u);
+  EXPECT_EQ(metadata.module(), kModuleToken);
   EXPECT_EQ(encoded_data_size, 4u /* token */ + 1u /* encoded integer */);
 }
 
diff --git a/pw_malloc/BUILD.gn b/pw_malloc/BUILD.gn
index f56a0fe..5a700a8 100644
--- a/pw_malloc/BUILD.gn
+++ b/pw_malloc/BUILD.gn
@@ -12,7 +12,6 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/facade.gni")
@@ -20,6 +19,7 @@
 import("$dir_pw_docgen/docs.gni")
 import("$dir_pw_malloc/backend.gni")
 import("$dir_pw_unit_test/test.gni")
+
 config("default_config") {
   include_dirs = [ "public" ]
 }
diff --git a/pw_malloc/docs.rst b/pw_malloc/docs.rst
index 7d55265..d927d4c 100644
--- a/pw_malloc/docs.rst
+++ b/pw_malloc/docs.rst
@@ -1,6 +1,4 @@
-.. _chapter-pw-malloc:
-
-.. default-domain:: cpp
+.. _module-pw_malloc:
 
 ---------
 pw_malloc
diff --git a/pw_malloc_freelist/BUILD.gn b/pw_malloc_freelist/BUILD.gn
index 2479f8b..027d23e 100644
--- a/pw_malloc_freelist/BUILD.gn
+++ b/pw_malloc_freelist/BUILD.gn
@@ -12,13 +12,13 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
-import("$dir_pw_malloc/backend.gni")
 
 import("$dir_pw_build/target_types.gni")
 import("$dir_pw_docgen/docs.gni")
+import("$dir_pw_malloc/backend.gni")
 import("$dir_pw_unit_test/test.gni")
+
 config("default_config") {
   include_dirs = [ "public" ]
 }
diff --git a/pw_malloc_freelist/docs.rst b/pw_malloc_freelist/docs.rst
index 928ac5b..c6483d2 100644
--- a/pw_malloc_freelist/docs.rst
+++ b/pw_malloc_freelist/docs.rst
@@ -1,6 +1,4 @@
-.. _chapter-pw-malloc-freelist:
-
-.. default-domain:: cpp
+.. _module-pw_malloc_freelist:
 
 ------------------
 pw_malloc_freelist
diff --git a/pw_malloc_freelist/freelist_malloc.cc b/pw_malloc_freelist/freelist_malloc.cc
index b2285e5..8797295 100644
--- a/pw_malloc_freelist/freelist_malloc.cc
+++ b/pw_malloc_freelist/freelist_malloc.cc
@@ -32,11 +32,11 @@
 #endif
 // Define the global heap variables.
 void pw_MallocInit() {
-  // pw_heap_low_addr and pw_heap_high_addr specifies the heap region from
-  // the linker script in "pw_boot_armv7m".
+  // pw_boot_heap_low_addr and pw_boot_heap_high_addr specify the heap region
+  // from the linker script in "pw_boot_armv7m".
   std::span<std::byte> pw_allocator_freelist_raw_heap =
-      std::span(reinterpret_cast<std::byte*>(&pw_heap_low_addr),
-                &pw_heap_high_addr - &pw_heap_low_addr);
+      std::span(reinterpret_cast<std::byte*>(&pw_boot_heap_low_addr),
+                &pw_boot_heap_high_addr - &pw_boot_heap_low_addr);
   pw_freelist_heap = new (&buf)
       pw::allocator::FreeListHeapBuffer(pw_allocator_freelist_raw_heap);
 }
diff --git a/pw_metric/BUILD b/pw_metric/BUILD
new file mode 100644
index 0000000..051fc27
--- /dev/null
+++ b/pw_metric/BUILD
@@ -0,0 +1,92 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+load(
+    "//pw_build:pigweed.bzl",
+    "pw_cc_library",
+    "pw_cc_test",
+)
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])  # Apache License 2.0
+
+pw_cc_library(
+    name = "metric",
+    hdrs = [
+        "public/pw_metric/metric.h",
+        "public/pw_metric/global.h",
+    ],
+    includes = ["public"],
+    srcs = [ "metric.cc" ],
+    deps = [
+        "//pw_assert",
+        "//pw_containers",
+        "//pw_log",
+        "//pw_span",
+        "//pw_tokenizer",
+    ],
+)
+
+pw_cc_library(
+    name = "global",
+    hdrs = [
+        "public/pw_metric/global.h",
+    ],
+    srcs = [ "global.cc" ],
+    deps = [
+        ":metric",
+    ],
+)
+
+pw_cc_library(
+    name = "metric_service_nanopb",
+    hdrs = [
+        "public/pw_metric/metric_service_nanopb.h",
+    ],
+    srcs = [ "metric_service_nanopb.cc" ],
+    deps = [
+        ":metric",
+    ],
+)
+
+pw_cc_test(
+    name = "metric_test",
+    srcs = [
+        "metric_test.cc",
+    ],
+    deps = [
+        ":metric",
+    ],
+)
+
+pw_cc_test(
+    name = "global_test",
+    srcs = [
+        "global_test.cc",
+    ],
+    deps = [
+        ":global",
+    ],
+)
+
+pw_cc_test(
+    name = "metric_service_nanopb_test",
+    srcs = [
+        "metric_service_nanopb_test.cc",
+    ],
+    deps = [
+        ":metric_service_nanopb",
+    ],
+)
diff --git a/pw_metric/BUILD.gn b/pw_metric/BUILD.gn
new file mode 100644
index 0000000..0abc449
--- /dev/null
+++ b/pw_metric/BUILD.gn
@@ -0,0 +1,139 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_bloat/bloat.gni")
+import("$dir_pw_build/target_types.gni")
+import("$dir_pw_docgen/docs.gni")
+import("$dir_pw_protobuf_compiler/proto.gni")
+import("$dir_pw_third_party/nanopb/nanopb.gni")
+import("$dir_pw_unit_test/test.gni")
+
+config("default_config") {
+  include_dirs = [ "public" ]
+}
+
+pw_source_set("pw_metric") {
+  public_configs = [ ":default_config" ]
+  public = [ "public/pw_metric/metric.h" ]
+  sources = [ "metric.cc" ]
+  public_deps = [
+    "$dir_pw_tokenizer:base64",
+    dir_pw_assert,
+    dir_pw_containers,
+    dir_pw_log,
+    dir_pw_tokenizer,
+  ]
+}
+
+# This gives access to the "PW_METRIC_GLOBAL()" macros, for globally-registered
+# metric definitions.
+pw_source_set("global") {
+  public_configs = [ ":default_config" ]
+  public = [ "public/pw_metric/global.h" ]
+  sources = [ "global.cc" ]
+  public_deps = [
+    ":pw_metric",
+    dir_pw_tokenizer,
+  ]
+}
+
+################################################################################
+# Service
+pw_proto_library("metric_service_proto") {
+  sources = [ "pw_metric_proto/metric_service.proto" ]
+  inputs = [ "pw_metric_proto/metric_service.options" ]
+}
+
+# TODO(keir): Consider moving the nanopb service into the nanopb/ directory
+# instead of having it directly inside pw_metric/.
+if (dir_pw_third_party_nanopb != "") {
+  pw_source_set("metric_service_nanopb") {
+    public_configs = [ ":default_config" ]
+    public_deps = [
+      ":metric_service_proto.nanopb_rpc",
+      ":pw_metric",
+    ]
+    public = [ "public/pw_metric/metric_service_nanopb.h" ]
+    deps = [
+      ":metric_service_proto.nanopb_rpc",
+      "$dir_pw_containers:vector",
+      dir_pw_tokenizer,
+    ]
+    sources = [ "metric_service_nanopb.cc" ]
+  }
+
+  pw_test("metric_service_nanopb_test") {
+    deps = [
+      ":global",
+      ":metric_service_nanopb",
+      "$dir_pw_rpc/nanopb:test_method_context",
+    ]
+    sources = [ "metric_service_nanopb_test.cc" ]
+  }
+}
+
+################################################################################
+
+pw_test_group("tests") {
+  tests = [
+    ":metric_test",
+    ":global_test",
+  ]
+  if (dir_pw_third_party_nanopb != "") {
+    tests += [ ":metric_service_nanopb_test" ]
+  }
+}
+
+pw_test("metric_test") {
+  sources = [ "metric_test.cc" ]
+  deps = [ ":pw_metric" ]
+}
+
+pw_test("global_test") {
+  sources = [ "global_test.cc" ]
+  deps = [ ":global" ]
+}
+
+pw_size_report("metric_size_report") {
+  title = "Typical pw_metric use (no RPC service)"
+
+  # To see all the symbols, uncomment the following:
+  # Note: The size report RST table won't be generated when full_report = true.
+  #full_report = true
+
+  binaries = [
+    {
+      target = "size_report:one_metric"
+      base = "size_report:base"
+      label = "1 metric and 1 group no dump or export"
+    },
+    {
+      target = "size_report:dump"
+      base = "size_report:base"
+      label = "(+) dump group and metrics to log"
+    },
+    {
+      target = "size_report:more_metrics"
+      base = "size_report:dump"
+      label = "(+) 1 group (+) 4 metrics"
+    },
+  ]
+}
+
+pw_doc_group("docs") {
+  sources = [ "docs.rst" ]
+  report_deps = [ ":metric_size_report" ]
+}
diff --git a/pw_metric/docs.rst b/pw_metric/docs.rst
new file mode 100644
index 0000000..6bdcd5a
--- /dev/null
+++ b/pw_metric/docs.rst
@@ -0,0 +1,847 @@
+.. _module-pw_metric:
+
+=========
+pw_metric
+=========
+
+.. attention::
+
+  This module is **not yet production ready**; ask us if you are interested in
+  trying it out or have ideas about how to improve it.
+
+--------
+Overview
+--------
+Pigweed's metric module is a **lightweight manual instrumentation system** for
+tracking system health metrics like counts or set values. For example,
+``pw_metric`` could help with tracking the number of I2C bus writes, or the
+number of times a buffer was filled before it could drain in time, or safely
+incrementing counters from ISRs.
+
+Key features of ``pw_metric``:
+
+- **Tokenized names** - Names are tokenized using the ``pw_tokenizer`` enabling
+  long metric names that don't bloat your binary.
+
+- **Tree structure** - Metrics can form a tree, enabling grouping of related
+  metrics for clearer organization.
+
+- **Per object collection** - Metrics and groups can live on object instances
+  and be flexibly combined with metrics from other instances.
+
+- **Global registration** - For legacy code bases or just because it's easier,
+  ``pw_metric`` supports automatic aggregation of metrics. This is optional but
+  convenient in many cases.
+
+- **Simple design** - There are only two core data structures: ``Metric`` and
+  ``Group``, which are both simple to understand and use. The only type of
+  metric supported is ``uint32_t`` and ``float``. This module does not support
+  complicated aggregations like running average or min/max.
+
+Example: Instrumenting a single object
+--------------------------------------
+The below example illustrates what instrumenting a class with a metric group
+and metrics might look like. In this case, the object's
+``MySubsystem::metrics()`` member is not globally registered; the user is on
+their own for combining this subsystem's metrics with others.
+
+.. code::
+
+  #include "pw_metric/metric.h"
+
+  class MySubsystem {
+   public:
+    void DoSomething() {
+      attempts_.Increment();
+      if (ActionSucceeds()) {
+        successes_.Increment();
+      }
+    }
+    Group& metrics() { return metrics_; }
+
+   private:
+    PW_METRIC_GROUP(metrics_, "my_subsystem");
+    PW_METRIC(metrics_, attempts_, "attempts", 0u);
+    PW_METRIC(metrics_, successes_, "successes", 0u);
+  };
+
+The metrics subsystem has no canonical output format at this time, but a JSON
+dump might look something like this:
+
+.. code:: none
+
+  {
+    "my_subsystem" : {
+      "successes" : 1000,
+      "attempts" : 1200,
+    }
+  }
+
+In this case, every instance of ``MySubsystem`` will have unique counters.
+
+Example: Instrumenting a legacy codebase
+----------------------------------------
+A common situation in embedded development is **debugging legacy code** or code
+which is hard to change; where it is perhaps impossible to plumb metrics
+objects around with dependency injection. The alternative to plumbing metrics
+is to register the metrics through a global mechanism. ``pw_metric`` supports
+this use case. For example:
+
+**Before instrumenting:**
+
+.. code::
+
+  // This code was passed down from generations of developers before; no one
+  // knows what it does or how it works. But it needs to be fixed!
+  void OldCodeThatDoesntWorkButWeDontKnowWhy() {
+    if (some_variable) {
+      DoSomething();
+    } else {
+      DoSomethingElse();
+    }
+  }
+
+**After instrumenting:**
+
+.. code::
+
+  #include "pw_metric/global.h"
+  #include "pw_metric/metric.h"
+
+  PW_METRIC_GLOBAL(legacy_do_something, "legacy_do_something");
+  PW_METRIC_GLOBAL(legacy_do_something_else, "legacy_do_something_else");
+
+  // This code was passed down from generations of developers before; no one
+  // knows what it does or how it works. But it needs to be fixed!
+  void OldCodeThatDoesntWorkButWeDontKnowWhy() {
+    if (some_variable) {
+      legacy_do_something.Increment();
+      DoSomething();
+    } else {
+      legacy_do_something_else.Increment();
+      DoSomethingElse();
+    }
+  }
+
+In this case, the developer merely had to add the metrics header, define some
+metrics, and then start incrementing them. These metrics will be available
+globally through the ``pw::metric::global_metrics`` object defined in
+``pw_metric/global.h``.
+
+Why not just use simple counter variables?
+------------------------------------------
+One might wonder what the point of leveraging a metric library is when it is
+trivial to make some global variables and print them out. There are a few
+reasons:
+
+- **Metrics offload** - To make it easy to get metrics off-device by sharing
+  the infrastructure for offloading.
+
+- **Consistent format** - To get the metrics in a consistent format (e.g.
+  protobuf or JSON) for analysis
+
+- **Uncoordinated collection** - To provide a simple and reliable way for
+  developers on a team to all collect metrics for their subsystems, without
+  having to coordinate to offload. This could extend to code in libraries
+  written by other teams.
+
+- **Pre-boot or interrupt visibility** - Some of the most challenging bugs come
+  from early system boot when not all system facilities are up (e.g. logging or
+  UART). In those cases, metrics provide a low-overhead approach to understand
+  what is happening. During early boot, metrics can be incremented, then after
+  boot dumping the metrics provides insights into what happened. While basic
+  counter variables can work in these contexts too, one still has to deal with
+  the offloading problem; which the library handles.
+
+---------------------
+Metrics API reference
+---------------------
+
+The metrics API consists of just a few components:
+
+- The core data structures ``pw::metric::Metric`` and ``pw::metric::Group``
+- The macros for scoped metrics and groups ``PW_METRIC`` and
+  ``PW_METRIC_GROUP``
+- The macros for globally registered metrics and groups
+  ``PW_METRIC_GLOBAL`` and ``PW_METRIC_GROUP_GLOBAL``
+- The global groups and metrics list: ``pw::metric::global_groups`` and
+  ``pw::metric::global_metrics``.
+
+Metric
+------
+The ``pw::metric::Metric`` provides:
+
+- A 31-bit tokenized name
+- A 1-bit discriminator for int or float
+- A 32-bit payload (int or float)
+- A 32-bit next pointer (intrusive list)
+
+The metric object is 12 bytes on 32-bit platforms.
+
+.. cpp:class:: pw::metric::Metric
+
+  .. cpp:function:: Increment(uint32_t amount = 0)
+
+    Increment the metric by the given amount. Results in undefined behaviour if
+    the metric is not of type int.
+
+  .. cpp:function:: Set(uint32_t value)
+
+    Set the metric to the given value. Results in undefined behaviour if the
+    metric is not of type int.
+
+  .. cpp:function:: Set(float value)
+
+    Set the metric to the given value. Results in undefined behaviour if the
+    metric is not of type float.
+
+Group
+-----
+The ``pw::metric::Group`` object is simply:
+
+- A name for the group
+- A list of children groups
+- A list of leaf metrics groups
+- A 32-bit next pointer (intrusive list)
+
+The group object is 16 bytes on 32-bit platforms.
+
+.. cpp:class:: pw::metric::Group
+
+  .. cpp:function:: Dump(int indent_level = 0)
+
+    Recursively dump a metrics group to ``pw_log``. Produces output like:
+
+    .. code:: none
+
+      "$6doqFw==": {
+        "$05OCZw==": {
+          "$VpPfzg==": 1,
+          "$LGPMBQ==": 1.000000,
+          "$+iJvUg==": 5,
+        }
+        "$9hPNxw==": 65,
+        "$oK7HmA==": 13,
+        "$FCM4qQ==": 0,
+      }
+
+    Note the metric names are tokenized with base64. Decoding requires using
+    the Pigweed detokenizer. With a detokenizing-enabled logger, you could get
+    something like:
+
+    .. code:: none
+
+      "i2c_1": {
+        "gyro": {
+          "num_sampleses": 1,
+          "init_time_us": 1.000000,
+          "initialized": 5,
+        }
+        "bus_errors": 65,
+        "transactions": 13,
+        "bytes_sent": 0,
+      }
+
+Macros
+------
+The **macros are the primary mechanism for creating metrics**, and should be
+used instead of directly constructing metrics or groups. The macros handle
+tokenizing the metric and group names.
+
+.. cpp:function:: PW_METRIC(identifier, name, value)
+.. cpp:function:: PW_METRIC(group, identifier, name, value)
+.. cpp:function:: PW_METRIC_STATIC(identifier, name, value)
+.. cpp:function:: PW_METRIC_STATIC(group, identifier, name, value)
+
+  Declare a metric, optionally adding it to a group.
+
+  - **identifier** - An identifier name for the created variable or member.
+    For example: ``i2c_transactions`` might be used as a local or global
+    metric; inside a class, could be named according to members
+    (``i2c_transactions_`` for Google's C++ style).
+  - **name** - The string name for the metric. This will be tokenized. There
+    are no restrictions on the contents of the name; however, consider
+    restricting these to be valid C++ identifiers to ease integration with
+    other systems.
+  - **value** - The initial value for the metric. Must be either a floating
+    point value (e.g. ``3.2f``) or unsigned int (e.g. ``21u``).
+  - **group** - A ``pw::metric::Group`` instance. If provided, the metric is
+    added to the given group.
+
+  The macro declares a variable or member named "name" with type
+  ``pw::metric::Metric``, and works in three contexts: global, local, and
+  member.
+
+  If the `_STATIC` variant is used, the macro declares a variable with static
+  storage. These can be used in function scopes, but not in classes.
+
+  1. At global scope:
+
+    .. code::
+
+      PW_METRIC(foo, "foo", 15.5f);
+
+      void MyFunc() {
+        foo.Increment();
+      }
+
+  2. At local function or member function scope:
+
+    .. code::
+
+      void MyFunc() {
+        PW_METRIC(foo, "foo", 15.5f);
+        foo.Increment();
+        // foo goes out of scope here; be careful!
+      }
+
+  3. At member level inside a class or struct:
+
+    .. code::
+
+      struct MyStructy {
+        void DoSomething() {
+          somethings.Increment();
+        }
+        // Every instance of MyStructy will have a separate somethings counter.
+        PW_METRIC(somethings, "somethings", 0u);
+      }
+
+  You can also put a metric into a group with the macro. Metrics can belong to
+  strictly one group, otherwise an assertion will fail. Example:
+
+  .. code::
+
+    PW_METRIC_GROUP(my_group, "my_group");
+    PW_METRIC(my_group, foo, "foo", 0.2f);
+    PW_METRIC(my_group, bar, "bar", 44000u);
+    PW_METRIC(my_group, zap, "zap", 3.14f);
+
+  .. tip::
+
+    If you want a globally registered metric, see ``pw_metric/global.h``; in
+    that contexts, metrics are globally registered without the need to
+    centrally register in a single place.
+
+.. cpp:function:: PW_METRIC_GROUP(identifier, name)
+.. cpp:function:: PW_METRIC_GROUP(parent_group, identifier, name)
+.. cpp:function:: PW_METRIC_GROUP_STATIC(identifier, name)
+.. cpp:function:: PW_METRIC_GROUP_STATIC(parent_group, identifier, name)
+
+  Declares a ``pw::metric::Group`` with the given name; the name is tokenized.
+  Works similar to ``PW_METRIC`` and can be used in the same contexts (global,
+  local, and member). Optionally, the group can be added to a parent group.
+
+  If the `_STATIC` variant is used, the macro declares a variable with static
+  storage. These can be used in function scopes, but not in classes.
+
+  Example:
+
+  .. code::
+
+    PW_METRIC_GROUP(my_group, "my_group");
+    PW_METRIC(my_group, foo, "foo", 0.2f);
+    PW_METRIC(my_group, bar, "bar", 44000u);
+    PW_METRIC(my_group, zap, "zap", 3.14f);
+
+.. cpp:function:: PW_METRIC_GLOBAL(identifier, name, value)
+
+  Declare a ``pw::metric::Metric`` with the given name, and register it in the
+  global metrics list ``pw::metric::global_metrics``.
+
+  Example:
+
+  .. code::
+
+    #include "pw_metric/metric.h"
+    #include "pw_metric/global.h"
+
+    // No need to coordinate collection of foo and bar; they're autoregistered.
+    PW_METRIC_GLOBAL(foo, "foo", 0.2f);
+    PW_METRIC_GLOBAL(bar, "bar", 44000u);
+
+  Note that metrics defined with ``PW_METRIC_GLOBAL`` should never be added to
+  groups defined with ``PW_METRIC_GROUP_GLOBAL``. Each metric can only belong
+  to one group, and metrics defined with ``PW_METRIC_GLOBAL`` are
+  pre-registered with the global metrics list.
+
+  .. attention::
+
+    Do not create ``PW_METRIC_GLOBAL`` instances anywhere other than global
+    scope. Putting these on an instance (member context) would lead to dangling
+    pointers and misery. Metrics are never deleted or unregistered!
+
+.. cpp:function:: PW_METRIC_GROUP_GLOBAL(identifier, name, value)
+
+  Declare a ``pw::metric::Group`` with the given name, and register it in the
+  global metric groups list ``pw::metric::global_groups``.
+
+  Note that metrics created with ``PW_METRIC_GLOBAL`` should never be added to
+  groups! Instead, just create a freestanding metric and register it into the
+  global group (like in the example below).
+
+  Example:
+
+  .. code::
+
+    #include "pw_metric/metric.h"
+    #include "pw_metric/global.h"
+
+    // No need to coordinate collection of this group; it's globally registered.
+    PW_METRIC_GROUP_GLOBAL(legacy_system, "legacy_system");
+    PW_METRIC(legacy_system, foo, "foo", 0.2f);
+    PW_METRIC(legacy_system, bar, "bar", 44000u);
+
+  .. attention::
+
+    Do not create ``PW_METRIC_GROUP_GLOBAL`` instances anywhere other than
+    global scope. Putting these on an instance (member context) would lead to
+    dangling pointers and misery. Metrics are never deleted or unregistered!
+
+----------------------
+Usage & Best Practices
+----------------------
+This library makes several tradeoffs to enable low memory use per-metric, and
+one of those tradeoffs results in requiring care in constructing the metric
+trees.
+
+Use the Init() pattern for static objects with metrics
+------------------------------------------------------
+A common pattern in embedded systems is to allocate many objects globally, and
+reduce reliance on dynamic allocation (or eschew malloc entirely). This leads
+to a pattern where rich/large objects are statically constructed at global
+scope, then interacted with via tasks or threads. For example, consider a
+hypothetical global ``Uart`` object:
+
+.. code::
+
+  class Uart {
+   public:
+    Uart(span<std::byte> rx_buffer, span<std::byte> tx_buffer)
+      : rx_buffer_(rx_buffer), tx_buffer_(tx_buffer) {}
+
+    // Send/receive here...
+
+   private:
+    std::span<std::byte> rx_buffer;
+    std::span<std::byte> tx_buffer;
+  };
+
+  std::array<std::byte, 512> uart_rx_buffer;
+  std::array<std::byte, 512> uart_tx_buffer;
+  Uart uart1(uart_rx_buffer, uart_tx_buffer);
+
+Through the course of building a product, the team may want to add metrics to
+the UART to for example gain insight into which operations are triggering lots
+of data transfer. When adding metrics to the above imaginary UART object, one
+might consider the following approach:
+
+.. code::
+
+  class Uart {
+   public:
+    Uart(span<std::byte> rx_buffer,
+         span<std::byte> tx_buffer,
+         Group& parent_metrics)
+      : rx_buffer_(rx_buffer),
+        tx_buffer_(tx_buffer) {
+        // PROBLEM! parent_metrics may not be constructed if it's a reference
+        // to a static global.
+        parent_metrics.Add(tx_bytes_);
+        parent_metrics.Add(rx_bytes_);
+     }
+
+    // Send/receive here which increment tx/rx_bytes.
+
+   private:
+    std::span<std::byte> rx_buffer;
+    std::span<std::byte> tx_buffer;
+
+    PW_METRIC(tx_bytes_, "tx_bytes", 0);
+    PW_METRIC(rx_bytes_, "rx_bytes", 0);
+  };
+
+  PW_METRIC_GROUP(global_metrics, "/");
+  PW_METRIC_GROUP(global_metrics, uart1_metrics, "uart1");
+
+  std::array<std::byte, 512> uart_rx_buffer;
+  std::array<std::byte, 512> uart_tx_buffer;
+  Uart uart1(uart_rx_buffer,
+             uart_tx_buffer,
+             uart1_metrics);
+
+However, this **is incorrect**, since the ``parent_metrics`` (pointing to
+``uart1_metrics`` in this case) may not be constructed at the point of
+``uart1`` getting constructed. Thankfully in the case of ``pw_metric`` this
+will result in an assertion failure (or it will work correctly if the
+constructors are called in a favorable order), so the problem will not go
+unnoticed.  Instead, consider using the ``Init()`` pattern for static objects,
+where references to dependencies may only be stored during construction, but no
+methods on the dependencies are called.
+
+Instead, the ``Init()`` approach separates global object construction into two
+phases: The constructor where references are stored, and a ``Init()`` function
+which is called after all static constructors have run. This approach works
+correctly, even when the objects are allocated globally:
+
+.. code::
+
+  class Uart {
+   public:
+    // Note that metrics is not passed in here at all.
+    Uart(span<std::byte> rx_buffer,
+         span<std::byte> tx_buffer)
+      : rx_buffer_(rx_buffer),
+        tx_buffer_(tx_buffer) {}
+
+     // Precondition: parent_metrics is already constructed.
+     void Init(Group& parent_metrics) {
+        parent_metrics.Add(tx_bytes_);
+        parent_metrics.Add(rx_bytes_);
+     }
+
+    // Send/receive here which increment tx/rx_bytes.
+
+   private:
+    std::span<std::byte> rx_buffer;
+    std::span<std::byte> tx_buffer;
+
+    PW_METRIC(tx_bytes_, "tx_bytes", 0);
+    PW_METRIC(rx_bytes_, "rx_bytes", 0);
+  };
+
+  PW_METRIC_GROUP(root_metrics, "/");
+  PW_METRIC_GROUP(root_metrics, uart1_metrics, "uart1");
+
+  std::array<std::byte, 512> uart_rx_buffer;
+  std::array<std::byte, 512> uart_tx_buffer;
+  Uart uart1(uart_rx_buffer,
+             uart_tx_buffer);
+
+  void main() {
+    // uart1_metrics is guaranteed to be initialized by this point, so it is
+    // safe to pass it to Init().
+    uart1.Init(uart1_metrics);
+  }
+
+.. attention::
+
+  Be extra careful about **static global metric registration**. Consider using
+  the ``Init()`` pattern.
+
+Metric member order matters in objects
+--------------------------------------
+The order of declaring in-class groups and metrics matters if the metrics are
+within a group declared inside the class. For example, the following class will
+work fine:
+
+.. code::
+
+  #include "pw_metric/metric.h"
+
+  class PowerSubsystem {
+   public:
+     Group& metrics() { return metrics_; }
+     const Group& metrics() const { return metrics_; }
+
+   private:
+    PW_METRIC_GROUP(metrics_, "power");  // Note metrics_ declared first.
+    PW_METRIC(metrics_, foo, "foo", 0.2f);
+    PW_METRIC(metrics_, bar, "bar", 44000u);
+  };
+
+but the following one will not since the group is constructed after the metrics
+(and will result in a compile error):
+
+.. code::
+
+  #include "pw_metric/metric.h"
+
+  class PowerSubsystem {
+   public:
+     Group& metrics() { return metrics_; }
+     const Group& metrics() const { return metrics_; }
+
+   private:
+    PW_METRIC(metrics_, foo, "foo", 0.2f);
+    PW_METRIC(metrics_, bar, "bar", 44000u);
+    PW_METRIC_GROUP(metrics_, "power");  // Error: metrics_ must be first.
+  };
+
+.. attention::
+
+  Put **groups before metrics** when declaring metrics members inside classes.
+
+Thread safety
+-------------
+``pw_metric`` has **no built-in synchronization for manipulating the tree**
+structure. Users are expected to either rely on shared global mutex when
+constructing the metric tree, or do the metric construction in a single thread
+(e.g. a boot/init thread). The same applies for destruction, though we do not
+advise destructing metrics or groups.
+
+Individual metrics have atomic ``Increment()``, ``Set()``, and the value
+accessors ``as_float()`` and ``as_int()`` which don't require separate
+synchronization, and can be used from ISRs.
+
+.. attention::
+
+  **You must synchronize access to metrics**. ``pw_metric`` does not
+  internally synchronize access during construction. Metric Set/Increment are
+  safe.
+
+Lifecycle
+---------
+Metric objects are not designed to be destructed, and are expected to live for
+the lifetime of the program or application. If you need dynamic
+creation/destruction of metrics, ``pw_metric`` does not attempt to cover that
+use case. Instead, ``pw_metric`` covers the case of products with two execution
+phases:
+
+1. A boot phase where the metric tree is created.
+2. A run phase where metrics are collected. The tree structure is fixed.
+
+Technically, it is possible to destruct metrics provided care is taken to
+remove the given metric (or group) from the list it's contained in. However,
+there are no helper functions for this, so be careful.
+
+Below is an example that **is incorrect**. Don't do what follows!
+
+.. code::
+
+  #include "pw_metric/metric.h"
+
+  void main() {
+    PW_METRIC_GROUP(root, "/");
+    {
+      // BAD! The metrics have a different lifetime than the group.
+      PW_METRIC(root, temperature, "temperature_f", 72.3f);
+      PW_METRIC(root, humidity, "humidity_relative_percent", 33.2f);
+    }
+    // OOPS! root now has a linked list that points to the destructed
+    // "humidity" object.
+  }
+
+.. attention::
+
+  **Don't destruct metrics**. Metrics are designed to be registered /
+  structured upfront, then manipulated during a device's active phase. They do
+  not support destruction.
+
+-----------------
+Exporting metrics
+-----------------
+Collecting metrics on a device is not useful without a mechanism to export
+those metrics for analysis and debugging. ``pw_metric`` offers an optional RPC
+service library (``:metric_service_nanopb``) that enables exporting a
+user-supplied set of on-device metrics via RPC. This facility is intended to
+function from the early stages of device bringup through production in the
+field.
+
+The metrics are fetched by calling the ``MetricService.Get`` RPC method, which
+streams all registered metrics to the caller in batches (server streaming RPC).
+Batching the returned metrics avoids requiring a large buffer or large RPC MTU.
+
+The returned metric objects have flattened paths to the root. For example, the
+returned metrics (post detokenization and jsonified) might look something like:
+
+.. code:: none
+
+  {
+    "/i2c1/failed_txns": 17,
+    "/i2c1/total_txns": 2013,
+    "/i2c1/gyro/resets": 24,
+    "/i2c1/gyro/hangs": 1,
+    "/spi1/thermocouple/reads": 242,
+    "/spi1/thermocouple/temp_celcius": 34.52,
+  }
+
+Note that there is no nesting of the groups; the nesting is implied from the
+path.
+
+RPC service setup
+-----------------
+To expose a ``MetricService`` in your application, do the following:
+
+1. Define metrics around the system, and put them in a group or list of
+   metrics. Easy choices include for example the ``global_groups`` and
+   ``global_metrics`` variables; or create your own.
+2. Create an instance of ``pw::metric::MetricService``.
+3. Register the service with your RPC server.
+
+For example:
+
+.. code::
+
+   #include "pw_rpc/server.h"
+   #include "pw_metric/metric.h"
+   #include "pw_metric/global.h"
+   #include "pw_metric/metric_service_nanopb.h"
+
+   // Note: You must customize the RPC server setup; see pw_rpc.
+   Channel channels[] = {
+    Channel::Create<1>(&uart_output),
+   };
+   Server server(channels);
+
+   // Metric service instance, pointing to the global metric objects.
+   // This could also point to custom per-product or application objects.
+   pw::metric::MetricService metric_service(
+       pw::metric::global_metrics,
+       pw::metric::global_groups);
+
+   void RegisterServices() {
+     server.RegisterService(metric_service);
+     // Register other services here.
+   }
+
+   void main() {
+     // ... system initialization ...
+
+     RegisterServices();
+
+     // ... start your application ...
+   }
+
+.. attention::
+
+  Take care when exporting metrics. Ensure **appropriate access control** is in
+  place. In some cases it may make sense to entirely disable metrics export for
+  production builds. Although reading metrics via RPC won't influence the
+  device, in some cases the metrics could expose sensitive information if
+  product owners are not careful.
+
+.. attention::
+
+  **MetricService::Get is a synchronous RPC method**
+
+  Calls to ``MetricService::Get`` are blocking and will send all metrics
+  immediately, even though it is a server-streaming RPC. This will work fine if
+  the device doesn't have too many metrics, or doesn't have concurrent RPCs like
+  logging, but could be a problem in some cases.
+
+  We plan to offer an async version where the application is responsible for
+  pumping the metrics into the streaming response. This gives flow control to
+  the application.
+
+-----------
+Size report
+-----------
+The below size report shows the cost in code and memory for a few examples of
+metrics. This does not include the RPC service.
+
+.. include:: metric_size_report
+
+.. attention::
+
+  At time of writing, **the above sizes show an unexpectedly large flash
+  impact**. We are investigating why GCC is inserting large global static
+  constructors per group, when all the logic should be reused across objects.
+
+----------------
+Design tradeoffs
+----------------
+There are many possible approaches to metrics collection and aggregation. We've
+chosen some points on the tradeoff curve:
+
+- **Atomic-sized metrics** - Using simple metric objects with just uint32/float
+  enables atomic operations. While it might be nice to support larger types, it
+  is more useful to have safe metrics increment from interrupt subroutines.
+
+- **No aggregate metrics (yet)** - Aggregate metrics (e.g. average, max, min,
+  histograms) are not supported, and must be built on top of the simple base
+  metrics. By taking this route, we can considerably simplify the core metrics
+  system and have aggregation logic in separate modules. Those modules can then
+  feed into the metrics system - for example by creating multiple metrics for a
+  single underlying metric. For example: "foo", "foo_max", "foo_min" and so on.
+
+  The other problem with automatic aggregation is that what period the
+  aggregation happens over is often important, and it can be hard to design
+  this cleanly into the API. Instead, this responsibility is pushed to the user
+  who must take more care.
+
+  Note that we will add helpers for aggregated metrics.
+
+- **No virtual metrics** - An alternate approach to the concrete Metric class
+  in the current module is to have a virtual interface for metrics, and then
+  allow those metrics to have their own storage. This is attractive but can
+  lead to many vtables and excess memory use in simple one-metric use cases.
+
+- **Linked list registration** - Using linked lists for registration is a
+  tradeoff, accepting some memory overhead in exchange for flexibility. Other
+  alternatives include a global table of metrics, which has the disadvantage of
+  requiring centralizing the metrics -- an impossibility for middleware like
+  Pigweed.
+
+- **Synchronization** - The only synchronization guarantee provided by
+  pw_metric is that increment and set are atomic. Other than that, users are on
+  their own to synchronize metric collection and updating.
+
+- **No fast metric lookup** - The current design does not make it fast to
+  lookup a metric at runtime; instead, one must run a linear search of the tree
+  to find the matching metric. In most non-dynamic use cases, this is fine in
+  practice, and saves having a more involved hash table. Metric updates will be
+  through direct member or variable accesses.
+
+- **Relying on C++ static initialization** - In short, the convenience
+  outweighs the cost and risk. Without static initializers, it would be
+  impossible to automatically collect the metrics without post-processing the
+  C++ code to find the metrics; a huge and debatably worthwhile approach. We
+  have carefully analyzed the static initializer behaviour of Pigweed's
+  IntrusiveList and are confident it is correct.
+
+- **Both local & global support** - Potentially just one approach (the local or
+  global one) could be offered, making the module less complex. However, we
+  feel the additional complexity is worthwhile since there are legitimate use
+  cases for both e.g. ``PW_METRIC`` and ``PW_METRIC_GLOBAL``. We'd prefer to
+  have a well-tested upstream solution for these use cases rather than have
+  customers re-implement one of these.
+
+----------------
+Roadmap & Status
+----------------
+- **String metric names** - ``pw_metric`` stores metric names as tokens. On one
+  hand, this is great for production where having a compact binary is often a
+  requirement to fit the application in the given part. However, in early
+  development before flash is a constraint, string names are more convenient to
+  work with since there is no need for host-side detokenization. We plan to add
+  optional support for using strings.
+
+- **Aggregate metrics** - We plan to add support for aggregate metrics on top
+  of the simple metric mechanism, either as another module or as additional
+  functionality inside this one. Likely examples include min/max.
+
+- **Selectively enable or disable metrics** - Currently the metrics are always
+  enabled once included. In practice this is not ideal since many times only a
+  few metrics are wanted in production, but having to strip all the metrics
+  code is error prone. Instead, we will add support for controlling what
+  metrics are enabled or disabled at compile time. This may rely on C++20's
+  support for zero-sized members to fully remove the cost.
+
+- **Async RPC** - The current RPC service exports the metrics by streaming
+  them to the client in batches. However, the current solution streams all the
+  metrics to completion; this may block the RPC thread. In the future we will
+  have an async solution where the user is in control of flow priority.
+
+- **Timer integration** - We would like to add a stopwatch type mechanism to
+  time multiple in-flight events.
+
+- **C support** - In practice it's often useful or necessary to instrument
+  C-only code. While it will be impossible to support the global registration
+  system that the C++ version supports, we will figure out a solution to make
+  instrumenting C code relatively smooth.
+
+- **Global counter** - We may add a global metric counter to help detect cases
+  where post-initialization metrics manipulations are done.
+
+- **Proto structure** - It may be possible to directly map metrics to a custom
+  proto structure, where instead of a name or token field, a tag field is
+  provided. This could result in elegant export to an easily machine parsable
+  and compact representation on the host. We may investigate this in the
+  future.
+
+- **Safer data structures** - At a cost of 4B per metric and 4B per group, it
+  may be possible to make metric structure instantiation safe even in static
+  constructors, and also make it safe to remove metrics dynamically. We will
+  consider whether this tradeoff is the right one, since a 4B cost per metric
+  is substantial on projects with many metrics.
diff --git a/pw_sys_io_baremetal_lm3s6965evb/early_boot.c b/pw_metric/global.cc
similarity index 76%
copy from pw_sys_io_baremetal_lm3s6965evb/early_boot.c
copy to pw_metric/global.cc
index 1670b7d..822a1d7 100644
--- a/pw_sys_io_baremetal_lm3s6965evb/early_boot.c
+++ b/pw_metric/global.cc
@@ -12,6 +12,11 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
-#include "pw_boot_armv7m/boot.h"
+#include "pw_metric/global.h"
 
-void pw_PreStaticConstructorInit() {}
\ No newline at end of file
+namespace pw::metric {
+
+constinit IntrusiveList<Group> global_groups;
+constinit IntrusiveList<Metric> global_metrics;
+
+}  // namespace pw::metric
diff --git a/pw_metric/global_test.cc b/pw_metric/global_test.cc
new file mode 100644
index 0000000..ab0aab5
--- /dev/null
+++ b/pw_metric/global_test.cc
@@ -0,0 +1,72 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_metric/global.h"
+
+#include "gtest/gtest.h"
+#include "pw_log/log.h"
+#include "pw_metric/metric.h"
+
+namespace pw {
+namespace metric {
+
+// Count elements in an iterable.
+template <typename T>
+int Size(T& container) {
+  int num_elements = 0;
+  for (auto& element : container) {
+    PW_UNUSED(element);
+    num_elements++;
+  }
+  return num_elements;
+}
+
+// Create two global metrics; make sure they show up.
+PW_METRIC_GLOBAL(stat_x, "stat_x", 123u);
+PW_METRIC_GLOBAL(stat_y, "stat_y", 123u);
+
+TEST(Global, Metrics) {
+  Metric::Dump(global_metrics);
+  EXPECT_EQ(Size(global_metrics), 2);
+}
+
+// Create three global metric groups; make sure they show up.
+// Also, register some sub-metrics in the global groups.
+PW_METRIC_GROUP_GLOBAL(gyro_metrics, "gyro");
+PW_METRIC(gyro_metrics, max_velocity, "max_velocity", 5.0f);
+
+PW_METRIC_GROUP_GLOBAL(comms_metrics, "comms");
+PW_METRIC(comms_metrics, packet_drops, "packet_drops", 10u);
+PW_METRIC(comms_metrics, bandwidth, "bandwidth", 230.3f);
+
+PW_METRIC_GROUP_GLOBAL(power_metrics, "power");
+PW_METRIC(power_metrics, voltage, "voltage", 3.33f);
+PW_METRIC(power_metrics, battery_cycles, "battery_cycles", 550u);
+PW_METRIC(power_metrics, current_ma, "current_ma", 35.2f);
+
+TEST(Global, Groups) {
+  Group::Dump(global_groups);
+  EXPECT_EQ(Size(global_groups), 4);
+
+  EXPECT_EQ(Size(gyro_metrics.metrics()), 1);
+  EXPECT_EQ(Size(comms_metrics.metrics()), 2);
+  EXPECT_EQ(Size(power_metrics.metrics()), 3);
+}
+
+}  // namespace metric
+}  // namespace pw
+
+// this is a compilation test to make sure metrics can be defined outside of
+// ::pw::metric
+PW_METRIC_GROUP_GLOBAL(global_group, "global group");
diff --git a/pw_metric/metric.cc b/pw_metric/metric.cc
new file mode 100644
index 0000000..5e99329
--- /dev/null
+++ b/pw_metric/metric.cc
@@ -0,0 +1,127 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_metric/metric.h"
+
+#include <array>
+#include <span>
+
+#include "pw_log/log.h"
+#include "pw_tokenizer/base64.h"
+
+namespace pw::metric {
+namespace {
+
+template <typename T>
+std::span<const std::byte> AsSpan(const T& t) {
+  return std::span<const std::byte>(reinterpret_cast<const std::byte*>(&t),
+                                    sizeof(t));
+}
+
+// A convenience class to encode a token as base64 while managing the storage.
+// TODO(keir): Consider putting this into upstream pw_tokenizer.
+struct Base64EncodedToken {
+  Base64EncodedToken(Token token) {
+    int encoded_size = tokenizer::PrefixedBase64Encode(AsSpan(token), data);
+    data[encoded_size] = 0;
+  }
+
+  const char* value() { return data.data(); }
+  std::array<char, 16> data;
+};
+
+const char* Indent(int level) {
+  static const char* kWhitespace8 = "        ";
+  level = std::min(level, 4);
+  return kWhitespace8 + 8 - 2 * level;
+}
+
+}  // namespace
+
+// Enable easier registration when used as a member.
+Metric::Metric(Token name, float value, IntrusiveList<Metric>& metrics)
+    : Metric(name, value) {
+  metrics.push_front(*this);
+}
+Metric::Metric(Token name, uint32_t value, IntrusiveList<Metric>& metrics)
+    : Metric(name, value) {
+  metrics.push_front(*this);
+}
+
+float Metric::as_float() const {
+  PW_DCHECK(is_float());
+  return float_;
+}
+
+uint32_t Metric::as_int() const {
+  PW_DCHECK(is_int());
+  return uint_;
+}
+
+void Metric::Increment(uint32_t amount) {
+  PW_DCHECK(is_int());
+  uint_ += amount;
+}
+
+void Metric::SetInt(uint32_t value) {
+  PW_DCHECK(is_int());
+  uint_ = value;
+}
+
+void Metric::SetFloat(float value) {
+  PW_DCHECK(is_float());
+  float_ = value;
+}
+
+void Metric::Dump(int level) {
+  Base64EncodedToken encoded_name(name());
+  const char* indent = Indent(level);
+  if (is_float()) {
+    PW_LOG_INFO("%s \"%s\": %f,", indent, encoded_name.value(), as_float());
+  } else {
+    PW_LOG_INFO("%s \"%s\": %u,",
+                indent,
+                encoded_name.value(),
+                static_cast<unsigned int>(as_int()));
+  }
+}
+
+void Metric::Dump(IntrusiveList<Metric>& metrics, int level) {
+  for (auto& m : metrics) {
+    m.Dump(level);
+  }
+}
+
+Group::Group(Token name) : name_(name) {}
+
+Group::Group(Token name, IntrusiveList<Group>& groups) : name_(name) {
+  groups.push_front(*this);
+}
+
+void Group::Dump(int level) {
+  Base64EncodedToken encoded_name(name());
+  const char* indent = Indent(level);
+  PW_LOG_INFO("%s \"%s\": {", indent, encoded_name.value());
+  Group::Dump(children(), level + 1);
+  Metric::Dump(metrics(), level + 1);
+  PW_LOG_INFO("%s }", indent);
+}
+
+void Group::Dump(IntrusiveList<Group>& groups, int level) {
+  for (auto& group : groups) {
+    group.Dump(level);
+  }
+}
+
+}  // namespace pw::metric
diff --git a/pw_metric/metric_service_nanopb.cc b/pw_metric/metric_service_nanopb.cc
new file mode 100644
index 0000000..49130a3
--- /dev/null
+++ b/pw_metric/metric_service_nanopb.cc
@@ -0,0 +1,149 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_metric/metric_service_nanopb.h"
+
+#include <cstring>
+#include <span>
+
+#include "pw_assert/assert.h"
+#include "pw_containers/vector.h"
+#include "pw_metric/metric.h"
+#include "pw_preprocessor/util.h"
+
+namespace pw::metric {
+namespace {
+
+class MetricWriter {
+ public:
+  MetricWriter(rpc::ServerWriter<pw_metric_MetricResponse>& response_writer)
+      : response_(pw_metric_MetricResponse_init_zero),
+        response_writer_(response_writer) {}
+
+  // TODO(keir): Figure out a pw_rpc mechanism to fill a streaming packet based
+  // on transport MTU, rather than having this as a static knob. For example,
+  // some transports may be able to fit 30 metrics; others, only 5.
+  void Write(const Metric& metric, const Vector<Token>& path) {
+    // Nanopb doesn't offer an easy way to do bounds checking, so use span's
+    // type deduction magic to figure out the max size.
+    std::span<pw_metric_Metric> metrics(response_.metrics);
+    PW_CHECK_INT_LT(response_.metrics_count, metrics.size());
+
+    // Grab the next available Metric slot to write to in the response.
+    pw_metric_Metric& proto_metric = response_.metrics[response_.metrics_count];
+
+    // Copy the path.
+    std::span<Token> proto_path(proto_metric.token_path);
+    PW_CHECK_INT_LE(path.size(), proto_path.size());
+    std::copy(path.begin(), path.end(), proto_path.begin());
+    proto_metric.token_path_count = path.size();
+
+    // Copy the metric value.
+    if (metric.is_float()) {
+      proto_metric.value.as_float = metric.as_float();
+      proto_metric.which_value = pw_metric_Metric_as_float_tag;
+    } else {
+      proto_metric.value.as_int = metric.as_int();
+      proto_metric.which_value = pw_metric_Metric_as_int_tag;
+    }
+
+    // Move write head to the next slot.
+    response_.metrics_count++;
+
+    // If the metric response object is full, send the response and reset.
+    // TODO(keir): Support runtime batch sizes < max proto size.
+    if (response_.metrics_count == metrics.size()) {
+      Flush();
+    }
+  }
+
+  void Flush() {
+    if (response_.metrics_count) {
+      response_writer_.Write(response_);
+      response_ = pw_metric_MetricResponse_init_zero;
+    }
+  }
+
+ private:
+  pw_metric_MetricResponse response_;
+  // This RPC stream writer handle must be valid for the metric writer lifetime.
+  rpc::ServerWriter<pw_metric_MetricResponse>& response_writer_;
+};
+
+// Walk a metric tree recursively; passing metrics with their path (names) to a
+// metric writer which can consume them.
+//
+// TODO(keir): Generalize this to support a generic visitor.
+class MetricWalker {
+ public:
+  MetricWalker(MetricWriter& writer) : writer_(writer) {}
+
+  void Walk(const IntrusiveList<Metric>& metrics) {
+    for (const auto& m : metrics) {
+      ScopedName(m.name(), *this);
+      writer_.Write(m, path_);
+    }
+  }
+
+  void Walk(const IntrusiveList<Group>& groups) {
+    for (const auto& g : groups) {
+      Walk(g);
+    }
+  }
+
+  void Walk(const Group& group) {
+    ScopedName(group.name(), *this);
+    Walk(group.children());
+    Walk(group.metrics());
+  }
+
+ private:
+  // Exists to safely push/pop parent groups from the explicit stack.
+  struct ScopedName {
+    ScopedName(Token name, MetricWalker& rhs) : walker(rhs) {
+      PW_CHECK_INT_LT(walker.path_.size(),
+                      walker.path_.capacity(),
+                      "Metrics are too deep; bump path_ capacity");
+      walker.path_.push_back(name);
+    }
+    ~ScopedName() { walker.path_.pop_back(); }
+    MetricWalker& walker;
+  };
+
+  Vector<Token, 4 /* max depth */> path_;
+  MetricWriter& writer_;
+};
+
+}  // namespace
+
+void MetricService::Get(ServerContext&,
+                        const pw_metric_MetricRequest& /* request */,
+                        ServerWriter<pw_metric_MetricResponse>& response) {
+  // For now, ignore the request and just stream all the metrics back.
+  MetricWriter writer(response);
+  MetricWalker walker(writer);
+
+  // This will stream all the metrics in the span of this Get() method call.
+  // This will have the effect of blocking the RPC thread until all the metrics
+  // are sent. That is likely to cause problems if there are many metrics, or
+  // if other RPCs are higher priority and should complete first.
+  //
+  // In the future, this should be replaced with an optional async solution
+  // that puts the application in control of when the response batches are sent.
+  walker.Walk(metrics_);
+  walker.Walk(groups_);
+  writer.Flush();
+}
+
+}  // namespace pw::metric
diff --git a/pw_metric/metric_service_nanopb_test.cc b/pw_metric/metric_service_nanopb_test.cc
new file mode 100644
index 0000000..26d6ebf
--- /dev/null
+++ b/pw_metric/metric_service_nanopb_test.cc
@@ -0,0 +1,135 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_metric/metric_service_nanopb.h"
+
+#include "gtest/gtest.h"
+#include "pw_log/log.h"
+#include "pw_rpc/nanopb_test_method_context.h"
+
+namespace pw::metric {
+namespace {
+
+#define MetricMethodContext      \
+  PW_NANOPB_TEST_METHOD_CONTEXT( \
+      MetricService, Get, 4, sizeof(pw_metric_MetricResponse))
+
+TEST(MetricService, EmptyGroupAndNoMetrics) {
+  // Empty root group.
+  PW_METRIC_GROUP(root, "/");
+
+  // Run the RPC and ensure it completes.
+  MetricMethodContext context(root.metrics(), root.children());
+  context.call({});
+  EXPECT_TRUE(context.done());
+  EXPECT_EQ(Status::Ok(), context.status());
+
+  // No metrics should be in the response.
+  EXPECT_EQ(0u, context.responses().size());
+}
+
+TEST(MetricService, FlatMetricsNoGroupsOneResponseOnly) {
+  // Set up a one-group suite of metrics.
+  PW_METRIC_GROUP(root, "/");
+  PW_METRIC(root, a, "a", 1.0);
+  PW_METRIC(root, b, "b", 1.0);
+  PW_METRIC(root, c, "c", 1.0);
+  PW_METRIC(root, d, "d", 1.0);
+  PW_METRIC(root, e, "e", 1.0);
+
+  // Run the RPC and ensure it completes.
+  MetricMethodContext context(root.metrics(), root.children());
+  context.call({});
+  EXPECT_TRUE(context.done());
+  EXPECT_EQ(Status::Ok(), context.status());
+
+  // All of the responses should have fit in one proto.
+  EXPECT_EQ(1u, context.responses().size());
+  EXPECT_EQ(5, context.responses()[0].metrics_count);
+}
+
+TEST(MetricService, NestedGroupsButOnlyOneBatch) {
+  // Set up a nested group of metrics that will fit in the default batch (10).
+  PW_METRIC_GROUP(root, "/");
+  PW_METRIC(root, a, "a", 1.0);
+  PW_METRIC(root, b, "b", 1.0);
+  PW_METRIC(root, c, "c", 1.0);
+
+  PW_METRIC_GROUP(inner, "inner");
+  PW_METRIC(inner, x, "x", 1.0);
+  PW_METRIC(inner, y, "y", 1.0);
+  PW_METRIC(inner, z, "z", 1.0);
+
+  root.Add(inner);
+
+  // Run the RPC and ensure it completes.
+  MetricMethodContext context(root.metrics(), root.children());
+  context.call({});
+  EXPECT_TRUE(context.done());
+  EXPECT_EQ(Status::Ok(), context.status());
+
+  // All of the responses should fit in one proto.
+  EXPECT_EQ(1u, context.responses().size());
+  EXPECT_EQ(6, context.responses()[0].metrics_count);
+}
+
+TEST(MetricService, NestedGroupsWithBatches) {
+  // Set up a nested group of metrics that will not fit in a single batch.
+  PW_METRIC_GROUP(root, "/");
+  PW_METRIC(root, a, "a", 1u);
+  PW_METRIC(root, d, "d", 2u);
+  PW_METRIC(root, f, "f", 3u);
+
+  PW_METRIC_GROUP(inner_1, "inner1");
+  PW_METRIC(inner_1, x, "x", 4u);
+  PW_METRIC(inner_1, y, "y", 5u);
+  PW_METRIC(inner_1, z, "z", 6u);
+
+  PW_METRIC_GROUP(inner_2, "inner2");
+  PW_METRIC(inner_2, p, "p", 7u);
+  PW_METRIC(inner_2, q, "q", 8u);
+  PW_METRIC(inner_2, r, "r", 9u);
+  PW_METRIC(inner_2, s, "s", 10u);  // Note: Max # per response is 10.
+  PW_METRIC(inner_2, t, "s", 11u);
+  PW_METRIC(inner_2, u, "s", 12u);
+
+  root.Add(inner_1);
+  root.Add(inner_2);
+
+  // Run the RPC and ensure it completes.
+  MetricMethodContext context(root.metrics(), root.children());
+  context.call({});
+  EXPECT_TRUE(context.done());
+  EXPECT_EQ(Status::Ok(), context.status());
+
+  // The response had to be split into two parts; check that they have the
+  // appropriate sizes.
+  EXPECT_EQ(2u, context.responses().size());
+  EXPECT_EQ(10, context.responses()[0].metrics_count);
+  EXPECT_EQ(2, context.responses()[1].metrics_count);
+
+  // The metrics are the numbers 1..12; sum them and compare.
+  uint32_t metric_sum = 0;
+  for (const auto& response : context.responses()) {
+    for (unsigned i = 0; i < response.metrics_count; ++i) {
+      metric_sum += response.metrics[i].value.as_int;
+    }
+  }
+  EXPECT_EQ(78u, metric_sum);
+
+  // TODO(keir): Properly check all the fields.
+}
+
+}  // namespace
+}  // namespace pw::metric
diff --git a/pw_metric/metric_test.cc b/pw_metric/metric_test.cc
new file mode 100644
index 0000000..0286485
--- /dev/null
+++ b/pw_metric/metric_test.cc
@@ -0,0 +1,245 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_metric/metric.h"
+
+#include "gtest/gtest.h"
+#include "pw_log/log.h"
+
+namespace pw::metric {
+
+TEST(Metric, FloatFromObject) {
+  // Note leading bit is 1; it is stripped from the name to store the type.
+  Token token = 0xf1223344;
+
+  TypedMetric<float> m(token, 1.5f);
+  EXPECT_EQ(m.name(), 0x71223344u);
+  EXPECT_TRUE(m.is_float());
+  EXPECT_FALSE(m.is_int());
+  EXPECT_EQ(m.value(), 1.5f);
+
+  m.Set(55.1f);
+  EXPECT_EQ(m.value(), 55.1f);
+
+  // No increment operation for float.
+}
+
+TEST(Metric, IntFromObject) {
+  // Note leading bit is 1; it is stripped from the name to store the type.
+  Token token = 0xf1223344;
+
+  TypedMetric<uint32_t> m(token, static_cast<uint32_t>(31337u));
+  EXPECT_EQ(m.name(), 0x71223344u);
+  EXPECT_TRUE(m.is_int());
+  EXPECT_FALSE(m.is_float());
+  EXPECT_EQ(m.value(), 31337u);
+
+  m.Set(414u);
+  EXPECT_EQ(m.value(), 414u);
+
+  m.Increment();
+  EXPECT_EQ(m.value(), 415u);
+
+  m.Increment(11u);
+  EXPECT_EQ(m.value(), 426u);
+}
+
+TEST(m, IntFromMacroLocal) {
+  PW_METRIC(m, "some_metric", 14u);
+  EXPECT_TRUE(m.is_int());
+  EXPECT_EQ(m.value(), 14u);
+}
+
+TEST(Metric, FloatFromMacroLocal) {
+  PW_METRIC(m, "some_metric", 3.14f);
+  EXPECT_TRUE(m.is_float());
+  EXPECT_EQ(m.value(), 3.14f);
+}
+
+TEST(Metric, GroupMacroInFunctionContext) {
+  PW_METRIC_GROUP(group, "fancy_subsystem");
+  PW_METRIC(group, x, "x", 5555u);
+  PW_METRIC(group, y, "y", 6.0f);
+
+  // These calls are needed to satisfy GCC, otherwise GCC warns about an unused
+  // variable (even though it is used and passed to the group, which adds it):
+  //
+  //   metric_test.cc:72:20: error: variable 'x' set but not used
+  //   [-Werror=unused-but-set-variable]
+  //
+  x.Increment(10);
+  y.Set(5.0f);
+
+  int num_metrics = 0;
+  for (auto& m : group.metrics()) {
+    PW_UNUSED(m);
+    num_metrics++;
+  }
+  group.Dump();
+  EXPECT_EQ(num_metrics, 2);
+}
+
+// The below are compile tests to ensure the macros work at global scope.
+
+// Case 1: No group specified.
+PW_METRIC(global_x, "global_x", 5555u);
+PW_METRIC(global_y, "global_y", 6.0f);
+
+// Case 2: Group specified.
+PW_METRIC_GROUP(global_group, "a_global_group");
+PW_METRIC(global_group, global_z, "global_x", 5555u);
+PW_METRIC(global_group, global_w, "global_y", 6.0f);
+
+// A fake object to illustrate the API and show nesting metrics.
+// This also tests creating metrics as members inside a class.
+class I2cBus {
+ public:
+  void Transaction() {
+    // An entirely unconvincing fake I2C transaction implementation.
+    transactions_.Increment();
+    bytes_sent_.Increment(5);
+  }
+
+  Group& stats() { return metrics_; }
+
+ private:
+  // Test a group with metrics in it, as a class member.
+  // Note that in many cases, the group would be passed in externally instead.
+  PW_METRIC_GROUP(metrics_, "i2c");
+  PW_METRIC(metrics_, bus_errors_, "bus_errors", 0u);
+  PW_METRIC(metrics_, transactions_, "transactions", 0u);
+  PW_METRIC(metrics_, bytes_sent_, "bytes_sent", 0u);
+
+  // Test metrics without a group, as a class member.
+  PW_METRIC(a, "a", 0u);
+  PW_METRIC(b, "b", 10.0f);
+  PW_METRIC(c, "c", 525u);
+};
+
+class Gyro {
+ public:
+  Gyro(I2cBus& i2c_bus, Group& parent_metrics) : i2c_bus_(i2c_bus) {
+    // Make the gyro a child of the I2C bus. Note that the other arrangement,
+    // where the i2c bus is a child of the gyro, doesn't work if there are
+    // multiple objects on the I2C bus due to the intrusive list mechanism.
+    parent_metrics.Add(metrics_);
+  }
+
+  void Init() {
+    i2c_bus_.Transaction();
+    initialized_.Increment();
+  }
+
+  void ReadAngularVelocity() {
+    // Pretend to be doing some transactions and pulling angular velocity.
+    // Pretend this gyro is inefficient and requires multiple transactions.
+    i2c_bus_.Transaction();
+    i2c_bus_.Transaction();
+    i2c_bus_.Transaction();
+    num_samples_.Increment();
+  }
+
+  Group& stats() { return metrics_; }
+
+ private:
+  I2cBus& i2c_bus_;
+
+  // In this case, "gyro" groups the relevant metrics, but it is possible to
+  // have freestanding metrics directly without a group; however, those
+  // free-standing metrics must be added to a group or list supplied elsewhere
+  // for collection.
+  PW_METRIC_GROUP(metrics_, "gyro");
+  PW_METRIC(metrics_, num_samples_, "num_samples", 1u);
+  PW_METRIC(metrics_, init_time_us_, "init_time_us", 1.0f);
+  PW_METRIC(metrics_, initialized_, "initialized", 0u);
+};
+
+// The below test produces output like:
+//
+//   "$6doqFw==": {
+//     "$05OCZw==": {
+//       "$VpPfzg==": 1,
+//       "$LGPMBQ==": 1.000000,
+//       "$+iJvUg==": 5,
+//     }
+//     "$9hPNxw==": 65,
+//     "$oK7HmA==": 13,
+//     "$FCM4qQ==": 0,
+//   }
+//
+// Note the metric names are tokenized with base64. Decoding requires using the
+// Pigweed detokenizer. With a detokenizing-enabled logger, you would get:
+//
+//   "i2c": {
+//     "gyro": {
+//       "num_samples": 1,
+//       "init_time_us": 1.000000,
+//       "initialized": 5,
+//     }
+//     "bus_errors": 65,
+//     "transactions": 13,
+//     "bytes_sent": 0,
+//   }
+//
+TEST(Metric, InlineConstructionWithGroups) {
+  I2cBus i2c_bus;
+  Gyro gyro(i2c_bus, i2c_bus.stats());
+
+  gyro.Init();
+  gyro.ReadAngularVelocity();
+  gyro.ReadAngularVelocity();
+  gyro.ReadAngularVelocity();
+  gyro.ReadAngularVelocity();
+
+  // This "test" doesn't really test anything, and more illustrates how the
+  // metrics could be instantiated in an object tree.
+  //
+  // Unfortunately, testing dump is difficult since we don't have log
+  // redirection for tests.
+  i2c_bus.stats().Dump();
+}
+
+// PW_METRIC_STATIC doesn't support class scopes, since a definition must be
+// provided outside of the class body.
+// TODO(paulmathieu): add support for class scopes and enable this test
+#if 0
+class MetricTest: public ::testing::Test {
+  public:
+    void Increment() {
+      metric_.Increment();
+    }
+
+  private:
+    PW_METRIC_STATIC(metric_, "metric", 0u);
+};
+
+TEST_F(MetricTest, StaticWithinAClass) {
+  Increment();
+}
+#endif
+
+Metric* StaticMetricIncrement() {
+  PW_METRIC_STATIC(metric, "metric", 0u);
+  metric.Increment();
+  return &metric;
+}
+
+TEST(Metric, StaticWithinAFunction) {
+  Metric* metric = StaticMetricIncrement();
+  EXPECT_EQ(metric->as_int(), 1u);
+  StaticMetricIncrement();
+  EXPECT_EQ(metric->as_int(), 2u);
+}
+
+}  // namespace pw::metric
diff --git a/pw_metric/public/pw_metric/global.h b/pw_metric/public/pw_metric/global.h
new file mode 100644
index 0000000..f3b8440
--- /dev/null
+++ b/pw_metric/public/pw_metric/global.h
@@ -0,0 +1,45 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include "pw_containers/intrusive_list.h"
+#include "pw_metric/metric.h"
+#include "pw_tokenizer/tokenize.h"
+
+namespace pw::metric {
+
+// TODO(keir): Add protection to IntrusiveList to detect the unitialized case,
+// which can happen with the global static constructors in the -O0 case.
+extern IntrusiveList<Group> global_groups;
+extern IntrusiveList<Metric> global_metrics;
+
+// Define a metric that is registered in pw::metric::global_metrics.
+//
+// This is useful for cases where uncoordinated instrumentation with metrics is
+// needed; for example, when instrumenting legacy code where plumbing a metric
+// group around by dependency injection is infeasible.
+#define PW_METRIC_GLOBAL(variable_name, metric_name, init)                    \
+  static constexpr uint32_t variable_name##_token =                           \
+      PW_TOKENIZE_STRING_DOMAIN("metrics", #metric_name);                     \
+  ::pw::metric::TypedMetric<_PW_METRIC_FLOAT_OR_UINT32(init)> variable_name = \
+      {variable_name##_token, init, ::pw::metric::global_metrics}
+
+// Define a group that is registered in pw::metric::global_groups.
+#define PW_METRIC_GROUP_GLOBAL(variable_name, group_name)     \
+  static constexpr uint32_t variable_name##_token =           \
+      PW_TOKENIZE_STRING_DOMAIN("metrics", #group_name);      \
+  ::pw::metric::Group variable_name = {variable_name##_token, \
+                                       ::pw::metric::global_groups}
+
+}  // namespace pw::metric
diff --git a/pw_metric/public/pw_metric/metric.h b/pw_metric/public/pw_metric/metric.h
new file mode 100644
index 0000000..3b8d79c
--- /dev/null
+++ b/pw_metric/public/pw_metric/metric.h
@@ -0,0 +1,308 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <algorithm>
+#include <initializer_list>
+#include <limits>
+
+#include "pw_assert/assert.h"
+#include "pw_containers/intrusive_list.h"
+#include "pw_preprocessor/arguments.h"
+#include "pw_tokenizer/tokenize.h"
+
+namespace pw::metric {
+
+// Currently, this is for tokens, but later may be a char* when non-tokenized
+// metric names are supported.
+using tokenizer::Token;
+
+// An individual metric. There are only two supported types: uint32_t and
+// float. More complicated compound metrics can be built on these primitives.
+// See the documentation for a discussion for this design was selected.
+//
+// Size: 12 bytes / 96 bits - next, name, value.
+//
+// TODO(keir): Implement Set() and Increment() using atomics.
+// TODO(keir): Consider an alternative structure where metrics have pointers to
+// parent groups, which would enable (1) safe destruction and (2) safe static
+// initialization, but at the cost of an additional 4 bytes per metric and 4
+// bytes per group..
+class Metric : public IntrusiveList<Metric>::Item {
+ public:
+  Token name() const { return name_and_type_ & kTokenMask; }
+
+  bool is_float() const { return (name_and_type_ & kTypeMask) == kTypeFloat; }
+  bool is_int() const { return (name_and_type_ & kTypeMask) == kTypeInt; }
+
+  float as_float() const;
+  uint32_t as_int() const;
+
+  // Dump a metric or metrics to logs. Level determines the indentation
+  // indent_level up to a maximum of 4. Example output:
+  //
+  //   "$FCM4qQ==": 0,
+  //
+  // Note the base64-encoded token name. Detokenization tools are necessary to
+  // convert this to human-readable form.
+  void Dump(int indent_level = 0);
+  static void Dump(IntrusiveList<Metric>& metrics, int indent_level = 0);
+
+  // Disallow copy and assign.
+  Metric(Metric const&) = delete;
+  void operator=(const Metric&) = delete;
+
+ protected:
+  Metric(Token name, float value)
+      : name_and_type_((name & kTokenMask) | kTypeFloat), float_(value) {}
+
+  Metric(Token name, uint32_t value)
+      : name_and_type_((name & kTokenMask) | kTypeInt), uint_(value) {}
+
+  Metric(Token name, float value, IntrusiveList<Metric>& metrics);
+  Metric(Token name, uint32_t value, IntrusiveList<Metric>& metrics);
+
+  // Hide mutation methods, and only offer write access through the specialized
+  // TypedMetric below. This makes it impossible to call metric.Increment() on
+  // a float metric at compile time.
+  void Increment(uint32_t amount = 1);
+
+  void SetInt(uint32_t value);
+
+  void SetFloat(float value);
+
+ private:
+  // The name of this metric as a token; from PW_TOKENIZE_STRING("my_metric").
+  // Last bit of the token is used to store int or float; 0 == int, 1 == float.
+  Token name_and_type_;
+
+  union {
+    float float_;
+    uint32_t uint_;
+  };
+
+  enum {
+    kTokenMask = 0x7fff'ffff,
+    kTypeMask = 0x8000'0000,
+    kTypeFloat = 0x8000'0000,
+    kTypeInt = 0x0,
+  };
+};
+
+// TypedMetric provides a type-safe wrapper the runtime-typed Metric object.
+// Note: Definition omitted to prevent accidental instantiation.
+// TODO(keir): Provide a more precise error message via static assert.
+template <typename T>
+class TypedMetric;
+
+// A metric for floats. Does not offer an Increment() function, since it is too
+// easy to do unsafe operations like accumulating small values in floats.
+template <>
+class TypedMetric<float> : public Metric {
+ public:
+  TypedMetric(Token name, float value) : Metric(name, value) {}
+  TypedMetric(Token name, float value, IntrusiveList<Metric>& metrics)
+      : Metric(name, value, metrics) {}
+
+  void Set(float value) { SetFloat(value); }
+  float value() const { return Metric::as_float(); }
+
+ private:
+  // Shadow these accessors to hide them on the typed version of Metric.
+  float as_float() const { return 0.0; }
+  uint32_t as_int() const { return 0; }
+};
+
+// A metric for uint32_ts. Offers both Set() and Increment().
+template <>
+class TypedMetric<uint32_t> : public Metric {
+ public:
+  TypedMetric(Token name, uint32_t value) : Metric(name, value) {}
+  TypedMetric(Token name, uint32_t value, IntrusiveList<Metric>& metrics)
+      : Metric(name, value, metrics) {}
+
+  void Increment(uint32_t amount = 1u) { Metric::Increment(amount); }
+  void Set(uint32_t value) { SetInt(value); }
+  uint32_t value() const { return Metric::as_int(); }
+
+ private:
+  // Shadow these accessors to hide them on the typed version of Metric.
+  float as_float() const { return 0.0; }
+  uint32_t as_int() const { return 0; }
+};
+
+// A metric tree; consisting of children groups and leaf metrics.
+//
+// Size: 16 bytes/128 bits - next, name, metrics, children.
+class Group : public IntrusiveList<Group>::Item {
+ public:
+  Group(Token name);
+  Group(Token name, IntrusiveList<Group>& groups);
+
+  Token name() const { return name_; }
+
+  void Add(Metric& metric) { metrics_.push_front(metric); }
+  void Add(Group& group) { children_.push_front(group); }
+
+  IntrusiveList<Metric>& metrics() { return metrics_; }
+  IntrusiveList<Group>& children() { return children_; }
+
+  const IntrusiveList<Metric>& metrics() const { return metrics_; }
+  const IntrusiveList<Group>& children() const { return children_; }
+
+  // Dump a metric group or groups to logs. Level determines the indentation
+  // indent_level up to a maximum of 4. Example output:
+  //
+  //   "$6doqFw==": {
+  //     "$05OCZw==": {
+  //       "$VpPfzg==": 1,
+  //       "$LGPMBQ==": 1.000000,
+  //       "$+iJvUg==": 5,
+  //     }
+  //     "$9hPNxw==": 65,
+  //     "$oK7HmA==": 13,
+  //     "$FCM4qQ==": 0,
+  //   }
+  //
+  // Note the base64-encoded token name. Detokenization tools are necessary to
+  // convert this to human-readable form.
+  void Dump(int indent_level = 0);
+  static void Dump(IntrusiveList<Group>& groups, int indent_level = 0);
+
+  // Disallow copy and assign.
+  Group(Group const&) = delete;
+  void operator=(const Group&) = delete;
+
+ private:
+  // The name of this group as a token; from PW_TOKENIZE_STRING("my_group").
+  Token name_;
+
+  IntrusiveList<Metric> metrics_;
+  IntrusiveList<Group> children_;
+};
+
+// Declare a metric, optionally adding it to a group. Use:
+//
+//   PW_METRIC(variable_name, metric_name, value)
+//   PW_METRIC(group, variable_name, metric_name, value)
+//
+// - variable_name is an identifier
+// - metric_name is a string name for the metric (will be tokenized)
+// - value must be either a floating point value (3.2f) or unsigned int (21u).
+// - group is a Group instance.
+//
+// The macro declares a variable or member named "name" with type Metric, and
+// works in three contexts: global, local, and member.
+//
+// 1. At global scope
+//
+//    PW_METRIC(foo, 15.5f);
+//
+//    void MyFunc() {
+//      foo.Increment();
+//    }
+//
+// 2. At local function or member function scope:
+//
+//    void MyFunc() {
+//      PW_METRIC(foo, "foo", 15.5f);
+//      foo.Increment();
+//      // foo goes out of scope here; be careful!
+//    }
+//
+// 3. At member level inside a class or struct:
+//
+//    struct MyStructy {
+//      void DoSomething() {
+//        somethings_.Increment();
+//      }
+//      // Every instance of MyStructy will have a separate somethings counter.
+//      PW_METRIC(somethings_, "somethings", 0u);
+//    }
+//
+// You can also put a metric into a group with the macro. Metrics can belong to
+// strictly one group, otherwise a assertion will fail. Example:
+//
+//   PW_METRIC_GROUP(my_group, "my_group_name_here");
+//   PW_METRIC(my_group, foo_, "foo", 0.2f);
+//   PW_METRIC(my_group, bar_, "bar", 44000u);
+//   PW_METRIC(my_group, zap_, "zap", 3.14f);
+//
+// NOTE: If you want a globally registered metric, see pw_metric/global.h; in
+// that contexts, metrics are globally registered without the need to centrally
+// register in a single place.
+#define PW_METRIC(...) PW_DELEGATE_BY_ARG_COUNT(_PW_METRIC_, , __VA_ARGS__)
+#define PW_METRIC_STATIC(...) \
+  PW_DELEGATE_BY_ARG_COUNT(_PW_METRIC_, static, __VA_ARGS__)
+
+// Force conversion to uint32_t for non-float types, no matter what the
+// platform uses as the "u" suffix literal. This enables dispatching to the
+// correct TypedMetric specialization.
+#define _PW_METRIC_FLOAT_OR_UINT32(literal)                       \
+  std::conditional_t<std::is_floating_point_v<decltype(literal)>, \
+                     float,                                       \
+                     uint32_t>
+
+// Case: PW_METRIC(name, initial_value)
+#define _PW_METRIC_4(static_def, variable_name, metric_name, init)       \
+  static constexpr uint32_t variable_name##_token =                      \
+      PW_TOKENIZE_STRING_DOMAIN("metrics", metric_name);                 \
+  static_def ::pw::metric::TypedMetric<_PW_METRIC_FLOAT_OR_UINT32(init)> \
+      variable_name = {variable_name##_token, init}
+
+// Case: PW_METRIC(group, name, initial_value)
+#define _PW_METRIC_5(static_def, group, variable_name, metric_name, init) \
+  static constexpr uint32_t variable_name##_token =                       \
+      PW_TOKENIZE_STRING_DOMAIN("metrics", metric_name);                  \
+  static_def ::pw::metric::TypedMetric<_PW_METRIC_FLOAT_OR_UINT32(init)>  \
+      variable_name = {variable_name##_token, init, group.metrics()}
+
+// Define a metric group. Works like PW_METRIC, and works in the same contexts.
+//
+// Example:
+//
+//   class MySubsystem {
+//    public:
+//     void DoSomething() {
+//       attempts.Increment();
+//       if (ActionSucceeds()) {
+//         successes.Increment();
+//       }
+//     }
+//     const Group& metrics() const { return metrics_; }
+//     Group& metrics() { return metrics_; }
+//
+//    private:
+//     PW_METRIC_GROUP(metrics_, "my_subsystem");
+//     PW_METRIC(metrics_, attempts_, "attempts", 0u);
+//     PW_METRIC(metrics_, successes_, "successes", 0u);
+//   };
+//
+#define PW_METRIC_GROUP(...) \
+  PW_DELEGATE_BY_ARG_COUNT(_PW_METRIC_GROUP_, , __VA_ARGS__)
+#define PW_METRIC_GROUP_STATIC(...) \
+  PW_DELEGATE_BY_ARG_COUNT(_PW_METRIC_GROUP_, static, __VA_ARGS__)
+
+#define _PW_METRIC_GROUP_3(static_def, variable_name, group_name) \
+  static constexpr uint32_t variable_name##_token =               \
+      PW_TOKENIZE_STRING_DOMAIN("metrics", group_name);           \
+  static_def ::pw::metric::Group variable_name = {variable_name##_token};
+
+#define _PW_METRIC_GROUP_4(static_def, parent, variable_name, group_name) \
+  static constexpr uint32_t variable_name##_token =                       \
+      PW_TOKENIZE_STRING_DOMAIN("metrics", group_name);                   \
+  static_def ::pw::metric::Group variable_name = {variable_name##_token,  \
+                                                  parent.children()};
+
+}  // namespace pw::metric
diff --git a/pw_metric/public/pw_metric/metric_service_nanopb.h b/pw_metric/public/pw_metric/metric_service_nanopb.h
new file mode 100644
index 0000000..15fd6ee
--- /dev/null
+++ b/pw_metric/public/pw_metric/metric_service_nanopb.h
@@ -0,0 +1,49 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <cstring>
+#include <span>
+
+#include "pw_log/log.h"
+#include "pw_metric/metric.h"
+#include "pw_metric_proto/metric_service.rpc.pb.h"
+
+namespace pw::metric {
+
+// The MetricService will send metrics when requested by Get(). For now, each
+// Get() request results in a stream of responses, containing the metrics from
+// the supplied list of groups and metrics. This includes recursive traversal
+// of subgroups. In the future, filtering will be supported.
+//
+// An important limitation of the current implementation is that the Get()
+// method is blocking, and sends all metrics at once (though batched). In the
+// future, we may switch to offering an async version where the Get() method
+// returns immediately, and someone else is responsible for pumping the queue.
+class MetricService final : public generated::MetricService<MetricService> {
+ public:
+  MetricService(const IntrusiveList<Metric>& metrics,
+                const IntrusiveList<Group>& groups)
+      : metrics_(metrics), groups_(groups) {}
+
+  void Get(ServerContext&,
+           const pw_metric_MetricRequest& request,
+           ServerWriter<pw_metric_MetricResponse>& response);
+
+ private:
+  const IntrusiveList<Metric>& metrics_;
+  const IntrusiveList<Group>& groups_;
+};
+
+}  // namespace pw::metric
diff --git a/pw_sys_io_baremetal_lm3s6965evb/early_boot.c b/pw_metric/pw_metric_proto/metric_service.options
similarity index 81%
copy from pw_sys_io_baremetal_lm3s6965evb/early_boot.c
copy to pw_metric/pw_metric_proto/metric_service.options
index 1670b7d..65eb3b3 100644
--- a/pw_sys_io_baremetal_lm3s6965evb/early_boot.c
+++ b/pw_metric/pw_metric_proto/metric_service.options
@@ -12,6 +12,7 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
-#include "pw_boot_armv7m/boot.h"
+// TODO(keir): Figure out appropriate options.
+pw.metric.Metric.token_path max_count:4
+pw.metric.MetricResponse.metrics max_count:10
 
-void pw_PreStaticConstructorInit() {}
\ No newline at end of file
diff --git a/pw_metric/pw_metric_proto/metric_service.proto b/pw_metric/pw_metric_proto/metric_service.proto
new file mode 100644
index 0000000..9014463
--- /dev/null
+++ b/pw_metric/pw_metric_proto/metric_service.proto
@@ -0,0 +1,71 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+syntax = "proto3";
+
+package pw.metric;
+
+// A metric, described by the name (path + name), and the value.
+//
+// This flattened representation, while more complicated than the obvious tree
+// structure alternative, enables streaming metrics from the device in low
+// memory or low compute situations.
+message Metric {
+  // The token path from the root. The last token is the metric name, and
+  // previous tokens are the parent group names. This could be converted from
+  // the tokens into a string; for example the token path {0xfaff, 0xabcd}:
+  //
+  //  - The group is 0xfaff (root, parent)
+  //  - The metric is 0xabcd
+  //
+  // Given the token database, this might be converted into:
+  //
+  //   /i2c_bus_1/failed_transactions
+  //
+  // Note: This uses a repeated fixed32 instead of a "Oneof" with the string
+  // path to reduce the encoded size. Using a repeated Oneof name { str,
+  // fixed32 } would cost approximately 6N bytes for N path elements, vs 2 + 4N
+  // bytes in the packed case.
+  repeated fixed32 token_path = 1;
+
+  // The string path from the root. Similar to token path, but with strings.
+  // Note: This is currently unsupported.
+  repeated string string_path = 2;
+
+  // The metric value. This field should be omitted when used as a query.
+  oneof value {
+    float as_float = 3;
+    uint32 as_int = 4;
+  };
+}
+
+message MetricRequest {
+  // Metrics or the groups matched to the given paths are returned.  The intent
+  // is to support matching semantics, with at least subsetting to e.g. collect
+  // all the metrics in a group and its children. We may also implement
+  // wildcard matchers.
+  //
+  // Value fields in the metrics will be ignored, since this is a query.
+  //
+  // Note: This is currently unsupported.
+  repeated Metric metrics = 1;
+}
+
+message MetricResponse {
+  repeated Metric metrics = 1;
+}
+
+service MetricService {
+  // Returns metrics or groups matching the requested paths.
+  rpc Get(MetricRequest) returns (stream MetricResponse) {}
+}
diff --git a/pw_metric/size_report/BUILD b/pw_metric/size_report/BUILD
new file mode 100644
index 0000000..f7693a9
--- /dev/null
+++ b/pw_metric/size_report/BUILD
@@ -0,0 +1,65 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+load(
+    "//pw_build:pigweed.bzl",
+    "pw_cc_binary",
+)
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])  # Apache License 2.0
+
+pw_cc_binary(
+    name = "base",
+    srcs = ["base.cc"],
+    deps = [
+        "//pw_bloat:bloat_this_binary",
+        "//pw_log",
+        "//pw_assert",
+    ],
+)
+
+pw_cc_binary(
+    name = "one_metric",
+    srcs = ["one_metric.cc"],
+    deps = [
+        "//pw_bloat:bloat_this_binary",
+        "//pw_metric",
+        "//pw_log",
+        "//pw_assert",
+    ],
+)
+
+pw_cc_binary(
+    name = "dump",
+    srcs = ["dump.cc"],
+    deps = [
+        "//pw_bloat:bloat_this_binary",
+        "//pw_metric",
+        "//pw_log",
+        "//pw_assert",
+    ],
+)
+
+pw_cc_binary(
+    name = "more_metrics",
+    srcs = ["more_metrics.cc"],
+    deps = [
+        "//pw_bloat:bloat_this_binary",
+        "//pw_metric",
+        "//pw_log",
+        "//pw_assert",
+    ],
+)
diff --git a/pw_metric/size_report/BUILD.gn b/pw_metric/size_report/BUILD.gn
new file mode 100644
index 0000000..34c12ac
--- /dev/null
+++ b/pw_metric/size_report/BUILD.gn
@@ -0,0 +1,56 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+pw_executable("base") {
+  sources = [ "base.cc" ]
+  deps = [
+    "$dir_pw_bloat:bloat_this_binary",
+    dir_pw_assert,
+    dir_pw_log,
+  ]
+}
+
+pw_executable("one_metric") {
+  sources = [ "one_metric.cc" ]
+  deps = [
+    "$dir_pw_bloat:bloat_this_binary",
+    "..",
+    dir_pw_assert,
+    dir_pw_log,
+  ]
+}
+
+pw_executable("dump") {
+  sources = [ "dump.cc" ]
+  deps = [
+    "$dir_pw_bloat:bloat_this_binary",
+    "..",
+    dir_pw_assert,
+    dir_pw_log,
+  ]
+}
+
+pw_executable("more_metrics") {
+  sources = [ "more_metrics.cc" ]
+  deps = [
+    "$dir_pw_bloat:bloat_this_binary",
+    "..",
+    dir_pw_assert,
+    dir_pw_log,
+  ]
+}
diff --git a/pw_metric/size_report/base.cc b/pw_metric/size_report/base.cc
new file mode 100644
index 0000000..967201c
--- /dev/null
+++ b/pw_metric/size_report/base.cc
@@ -0,0 +1,31 @@
+// Copyright 2019 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_assert/assert.h"
+#include "pw_bloat/bloat_this_binary.h"
+#include "pw_log/log.h"
+
+int volatile* unoptimizable;
+
+int main() {
+  pw::bloat::BloatThisBinary();
+
+  // Ensure we are paying the cost for log and assert.
+  PW_CHECK_INT_GE(*unoptimizable, 0, "Ensure this CHECK logic stays");
+  PW_LOG_INFO("We care about optimizing: %d", *unoptimizable);
+  // This matches the log preventing optimizing the "m" metric in one_metric.cc.
+  PW_LOG_INFO("some_metric: %d", *unoptimizable);
+
+  return *unoptimizable;
+}
diff --git a/pw_metric/size_report/dump.cc b/pw_metric/size_report/dump.cc
new file mode 100644
index 0000000..12a83fb
--- /dev/null
+++ b/pw_metric/size_report/dump.cc
@@ -0,0 +1,45 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_assert/assert.h"
+#include "pw_bloat/bloat_this_binary.h"
+#include "pw_log/log.h"
+#include "pw_metric/metric.h"
+
+PW_METRIC_GROUP(group_foo, "some_group");
+PW_METRIC(group_foo, metric_x, "some_metric", 14u);
+
+int volatile* unoptimizable;
+
+int main() {
+  pw::bloat::BloatThisBinary();
+
+  if (*unoptimizable) {
+    metric_x.Increment();
+  }
+  metric_x.Increment();
+
+  // Ensure log and assert aren't optimized out.
+  PW_CHECK_INT_GE(*unoptimizable, 0, "Ensure this CHECK logic stays");
+  PW_LOG_INFO("Ensure logs are pulled in: %d", *unoptimizable);
+
+  // Ensure metric_x isn't optimized out.
+  PW_LOG_INFO("metric_x: %d", static_cast<int>(metric_x.value()));
+
+  // Trigger pulling in the dump code. Dump twice to cover cost in more_metrics.
+  group_foo.Dump();
+  group_foo.Dump();
+
+  return *unoptimizable;
+}
diff --git a/pw_metric/size_report/more_metrics.cc b/pw_metric/size_report/more_metrics.cc
new file mode 100644
index 0000000..2ceb7f0
--- /dev/null
+++ b/pw_metric/size_report/more_metrics.cc
@@ -0,0 +1,65 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_assert/assert.h"
+#include "pw_bloat/bloat_this_binary.h"
+#include "pw_log/log.h"
+#include "pw_metric/metric.h"
+
+PW_METRIC_GROUP(group_bar, "bar");
+PW_METRIC(group_bar, metric_a, "a", 14u);
+PW_METRIC(group_bar, metric_b, "b", 14u);
+PW_METRIC(group_bar, metric_c, "c", 14u);
+
+PW_METRIC_GROUP(group_foo, "foo");
+PW_METRIC(group_foo, metric_x, "x", 14u);
+PW_METRIC(group_foo, metric_y, "y", 14u);
+
+int volatile* unoptimizable;
+
+int main() {
+  pw::bloat::BloatThisBinary();
+
+  if (*unoptimizable) {
+    // Bar
+    metric_a.Increment();
+    metric_b.Increment();
+    metric_c.Increment();
+
+    // Foo
+    metric_x.Increment();
+    metric_y.Increment();
+  }
+  // Bar
+  metric_a.Increment();
+  metric_b.Increment();
+  metric_c.Increment();
+
+  // Foo
+  metric_x.Increment();
+  metric_y.Increment();
+
+  // Ensure log and assert aren't optimized out.
+  PW_CHECK_INT_GE(*unoptimizable, 0, "Ensure this CHECK logic stays");
+  PW_LOG_INFO("Ensure logs are pulled in: %d", *unoptimizable);
+
+  // Ensure metric_x isn't optimized out.
+  PW_LOG_INFO("metric_x: %d", static_cast<int>(metric_x.value()));
+
+  // Trigger pulling in the dump code.
+  group_foo.Dump();
+  group_bar.Dump();
+
+  return *unoptimizable;
+}
diff --git a/pw_metric/size_report/one_metric.cc b/pw_metric/size_report/one_metric.cc
new file mode 100644
index 0000000..2869267
--- /dev/null
+++ b/pw_metric/size_report/one_metric.cc
@@ -0,0 +1,42 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_assert/assert.h"
+#include "pw_bloat/bloat_this_binary.h"
+#include "pw_log/log.h"
+#include "pw_metric/metric.h"
+
+PW_METRIC_GROUP(group_foo, "foo");
+PW_METRIC(group_foo, metric_x, "x", 14u);
+
+int volatile* unoptimizable;
+
+int main() {
+  PW_METRIC_GROUP(inner, "some_group");
+  pw::bloat::BloatThisBinary();
+
+  if (*unoptimizable) {
+    metric_x.Increment();
+  }
+  metric_x.Increment();
+
+  // Ensure log and assert aren't optimized out.
+  PW_CHECK_INT_GE(*unoptimizable, 0, "Ensure this CHECK logic stays");
+  PW_LOG_INFO("Ensure logs are pulled in: %d", *unoptimizable);
+
+  // Ensure metric_x isn't optimized out.
+  PW_LOG_INFO("metric_x: %d", static_cast<int>(metric_x.value()));
+
+  return *unoptimizable;
+}
diff --git a/pw_minimal_cpp_stdlib/BUILD.gn b/pw_minimal_cpp_stdlib/BUILD.gn
index 5f3a6ad..332e18c 100644
--- a/pw_minimal_cpp_stdlib/BUILD.gn
+++ b/pw_minimal_cpp_stdlib/BUILD.gn
@@ -12,12 +12,12 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/target_types.gni")
 import("$dir_pw_docgen/docs.gni")
 import("$dir_pw_unit_test/test.gni")
+
 config("include_dirs") {
   include_dirs = [ "public" ]
 }
diff --git a/pw_minimal_cpp_stdlib/docs.rst b/pw_minimal_cpp_stdlib/docs.rst
index 8a574d3..29a89aa 100644
--- a/pw_minimal_cpp_stdlib/docs.rst
+++ b/pw_minimal_cpp_stdlib/docs.rst
@@ -1,6 +1,4 @@
-.. _chapter-pw-minimal-cpp-stdlib:
-
-.. default-domain:: cpp
+.. _module-pw_minimal_cpp_stdlib:
 
 ---------------------
 pw_minimal_cpp_stdlib
diff --git a/pw_module/BUILD.gn b/pw_module/BUILD.gn
index 4ad8f50..0969672 100644
--- a/pw_module/BUILD.gn
+++ b/pw_module/BUILD.gn
@@ -12,10 +12,10 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_docgen/docs.gni")
+
 pw_doc_group("docs") {
   sources = [ "docs.rst" ]
 }
diff --git a/pw_module/docs.rst b/pw_module/docs.rst
index 0eea2d0..ec564eb 100644
--- a/pw_module/docs.rst
+++ b/pw_module/docs.rst
@@ -1,20 +1,16 @@
-.. default-domain:: cpp
-
-.. highlight:: sh
-
-.. _chapter-module:
+.. _module-pw_module:
 
 ---------
 pw_module
 ---------
 The ``pw_module`` module contains tools for managing Pigweed modules.
 For information on the structure of a Pigweed module, refer to
-:ref:`chapter-module-guide`
+:ref:`docs-module-guides`.
 
 Commands
 --------
 
-.. _chapter-module-module-check:
+.. _module-pw_module-module-check:
 
 ``pw module-check``
 ^^^^^^^^^^^^^^^^^^^
diff --git a/pw_protobuf_compiler/nanopb.gni b/pw_module/py/BUILD.gn
similarity index 71%
copy from pw_protobuf_compiler/nanopb.gni
copy to pw_module/py/BUILD.gn
index 3c3be32..09f7896 100644
--- a/pw_protobuf_compiler/nanopb.gni
+++ b/pw_module/py/BUILD.gn
@@ -12,8 +12,15 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-declare_args() {
-  # If compiling protos for nanopb, this variable is set to the path to the
-  # nanopb installation.
-  dir_pw_third_party_nanopb = ""
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/python.gni")
+
+pw_python_package("py") {
+  setup = [ "setup.py" ]
+  sources = [
+    "pw_module/__init__.py",
+    "pw_module/check.py",
+  ]
+  tests = [ "check_test.py" ]
 }
diff --git a/pw_module/py/pw_module/__init__.py b/pw_module/py/pw_module/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pw_module/py/pw_module/__init__.py
diff --git a/pw_module/py/pw_module/py.typed b/pw_module/py/pw_module/py.typed
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pw_module/py/pw_module/py.typed
diff --git a/pw_module/py/setup.py b/pw_module/py/setup.py
index 46e1ab1..5957252 100644
--- a/pw_module/py/setup.py
+++ b/pw_module/py/setup.py
@@ -13,7 +13,7 @@
 # the License.
 """pw_module"""
 
-import setuptools
+import setuptools  # type: ignore
 
 setuptools.setup(
     name='pw_module',
@@ -22,4 +22,6 @@
     author_email='pigweed-developers@googlegroups.com',
     description='Meta-module for Pigweed',
     packages=setuptools.find_packages(),
+    package_data={'pw_module': ['py.typed']},
+    zip_safe=False,
 )
diff --git a/pw_protobuf_compiler/nanopb.gni b/pw_package/BUILD.gn
similarity index 78%
copy from pw_protobuf_compiler/nanopb.gni
copy to pw_package/BUILD.gn
index 3c3be32..dd021e8 100644
--- a/pw_protobuf_compiler/nanopb.gni
+++ b/pw_package/BUILD.gn
@@ -12,8 +12,10 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-declare_args() {
-  # If compiling protos for nanopb, this variable is set to the path to the
-  # nanopb installation.
-  dir_pw_third_party_nanopb = ""
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_docgen/docs.gni")
+
+pw_doc_group("docs") {
+  sources = [ "docs.rst" ]
 }
diff --git a/pw_package/docs.rst b/pw_package/docs.rst
new file mode 100644
index 0000000..b3055db
--- /dev/null
+++ b/pw_package/docs.rst
@@ -0,0 +1,110 @@
+.. _module-pw_package:
+
+==========
+pw_package
+==========
+The package module provides a mechanism to install additional tools used by
+Pigweed. Most Pigweed dependencies should be installed using
+:ref:`module-pw_env_setup`. Examples of reasons packages should be managed using
+this module instead are listed below.
+
+* The dependency is extremely large and not commonly used.
+* The dependency has a number of compatible versions and we want to allow
+  downstream projects to pick a version rather than being forced to use ours.
+* The dependency has license issues that make it complicated for Google to
+  include it directly as a submodule or distribute it as a CIPD package.
+* The dependency needs to be "installed" into the system in some manner beyond
+  just extraction and thus isn't a good match for distribution with CIPD.
+
+-----
+Usage
+-----
+The package module can be accessed through the ``pw package`` command. This
+has several subcommands.
+
+``pw package list``
+  Lists all the packages installed followed by all the packages available.
+
+``pw package install <package-name>``
+  Installs ``<package-name>``. Exactly how this works is package-dependent,
+  and packages can decide to do nothing because the package is current, do an
+  incremental update, or delete the current version and install anew. Use
+  ``--force`` to remove the package before installing.
+
+``pw package status <package-name>``
+  Indicates whether ``<package-name>`` is installed.
+
+``pw package remove <package-name>``
+  Removes ``<package-name>``.
+
+-----------
+Configuring
+-----------
+
+Compatibility
+~~~~~~~~~~~~~
+Python 3
+
+Adding a New Package
+~~~~~~~~~~~~~~~~~~~~
+To add a new package create a class that subclasses ``Package`` from
+``pw_package/package_manager.py``.
+
+.. code-block:: python
+
+  class Package:
+      """Package to be installed.
+
+      Subclass this to implement installation of a specific package.
+      """
+      def __init__(self, name):
+          self._name = name
+
+      @property
+      def name(self):
+          return self._name
+
+      def install(self, path: pathlib.Path) -> None:
+          """Install the package at path.
+
+          Install the package in path. Cannot assume this directory is empty—it
+          may need to be deleted or updated.
+          """
+
+      def remove(self, path: pathlib.Path) -> None:
+          """Remove the package from path.
+
+          Removes the directory containing the package. For most packages this
+          should be sufficient to remove the package, and subclasses should not
+          need to override this package.
+          """
+          if os.path.exists(path):
+              shutil.rmtree(path)
+
+      def status(self, path: pathlib.Path) -> bool:
+          """Returns if package is installed at path and current.
+
+          This method will be skipped if the directory does not exist.
+          """
+
+There's also a helper class for retrieving specific revisions of Git
+repositories in ``pw_package/git_repo.py``.
+
+Then call ``pw_package.package_manager.register(PackageClass)`` to register
+the class with the package manager.
+
+Setting up a Project
+~~~~~~~~~~~~~~~~~~~~
+To set up the package manager for a new project create a file like below and
+add it to the ``PW_PLUGINS`` file (see :ref:`module-pw_cli` for details). This
+file is based off of ``pw_package/pigweed_packages.py``.
+
+.. code-block:: python
+
+  from pw_package import package_manager
+  # These modules register themselves so must be imported despite appearing
+  # unused.
+  from pw_package.packages import nanopb
+
+  def main(argv=None) -> int:
+      return package_manager.run(**vars(package_manager.parse_args(argv)))
diff --git a/pw_rpc/test_impl/BUILD.gn b/pw_package/py/BUILD.gn
similarity index 62%
copy from pw_rpc/test_impl/BUILD.gn
copy to pw_package/py/BUILD.gn
index 68f88c1..6388bc0 100644
--- a/pw_rpc/test_impl/BUILD.gn
+++ b/pw_package/py/BUILD.gn
@@ -12,19 +12,18 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
-import("$dir_pw_build/target_types.gni")
-import("$dir_pw_unit_test/test.gni")
-config("config") {
-  include_dirs = [ "public_overrides" ]
-  visibility = [ ":*" ]
-}
+import("$dir_pw_build/python.gni")
 
-pw_source_set("test_impl") {
-  public_configs = [ ":config" ]
-  public = [ "public_overrides/pw_rpc/internal/method.h" ]
-  public_deps = [ "../:server_library_deps" ]
-  visibility = [ "..:*" ]
+pw_python_package("py") {
+  setup = [ "setup.py" ]
+  sources = [
+    "pw_package/__init__.py",
+    "pw_package/git_repo.py",
+    "pw_package/package_manager.py",
+    "pw_package/packages/__init__.py",
+    "pw_package/packages/nanopb.py",
+    "pw_package/pigweed_packages.py",
+  ]
 }
diff --git a/pw_package/py/pw_package/__init__.py b/pw_package/py/pw_package/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pw_package/py/pw_package/__init__.py
diff --git a/pw_package/py/pw_package/git_repo.py b/pw_package/py/pw_package/git_repo.py
new file mode 100644
index 0000000..4b98366
--- /dev/null
+++ b/pw_package/py/pw_package/git_repo.py
@@ -0,0 +1,71 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Install and check status of Git repository-based packages."""
+
+import os
+import pathlib
+import shutil
+import subprocess
+from typing import Union
+
+import pw_package.package_manager
+
+PathOrStr = Union[pathlib.Path, str]
+
+
+def git_stdout(*args: PathOrStr,
+               show_stderr=False,
+               repo: PathOrStr = '.') -> str:
+    return subprocess.run(['git', '-C', repo, *args],
+                          stdout=subprocess.PIPE,
+                          stderr=None if show_stderr else subprocess.DEVNULL,
+                          check=True).stdout.decode().strip()
+
+
+def git(*args: PathOrStr,
+        repo: PathOrStr = '.') -> subprocess.CompletedProcess:
+    return subprocess.run(['git', '-C', repo, *args], check=True)
+
+
+class GitRepo(pw_package.package_manager.Package):
+    """Install and check status of Git repository-based packages."""
+    def __init__(self, url, commit, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self._url = url
+        self._commit = commit
+
+    def status(self, path: pathlib.Path) -> bool:
+        if not os.path.isdir(path / '.git'):
+            return False
+
+        remote = git_stdout('remote', 'get-url', 'origin', repo=path)
+        commit = git_stdout('rev-parse', 'HEAD', repo=path)
+        status = git_stdout('status', '--porcelain=v1', repo=path)
+        return remote == self._url and commit == self._commit and not status
+
+    def install(self, path: pathlib.Path) -> None:
+        # If already installed and at correct version exit now.
+        if self.status(path):
+            return
+
+        # Otherwise delete current version and clone again.
+        if os.path.isdir(path):
+            shutil.rmtree(path)
+
+        # --filter=blob:none means we don't get history, just the current
+        # revision. If we later run commands that need history it will be
+        # retrieved on-demand. For small repositories the effect is negligible
+        # but for large repositories this should be a significant improvement.
+        git('clone', '--filter=blob:none', self._url, path)
+        git('reset', '--hard', self._commit, repo=path)
diff --git a/pw_package/py/pw_package/package_manager.py b/pw_package/py/pw_package/package_manager.py
new file mode 100644
index 0000000..dd89dba
--- /dev/null
+++ b/pw_package/py/pw_package/package_manager.py
@@ -0,0 +1,183 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Install and remove optional packages."""
+
+import argparse
+import dataclasses
+import logging
+import os
+import pathlib
+import shutil
+from typing import Dict, List, Tuple
+
+_LOG: logging.Logger = logging.getLogger(__name__)
+
+
+class Package:
+    """Package to be installed.
+
+    Subclass this to implement installation of a specific package.
+    """
+    def __init__(self, name):
+        self._name = name
+
+    @property
+    def name(self):
+        return self._name
+
+    def install(self, path: pathlib.Path) -> None:  # pylint: disable=no-self-use
+        """Install the package at path.
+
+        Install the package in path. Cannot assume this directory is empty—it
+        may need to be deleted or updated.
+        """
+
+    def remove(self, path: pathlib.Path) -> None:  # pylint: disable=no-self-use
+        """Remove the package from path.
+
+        Removes the directory containing the package. For most packages this
+        should be sufficient to remove the package, and subclasses should not
+        need to override this package.
+        """
+        if os.path.exists(path):
+            shutil.rmtree(path)
+
+    def status(self, path: pathlib.Path) -> bool:  # pylint: disable=no-self-use
+        """Returns if package is installed at path and current.
+
+        This method will be skipped if the directory does not exist.
+        """
+
+
+_PACKAGES: Dict[str, Package] = {}
+
+
+def register(package_class: type) -> None:
+    obj = package_class()
+    _PACKAGES[obj.name] = obj
+
+
+@dataclasses.dataclass
+class Packages:
+    all: Tuple[str, ...]
+    installed: Tuple[str, ...]
+    available: Tuple[str, ...]
+
+
+class PackageManager:
+    """Install and remove optional packages."""
+    def __init__(self, root: pathlib.Path):
+        self._pkg_root = root
+        os.makedirs(root, exist_ok=True)
+
+    def install(self, package: str, force: bool = False) -> None:
+        pkg = _PACKAGES[package]
+        if force:
+            self.remove(package)
+        pkg.install(self._pkg_root / pkg.name)
+
+    def remove(self, package: str) -> None:
+        pkg = _PACKAGES[package]
+        pkg.remove(self._pkg_root / pkg.name)
+
+    def status(self, package: str) -> bool:
+        pkg = _PACKAGES[package]
+        path = self._pkg_root / pkg.name
+        return os.path.isdir(path) and pkg.status(path)
+
+    def list(self) -> Packages:
+        installed = []
+        available = []
+        for package in sorted(_PACKAGES.keys()):
+            pkg = _PACKAGES[package]
+            if pkg.status(self._pkg_root / pkg.name):
+                installed.append(pkg.name)
+            else:
+                available.append(pkg.name)
+
+        return Packages(
+            all=tuple(_PACKAGES.keys()),
+            installed=tuple(installed),
+            available=tuple(available),
+        )
+
+
+class PackageManagerCLI:
+    """Command-line interface to PackageManager."""
+    def __init__(self):
+        self._mgr: PackageManager = None
+
+    def install(self, package: str, force: bool = False) -> int:
+        _LOG.info('Installing %s...', package)
+        self._mgr.install(package, force)
+        _LOG.info('Installing %s...done.', package)
+        return 0
+
+    def remove(self, package: str) -> int:
+        _LOG.info('Removing %s...', package)
+        self._mgr.remove(package)
+        _LOG.info('Removing %s...done.', package)
+        return 0
+
+    def status(self, package: str) -> int:
+        if self._mgr.status(package):
+            _LOG.info('%s is installed.', package)
+            return 0
+
+        _LOG.info('%s is not installed.', package)
+        return -1
+
+    def list(self) -> int:
+        packages = self._mgr.list()
+
+        _LOG.info('Installed packages:')
+        for package in packages.installed:
+            _LOG.info('  %s', package)
+        _LOG.info('')
+
+        _LOG.info('Available packages:')
+        for package in packages.available:
+            _LOG.info('  %s', package)
+        _LOG.info('')
+
+        return 0
+
+    def run(self, command: str, pkg_root: pathlib.Path, **kwargs) -> int:
+        self._mgr = PackageManager(pkg_root)
+        return getattr(self, command)(**kwargs)
+
+
+def parse_args(argv: List[str] = None) -> argparse.Namespace:
+    parser = argparse.ArgumentParser("Manage packages.")
+    parser.add_argument(
+        '--package-root',
+        '-e',
+        dest='pkg_root',
+        type=pathlib.Path,
+        default=(pathlib.Path(os.environ['_PW_ACTUAL_ENVIRONMENT_ROOT']) /
+                 'packages'),
+    )
+    subparsers = parser.add_subparsers(dest='command', required=True)
+    install = subparsers.add_parser('install')
+    install.add_argument('--force', '-f', action='store_true')
+    remove = subparsers.add_parser('remove')
+    status = subparsers.add_parser('status')
+    for cmd in (install, remove, status):
+        cmd.add_argument('package', choices=_PACKAGES.keys())
+    _ = subparsers.add_parser('list')
+    return parser.parse_args(argv)
+
+
+def run(**kwargs):
+    return PackageManagerCLI().run(**kwargs)
diff --git a/pw_protobuf_compiler/nanopb.gni b/pw_package/py/pw_package/packages/__init__.py
similarity index 78%
copy from pw_protobuf_compiler/nanopb.gni
copy to pw_package/py/pw_package/packages/__init__.py
index 3c3be32..2c8334f 100644
--- a/pw_protobuf_compiler/nanopb.gni
+++ b/pw_package/py/pw_package/packages/__init__.py
@@ -11,9 +11,3 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations under
 # the License.
-
-declare_args() {
-  # If compiling protos for nanopb, this variable is set to the path to the
-  # nanopb installation.
-  dir_pw_third_party_nanopb = ""
-}
diff --git a/pw_package/py/pw_package/packages/nanopb.py b/pw_package/py/pw_package/packages/nanopb.py
new file mode 100644
index 0000000..96955bd
--- /dev/null
+++ b/pw_package/py/pw_package/packages/nanopb.py
@@ -0,0 +1,30 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Install and check status of nanopb."""
+
+import pw_package.git_repo
+import pw_package.package_manager
+
+
+class NanoPB(pw_package.git_repo.GitRepo):
+    """Install and check status of nanopb."""
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args,
+                         name='nanopb',
+                         url='https://github.com/nanopb/nanopb.git',
+                         commit='9f57cc871d8a025039019c2d2fde217591f4e30d',
+                         **kwargs)
+
+
+pw_package.package_manager.register(NanoPB)
diff --git a/pw_package/py/pw_package/pigweed_packages.py b/pw_package/py/pw_package/pigweed_packages.py
new file mode 100644
index 0000000..734b6d7
--- /dev/null
+++ b/pw_package/py/pw_package/pigweed_packages.py
@@ -0,0 +1,29 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Install and remove optional packages for Pigweed."""
+
+import sys
+
+from pw_package import package_manager
+# These modules register themselves so must be imported despite appearing
+# unused.
+from pw_package.packages import nanopb  # pylint: disable=unused-import
+
+
+def main(argv=None) -> int:
+    return package_manager.run(**vars(package_manager.parse_args(argv)))
+
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/pw_package/py/pw_package/py.typed b/pw_package/py/pw_package/py.typed
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pw_package/py/pw_package/py.typed
diff --git a/pw_package/py/setup.py b/pw_package/py/setup.py
new file mode 100644
index 0000000..eb574be
--- /dev/null
+++ b/pw_package/py/setup.py
@@ -0,0 +1,28 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""The pw_package package."""
+
+import setuptools  # type: ignore
+
+setuptools.setup(
+    name='pw_package',
+    version='0.0.1',
+    author='Pigweed Authors',
+    author_email='pigweed-developers@googlegroups.com',
+    description='Tools for installing optional packages',
+    packages=setuptools.find_packages(),
+    package_data={'pw_package': ['py.typed']},
+    zip_safe=False,
+    install_requires=[],
+)
diff --git a/pw_polyfill/BUILD b/pw_polyfill/BUILD
index 36affb2..976b45a 100644
--- a/pw_polyfill/BUILD
+++ b/pw_polyfill/BUILD
@@ -25,7 +25,7 @@
 pw_cc_library(
     name = "pw_polyfill",
     hdrs = [
-        "public/pw_polyfill/language_features.h",
+        "public/pw_polyfill/language_feature_macros.h",
         "public/pw_polyfill/standard.h",
     ],
     includes = ["public"],
@@ -34,11 +34,19 @@
 
 pw_cc_library(
     name = "overrides",
+    srcs = ["language_features.h"],
     hdrs = [
+        "public_overrides/array",
         "public_overrides/assert.h",
+        "public_overrides/bit",
         "public_overrides/cstddef",
         "public_overrides/iterator",
         "public_overrides/type_traits",
+        "public_overrides/utility",
+    ],
+    copts = [
+        "-include",
+        "language_features.h",
     ],
     includes = ["public_overrides"],
     deps = [":standard_library"],
@@ -47,11 +55,14 @@
 pw_cc_library(
     name = "standard_library",
     hdrs = [
+        "standard_library_public/pw_polyfill/standard_library/array.h",
         "standard_library_public/pw_polyfill/standard_library/assert.h",
+        "standard_library_public/pw_polyfill/standard_library/bit.h",
         "standard_library_public/pw_polyfill/standard_library/cstddef.h",
         "standard_library_public/pw_polyfill/standard_library/iterator.h",
         "standard_library_public/pw_polyfill/standard_library/namespace.h",
         "standard_library_public/pw_polyfill/standard_library/type_traits.h",
+        "standard_library_public/pw_polyfill/standard_library/utility.h",
     ],
     includes = ["standard_library_public"],
     visibility = ["//visibility:private"],
diff --git a/pw_polyfill/BUILD.gn b/pw_polyfill/BUILD.gn
index e3d40f2..7774294 100644
--- a/pw_polyfill/BUILD.gn
+++ b/pw_polyfill/BUILD.gn
@@ -12,14 +12,15 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/target_types.gni")
 import("$dir_pw_docgen/docs.gni")
 import("$dir_pw_unit_test/test.gni")
+
 config("public") {
   include_dirs = [ "public" ]
+  visibility = [ ":*" ]
 }
 
 pw_source_set("pw_polyfill") {
@@ -27,14 +28,21 @@
   remove_public_deps = [ "*" ]
   public_deps = [ ":standard_library" ]
   public = [
-    "public/pw_polyfill/language_features.h",
+    "public/pw_polyfill/language_feature_macros.h",
     "public/pw_polyfill/standard.h",
   ]
-  sources = public
 }
 
 config("overrides_config") {
   include_dirs = [ "public_overrides" ]
+  cflags_cc = [
+    # Use -include to include the language features header in dependent files,
+    # without requiring a #include. This allows the use of newer C++ language
+    # features in older C++ versions without an explicit include.
+    "-include",
+    rebase_path("language_features.h"),
+  ]
+  visibility = [ ":*" ]
 }
 
 pw_source_set("overrides") {
@@ -42,11 +50,15 @@
   remove_public_deps = [ "*" ]
   public_deps = [ ":standard_library" ]
   inputs = [
+    "public_overrides/array",
     "public_overrides/assert.h",
+    "public_overrides/bit",
     "public_overrides/cstddef",
     "public_overrides/iterator",
     "public_overrides/type_traits",
+    "public_overrides/utility",
   ]
+  sources = [ "language_features.h" ]
 }
 
 config("standard_library_public") {
@@ -57,13 +69,15 @@
   public_configs = [ ":standard_library_public" ]
   remove_public_deps = [ "*" ]
   public = [
+    "standard_library_public/pw_polyfill/standard_library/array.h",
     "standard_library_public/pw_polyfill/standard_library/assert.h",
+    "standard_library_public/pw_polyfill/standard_library/bit.h",
     "standard_library_public/pw_polyfill/standard_library/cstddef.h",
     "standard_library_public/pw_polyfill/standard_library/iterator.h",
     "standard_library_public/pw_polyfill/standard_library/namespace.h",
     "standard_library_public/pw_polyfill/standard_library/type_traits.h",
+    "standard_library_public/pw_polyfill/standard_library/utility.h",
   ]
-  sources = public
   visibility = [
     ":overrides",
     ":pw_polyfill",
diff --git a/pw_polyfill/CMakeLists.txt b/pw_polyfill/CMakeLists.txt
index 0914577..583b500 100644
--- a/pw_polyfill/CMakeLists.txt
+++ b/pw_polyfill/CMakeLists.txt
@@ -22,3 +22,6 @@
     public_overrides
     standard_library_public
 )
+target_compile_options(pw_polyfill.overrides INTERFACE
+    -include "${CMAKE_CURRENT_SOURCE_DIR}/language_features.h"
+)
diff --git a/pw_polyfill/docs.rst b/pw_polyfill/docs.rst
index 48ef25f..1d35878 100644
--- a/pw_polyfill/docs.rst
+++ b/pw_polyfill/docs.rst
@@ -1,48 +1,75 @@
-.. _chapter-pw-polyfill:
+.. _module-pw_polyfill:
 
-.. default-domain:: cpp
-
-.. highlight:: sh
-
------------
+===========
 pw_polyfill
------------
-The ``pw_polyfill`` module backports C++17 features to C++11 and C++14.
+===========
+The ``pw_polyfill`` module backports new C++ features to older C++ standards.
+When possible, features are adapted to work in standards as old as C++11.
+Pigweed does not support C++ standards older than C++11.
 
-Compatibility
-=============
-C++11
-
-Features
-========
-
-Adapt code to compile with older versions of C++
 ------------------------------------------------
-The ``pw_polyfill`` module provides features for adapting C++17 code to work
-when compiled with older C++ standards.
+Backport new C++ features to older C++ standards
+------------------------------------------------
+The main purpose of ``pw_polyfill`` is to bring new C++ library and language
+features to older C++ standards. No additional ``#include`` statements are
+required to use these features; simply write code assuming that the features are
+available. This implicit feature backporting is provided through the
+``overrides`` library in the ``pw_polyfill`` module. GN automatically adds this
+library as a dependency in ``pw_source_set``.
 
-  - ``pw_polyfill/standard.h`` -- provides a macro for checking the C++ standard
-  - ``pw_polyfill/language_features.h`` -- provides macros for adapting code to
-    work without newer language features
-  - ``pw_polyfill/standard_library/`` -- adapters for C++ standard library
-    features, such as ``std::byte``, ``std::size``/``std::data``, and type
-    traits convenience aliases
+``pw_polyfill`` backports C++ library features by wrapping the standard C++ and
+C headers. The wrapper headers include the original header using
+`#include_next <https://gcc.gnu.org/onlinedocs/cpp/Wrapper-Headers.html>`_, then
+add missing features. The backported features are only defined if they aren't
+provided by the standard header, so ``pw_polyfill`` is safe to use when
+compiling with any standard C++11 or newer.
 
-In GN, Bazel, or CMake, depend on ``$dir_pw_polyfill``, ``//pw_polyfill``,
-or ``pw_polyfill``, respectively. In other build systems, add
-``pw_polyfill/standard_library_public`` and ``pw_polyfill/public_overrides`` as
-include paths.
+Language features are backported or stubbed via the
+``pw_polyfill/language_features.h`` header. This header is included in files
+that depend on ``pw_polyfill`` with GCC's ``-include`` option so that no
+``#include`` statement is required.
 
-Override C++ standard library headers
--------------------------------------
-The headers in ``public_overrides`` provide wrappers for C++ standard library
-headers, including ``<cstddef>``, ``<iterator>``, ``<type_traits>``. These are
-provided through the ``"$dir_pw_polyfill:overrides"`` library, which the GN
-build adds as a dependency for all targets. This makes some C++17 library
-features available to targets compiled with older C++ standards, without needing
-to change the code.
-
-To apply overrides in Bazel or CMake, depend on the
+The wrapper headers are in ``public_overrides``. These are provided through the
+``"$dir_pw_polyfill:overrides"`` library, which the GN build adds as a
+dependency for all targets. To apply overrides in Bazel or CMake, depend on the
 ``//pw_polyfill:overrides`` or ``pw_polyfill.overrides`` targets. In other build
 systems, add ``pw_polyfill/standard_library_public`` and
+``pw_polyfill/public_overrides`` as include paths, and add a ``-include`` option
+for the ``language_features.h`` header.
+
+Backported features
+===================
+==================  ================================  ============================================  ========================================
+Header              Feature                           Level of support                              Feature test macro
+==================  ================================  ============================================  ========================================
+<array>             std::to_array                     full                                          __cpp_lib_to_array
+<bit>               std::endian                       full                                          __cpp_lib_endian
+<cstddef>           std::byte                         full; some operators not constexpr in C++11   __cpp_lib_byte
+<iterator>          std::data, std::size              full                                          __cpp_lib_nonmember_container_access
+<type_traits>       \*_t trait aliases                partial (can expand as needed)                __cpp_lib_transformation_trait_aliases
+<type_traits>       std::is_null_pointer              full                                          __cpp_lib_is_null_pointer
+<utility>           std::integer_sequence & helpers   full                                          __cpp_lib_integer_sequence
+(language feature)  consteval keyword                 ignored (equivalent to constexpr)             __cpp_consteval
+(language feature)  constinit keyword                 supported in clang; ignored in GCC            __cpp_constinit
+(language feature)  static_assert with no message     full                                          __cpp_static_assert
+==================  ================================  ============================================  ========================================
+
+----------------------------------------------------
+Adapt code to compile with different versions of C++
+----------------------------------------------------
+``pw_polyfill`` provides features for adapting to different C++ standards when
+``pw_polyfill:overrides``'s automatic backporting is insufficient:
+
+  - ``pw_polyfill/standard.h`` -- provides a macro for checking the C++ standard
+  - ``pw_polyfill/language_feature_macros.h`` -- provides macros for adapting
+    code to work with or without newer language features
+
+In GN, Bazel, or CMake, depend on ``$dir_pw_polyfill``, ``//pw_polyfill``,
+or ``pw_polyfill``, respectively, to access these features. In other build
+systems, add ``pw_polyfill/standard_library_public`` and
 ``pw_polyfill/public_overrides`` as include paths.
+
+-------------
+Compatibility
+-------------
+C++11
diff --git a/pw_polyfill/language_features.h b/pw_polyfill/language_features.h
new file mode 100644
index 0000000..9527935
--- /dev/null
+++ b/pw_polyfill/language_features.h
@@ -0,0 +1,71 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// This file provides adapters for newer C++ language features so that they can
+// be used in older versions of C++ (though the code will not function exactly
+// the same). This file is not on an include path and is intended to be used
+// with -include when compiling C++.
+//
+// pw_polyfill/language_feature_macros.h provides macro wrappers for a few
+// specific uses of modern C++ keywords.
+#pragma once
+
+// C++11 is required for the features in this header.
+#if defined(__cplusplus) && __cplusplus >= 201103L
+
+// If consteval is not supported, use constexpr. This does not guarantee
+// compile-time execution, but works equivalently in constant expressions.
+#ifndef __cpp_consteval
+#define consteval constexpr
+#endif  // __cpp_consteval
+
+// If constinit is not supported, use a compiler attribute or omit it. If
+// omitted, the compiler may still constant initialize the variable, but there
+// is no guarantee.
+#ifndef __cpp_constinit
+#ifdef __clang__
+#define constinit [[clang::require_constant_initialization]]
+#else
+#define constinit
+#endif  // __clang__
+#endif  // __cpp_constinit
+
+// This is an adapter for supporting static_assert with a single argument in
+// C++11 or C++14. Macros don't correctly parse commas in template expressions,
+// so the static_assert arguments are passed to an overloaded C++ function. The
+// full stringified static_assert arguments are used as the message.
+#if __cpp_static_assert < 201411L
+#undef __cpp_static_assert
+#define __cpp_static_assert 201411L
+
+#define static_assert(...)                                                     \
+  static_assert(::pw::polyfill::internal::StaticAssertExpression(__VA_ARGS__), \
+                #__VA_ARGS__)
+
+namespace pw {
+namespace polyfill {
+namespace internal {
+
+constexpr bool StaticAssertExpression(bool expression) { return expression; }
+
+constexpr bool StaticAssertExpression(bool expression, const char*) {
+  return expression;
+}
+
+}  // namespace internal
+}  // namespace polyfill
+}  // namespace pw
+
+#endif  // __cpp_static_assert < 201411L
+#endif  // defined(__cplusplus) && __cplusplus >= 201103L
diff --git a/pw_polyfill/public/pw_polyfill/language_feature_macros.h b/pw_polyfill/public/pw_polyfill/language_feature_macros.h
new file mode 100644
index 0000000..389493f
--- /dev/null
+++ b/pw_polyfill/public/pw_polyfill/language_feature_macros.h
@@ -0,0 +1,31 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Macros for adapting to older versions of C++. A few keywords (consteval,
+// constinit) are handled by pw_polfyill/language_features.h, which is directly
+// -included by users of pw_polyfill.
+#pragma once
+
+#ifdef __cpp_inline_variables
+#define PW_INLINE_VARIABLE inline
+#else
+#define PW_INLINE_VARIABLE
+#endif  // __cpp_inline_variables
+
+// Mark functions as constexpr if the relaxed constexpr rules are supported.
+#if __cpp_constexpr >= 201304L
+#define PW_CONSTEXPR_FUNCTION constexpr
+#else
+#define PW_CONSTEXPR_FUNCTION
+#endif  // __cpp_constexpr >= 201304L
diff --git a/pw_polyfill/public/pw_polyfill/language_features.h b/pw_polyfill/public/pw_polyfill/language_features.h
deleted file mode 100644
index 0e9477a..0000000
--- a/pw_polyfill/public/pw_polyfill/language_features.h
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2020 The Pigweed Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-//     https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-#pragma once
-
-#ifdef __cpp_inline_variables
-#define PW_INLINE_VARIABLE inline
-#else
-#define PW_INLINE_VARIABLE
-#endif  // __cpp_inline_variables
-
-// Mark functions as constexpr if the relaxed constexpr rules are supported.
-#if __cpp_constexpr >= 201304L
-#define PW_CONSTEXPR_FUNCTION constexpr
-#else
-#define PW_CONSTEXPR_FUNCTION
-#endif  // __cpp_constexpr >= 201304L
-
-// This is an adapter for supporting static_assert with a single argument in
-// C++11 or C++14. Macros don't correctly parse commas in template expressions,
-// so the static_assert arguments are passed to an overloaded C++ function. The
-// full stringified static_assert arguments are used as the message.
-#if __cpp_static_assert < 201411L
-
-#define static_assert(...)                                                     \
-  static_assert(::pw::polyfill::internal::StaticAssertExpression(__VA_ARGS__), \
-                #__VA_ARGS__)
-
-namespace pw {
-namespace polyfill {
-namespace internal {
-
-constexpr bool StaticAssertExpression(bool expression) { return expression; }
-
-constexpr bool StaticAssertExpression(bool expression, const char*) {
-  return expression;
-}
-
-}  // namespace internal
-}  // namespace polyfill
-}  // namespace pw
-
-#endif  // __cpp_static_assert < 201411L
diff --git a/pw_polyfill/public/pw_polyfill/standard.h b/pw_polyfill/public/pw_polyfill/standard.h
index 8e8feeb..d5e1b75 100644
--- a/pw_polyfill/public/pw_polyfill/standard.h
+++ b/pw_polyfill/public/pw_polyfill/standard.h
@@ -13,8 +13,12 @@
 // the License.
 #pragma once
 
+#ifdef __cplusplus
 #define PW_CXX_STANDARD_IS_SUPPORTED(std) \
   (__cplusplus >= _PW_CXX_STANDARD_##std())
+#else
+#define PW_CXX_STANDARD_IS_SUPPORTED(std) (0 >= _PW_CXX_STANDARD_##std())
+#endif
 
 #define _PW_CXX_STANDARD_98() 199711L
 #define _PW_CXX_STANDARD_11() 201103L
diff --git a/pw_sys_io_baremetal_lm3s6965evb/early_boot.c b/pw_polyfill/public_overrides/array
similarity index 86%
copy from pw_sys_io_baremetal_lm3s6965evb/early_boot.c
copy to pw_polyfill/public_overrides/array
index 1670b7d..b3ade61 100644
--- a/pw_sys_io_baremetal_lm3s6965evb/early_boot.c
+++ b/pw_polyfill/public_overrides/array
@@ -11,7 +11,8 @@
 // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 // License for the specific language governing permissions and limitations under
 // the License.
+#pragma once
 
-#include "pw_boot_armv7m/boot.h"
+#include_next <array>
 
-void pw_PreStaticConstructorInit() {}
\ No newline at end of file
+#include "pw_polyfill/standard_library/array.h"
diff --git a/pw_sys_io_baremetal_lm3s6965evb/early_boot.c b/pw_polyfill/public_overrides/bit
similarity index 79%
copy from pw_sys_io_baremetal_lm3s6965evb/early_boot.c
copy to pw_polyfill/public_overrides/bit
index 1670b7d..3d5460a 100644
--- a/pw_sys_io_baremetal_lm3s6965evb/early_boot.c
+++ b/pw_polyfill/public_overrides/bit
@@ -11,7 +11,10 @@
 // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 // License for the specific language governing permissions and limitations under
 // the License.
+#pragma once
 
-#include "pw_boot_armv7m/boot.h"
+#if __has_include_next(<bit>)
+#include_next <bit>
+#endif  // __has_include_next(<bit>)
 
-void pw_PreStaticConstructorInit() {}
\ No newline at end of file
+#include "pw_polyfill/standard_library/bit.h"
diff --git a/pw_sys_io_baremetal_lm3s6965evb/early_boot.c b/pw_polyfill/public_overrides/utility
similarity index 86%
copy from pw_sys_io_baremetal_lm3s6965evb/early_boot.c
copy to pw_polyfill/public_overrides/utility
index 1670b7d..8d22206 100644
--- a/pw_sys_io_baremetal_lm3s6965evb/early_boot.c
+++ b/pw_polyfill/public_overrides/utility
@@ -11,7 +11,8 @@
 // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 // License for the specific language governing permissions and limitations under
 // the License.
+#pragma once
 
-#include "pw_boot_armv7m/boot.h"
+#include_next <utility>
 
-void pw_PreStaticConstructorInit() {}
\ No newline at end of file
+#include "pw_polyfill/standard_library/utility.h"
diff --git a/pw_polyfill/standard_library_public/pw_polyfill/standard_library/array.h b/pw_polyfill/standard_library_public/pw_polyfill/standard_library/array.h
new file mode 100644
index 0000000..441cd79
--- /dev/null
+++ b/pw_polyfill/standard_library_public/pw_polyfill/standard_library/array.h
@@ -0,0 +1,55 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <array>
+#include <type_traits>
+#include <utility>
+
+#include "pw_polyfill/standard_library/namespace.h"
+
+#ifndef __cpp_lib_to_array
+#define __cpp_lib_to_array 201907L
+
+_PW_POLYFILL_BEGIN_NAMESPACE_STD
+
+namespace impl {
+
+template <typename T, size_t size, size_t... indices>
+constexpr array<remove_cv_t<T>, size> CopyArray(const T (&values)[size],
+                                                index_sequence<indices...>) {
+  return {{values[indices]...}};
+}
+
+template <typename T, size_t size, size_t... indices>
+constexpr array<remove_cv_t<T>, size> MoveArray(T(&&values)[size],
+                                                index_sequence<indices...>) {
+  return {{move(values[indices])...}};
+}
+
+}  // namespace impl
+
+template <typename T, size_t size>
+constexpr array<remove_cv_t<T>, size> to_array(T (&values)[size]) {
+  return impl::CopyArray(values, make_index_sequence<size>{});
+}
+
+template <typename T, size_t size>
+constexpr array<remove_cv_t<T>, size> to_array(T(&&values)[size]) {
+  return impl::MoveArray(move(values), make_index_sequence<size>{});
+}
+
+_PW_POLYFILL_END_NAMESPACE_STD
+
+#endif  // __cpp_lib_to_array
diff --git a/pw_sys_io_baremetal_lm3s6965evb/early_boot.c b/pw_polyfill/standard_library_public/pw_polyfill/standard_library/bit.h
similarity index 63%
copy from pw_sys_io_baremetal_lm3s6965evb/early_boot.c
copy to pw_polyfill/standard_library_public/pw_polyfill/standard_library/bit.h
index 1670b7d..72cea9e 100644
--- a/pw_sys_io_baremetal_lm3s6965evb/early_boot.c
+++ b/pw_polyfill/standard_library_public/pw_polyfill/standard_library/bit.h
@@ -11,7 +11,21 @@
 // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 // License for the specific language governing permissions and limitations under
 // the License.
+#pragma once
 
-#include "pw_boot_armv7m/boot.h"
+#include "pw_polyfill/standard_library/namespace.h"
 
-void pw_PreStaticConstructorInit() {}
\ No newline at end of file
+_PW_POLYFILL_BEGIN_NAMESPACE_STD
+
+#ifndef __cpp_lib_endian
+#define __cpp_lib_endian 201907L
+
+enum class endian {
+  little = __ORDER_LITTLE_ENDIAN__,
+  big = __ORDER_BIG_ENDIAN__,
+  native = __BYTE_ORDER__,
+};
+
+#endif  // __cpp_lib_endian
+
+_PW_POLYFILL_END_NAMESPACE_STD
diff --git a/pw_polyfill/standard_library_public/pw_polyfill/standard_library/cstddef.h b/pw_polyfill/standard_library_public/pw_polyfill/standard_library/cstddef.h
index 4f086b5..763224c 100644
--- a/pw_polyfill/standard_library_public/pw_polyfill/standard_library/cstddef.h
+++ b/pw_polyfill/standard_library_public/pw_polyfill/standard_library/cstddef.h
@@ -19,6 +19,7 @@
 
 // Defines the std::byte type if it is not present.
 #ifndef __cpp_lib_byte
+#define __cpp_lib_byte 201603L
 
 _PW_POLYFILL_BEGIN_NAMESPACE_STD
 
diff --git a/pw_polyfill/standard_library_public/pw_polyfill/standard_library/iterator.h b/pw_polyfill/standard_library_public/pw_polyfill/standard_library/iterator.h
index 027ddf9..2a49b92 100644
--- a/pw_polyfill/standard_library_public/pw_polyfill/standard_library/iterator.h
+++ b/pw_polyfill/standard_library_public/pw_polyfill/standard_library/iterator.h
@@ -19,6 +19,7 @@
 
 // Define std::data and std::size.
 #ifndef __cpp_lib_nonmember_container_access
+#define __cpp_lib_nonmember_container_access 201411L
 
 #include <cstddef>
 
diff --git a/pw_polyfill/standard_library_public/pw_polyfill/standard_library/type_traits.h b/pw_polyfill/standard_library_public/pw_polyfill/standard_library/type_traits.h
index e91e5ae..33ea9d1 100644
--- a/pw_polyfill/standard_library_public/pw_polyfill/standard_library/type_traits.h
+++ b/pw_polyfill/standard_library_public/pw_polyfill/standard_library/type_traits.h
@@ -22,6 +22,7 @@
 // Defines std:foo_t aliases for typename foo::type. This is a small subset of
 // <type_traits> which may be expanded as needed.
 #ifndef __cpp_lib_transformation_trait_aliases
+#define __cpp_lib_transformation_trait_aliases 201304L
 
 template <decltype(sizeof(int)) Len, decltype(sizeof(int)) Align>
 using aligned_storage_t = typename aligned_storage<Len, Align>::type;
@@ -56,6 +57,7 @@
 #endif  // __cpp_lib_transformation_trait_aliases
 
 #ifndef __cpp_lib_is_null_pointer
+#define __cpp_lib_is_null_pointer 201309L
 
 template <typename T>
 struct is_null_pointer : std::is_same<decltype(nullptr), std::remove_cv_t<T>> {
diff --git a/pw_polyfill/standard_library_public/pw_polyfill/standard_library/utility.h b/pw_polyfill/standard_library_public/pw_polyfill/standard_library/utility.h
new file mode 100644
index 0000000..026068e
--- /dev/null
+++ b/pw_polyfill/standard_library_public/pw_polyfill/standard_library/utility.h
@@ -0,0 +1,57 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <utility>
+
+#include "pw_polyfill/standard_library/namespace.h"
+
+#ifndef __cpp_lib_integer_sequence
+#define __cpp_lib_integer_sequence 201304L
+
+_PW_POLYFILL_BEGIN_NAMESPACE_STD
+
+template <typename T, T... sequence>
+struct integer_sequence {
+  static constexpr size_t size() noexcept { return sizeof...(sequence); }
+};
+
+namespace impl {
+
+// In the absence of a compiler builtin for this, have MakeSequence expand
+// recursively to enumerate all indices up to count.
+template <size_t count, typename T, T... sequence>
+struct MakeSequence : MakeSequence<count - 1, T, count - 1, sequence...> {};
+
+template <typename T, T... sequence>
+struct MakeSequence<0, T, sequence...> : std::integer_sequence<T, sequence...> {
+};
+
+}  // namespace impl
+
+template <size_t... sequence>
+using index_sequence = integer_sequence<size_t, sequence...>;
+
+template <typename T, T count>
+using make_integer_sequence = impl::MakeSequence<count, T>;
+
+template <size_t count>
+using make_index_sequence = make_integer_sequence<size_t, count>;
+
+template <typename... T>
+using index_sequence_for = make_index_sequence<sizeof...(T)>;
+
+_PW_POLYFILL_END_NAMESPACE_STD
+
+#endif  // __cpp_lib_integer_sequence
diff --git a/pw_polyfill/test.cc b/pw_polyfill/test.cc
index dfc50c1..27d2a19 100644
--- a/pw_polyfill/test.cc
+++ b/pw_polyfill/test.cc
@@ -15,11 +15,13 @@
 #include <array>
 
 #include "gtest/gtest.h"
-#include "pw_polyfill/language_features.h"
+#include "pw_polyfill/language_feature_macros.h"
 #include "pw_polyfill/standard.h"
+#include "pw_polyfill/standard_library/bit.h"
 #include "pw_polyfill/standard_library/cstddef.h"
 #include "pw_polyfill/standard_library/iterator.h"
 #include "pw_polyfill/standard_library/type_traits.h"
+#include "pw_polyfill/standard_library/utility.h"
 
 namespace pw {
 namespace polyfill {
@@ -44,6 +46,52 @@
 static_assert(!PW_CXX_STANDARD_IS_SUPPORTED(17), "C++17 must be supported");
 #endif  // __cplusplus >= 201703L
 
+TEST(Array, ToArray_StringLiteral) {
+  std::array<char, sizeof("literally!")> array = std::to_array("literally!");
+  EXPECT_TRUE(std::strcmp(array.data(), "literally!") == 0);
+}
+
+TEST(Array, ToArray_Inline) {
+  constexpr std::array<int, 3> kArray = std::to_array({1, 2, 3});
+  static_assert(kArray.size() == 3);
+  EXPECT_TRUE(kArray[0] == 1);
+}
+
+TEST(Array, ToArray_Array) {
+  char c_array[] = "array!";
+  std::array<char, sizeof("array!")> array = std::to_array(c_array);
+  EXPECT_TRUE(std::strcmp(array.data(), "array!") == 0);
+}
+
+struct MoveOnly {
+  MoveOnly(char ch) : value(ch) {}
+
+  MoveOnly(const MoveOnly&) = delete;
+  MoveOnly& operator=(const MoveOnly&) = delete;
+
+  MoveOnly(MoveOnly&&) = default;
+  MoveOnly& operator=(MoveOnly&&) = default;
+
+  char value;
+};
+
+TEST(Array, ToArray_MoveOnly) {
+  MoveOnly c_array[]{MoveOnly('a'), MoveOnly('b')};
+  std::array<MoveOnly, 2> array = std::to_array(std::move(c_array));
+  EXPECT_TRUE(array[0].value == 'a');
+  EXPECT_TRUE(array[1].value == 'b');
+}
+
+TEST(Bit, Endian) {
+  if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) {
+    EXPECT_TRUE(std::endian::native == std::endian::big);
+  } else if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) {
+    EXPECT_TRUE(std::endian::native == std::endian::little);
+  } else {
+    FAIL();
+  }
+}
+
 TEST(Cstddef, Byte_Operators) {
   std::byte value = std::byte(0);
   EXPECT_TRUE((value | std::byte(0x12)) == std::byte(0x12));
@@ -63,7 +111,11 @@
   EXPECT_TRUE((value >>= 5) == std::byte(0x6));
 }
 
-int c_array[5423];
+// Check that consteval is at least equivalent to constexpr.
+consteval int ConstevalFunction() { return 123; }
+static_assert(ConstevalFunction() == 123);
+
+int c_array[5423] = {};
 std::array<int, 32> array;
 
 TEST(Iterator, Size) {
@@ -76,6 +128,15 @@
   EXPECT_TRUE(std::data(array) == array.data());
 }
 
+constinit bool mutable_value = true;
+
+TEST(Constinit, ValueIsMutable) {
+  ASSERT_TRUE(mutable_value);
+  mutable_value = false;
+  ASSERT_FALSE(mutable_value);
+  mutable_value = true;
+}
+
 TEST(TypeTraits, Aliases) {
   static_assert(
       std::is_same<std::aligned_storage_t<40, 40>,
@@ -120,6 +181,13 @@
                 "Alias must be defined");
 }
 
+TEST(Utility, IntegerSequence) {
+  static_assert(std::integer_sequence<int>::size() == 0);
+  static_assert(std::integer_sequence<int, 9, 8, 7>::size() == 3);
+  static_assert(std::make_index_sequence<1>::size() == 1);
+  static_assert(std::make_index_sequence<123>::size() == 123);
+}
+
 }  // namespace
 }  // namespace polyfill
 }  // namespace pw
diff --git a/pw_preprocessor/BUILD b/pw_preprocessor/BUILD
index 1b937ce..d0d14d7 100644
--- a/pw_preprocessor/BUILD
+++ b/pw_preprocessor/BUILD
@@ -24,14 +24,15 @@
 
 pw_cc_library(
     name = "pw_preprocessor",
+    srcs = ["public/pw_preprocessor/internal/arg_count_impl.h"],
     hdrs = glob(["public/pw_preprocessor/*.h"]),
     includes = ["public"],
 )
 
 TESTS = [
+    "arguments_test",
     "boolean_test",
     "concat_test",
-    "macro_arg_count_test",
     "util_test",
 ]
 
diff --git a/pw_preprocessor/BUILD.gn b/pw_preprocessor/BUILD.gn
index dff5a11..6f9a48f 100644
--- a/pw_preprocessor/BUILD.gn
+++ b/pw_preprocessor/BUILD.gn
@@ -12,12 +12,12 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/target_types.gni")
 import("$dir_pw_docgen/docs.gni")
 import("$dir_pw_unit_test/test.gni")
+
 config("default_config") {
   include_dirs = [ "public" ]
 }
@@ -25,13 +25,13 @@
 pw_source_set("pw_preprocessor") {
   public_configs = [ ":default_config" ]
   public = [
+    "public/pw_preprocessor/arguments.h",
     "public/pw_preprocessor/boolean.h",
     "public/pw_preprocessor/compiler.h",
     "public/pw_preprocessor/concat.h",
-    "public/pw_preprocessor/macro_arg_count.h",
     "public/pw_preprocessor/util.h",
   ]
-  sources = public
+  sources = [ "public/pw_preprocessor/internal/arg_count_impl.h" ]
 }
 
 pw_doc_group("docs") {
@@ -41,13 +41,18 @@
 # All pw_preprocessor test binaries.
 pw_test_group("tests") {
   tests = [
+    ":arguments_test",
     ":boolean_test",
     ":concat_test",
-    ":macro_arg_count_test",
     ":util_test",
   ]
 }
 
+pw_test("arguments_test") {
+  deps = [ ":pw_preprocessor" ]
+  sources = [ "arguments_test.cc" ]
+}
+
 pw_test("boolean_test") {
   deps = [ ":pw_preprocessor" ]
   sources = [ "boolean_test.cc" ]
@@ -58,11 +63,6 @@
   sources = [ "concat_test.cc" ]
 }
 
-pw_test("macro_arg_count_test") {
-  deps = [ ":pw_preprocessor" ]
-  sources = [ "macro_arg_count_test.cc" ]
-}
-
 pw_test("util_test") {
   deps = [ ":pw_preprocessor" ]
   sources = [ "util_test.cc" ]
diff --git a/pw_preprocessor/CMakeLists.txt b/pw_preprocessor/CMakeLists.txt
index 75dd616..3e893a0 100644
--- a/pw_preprocessor/CMakeLists.txt
+++ b/pw_preprocessor/CMakeLists.txt
@@ -12,4 +12,6 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+
 pw_auto_add_simple_module(pw_preprocessor)
diff --git a/pw_preprocessor/arguments_test.cc b/pw_preprocessor/arguments_test.cc
new file mode 100644
index 0000000..4196298
--- /dev/null
+++ b/pw_preprocessor/arguments_test.cc
@@ -0,0 +1,378 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Many of these tests are static asserts. If these compile, they pass. The TEST
+// functions are used for organization only.
+
+#include "pw_preprocessor/arguments.h"
+
+#include <tuple>
+
+#include "pw_unit_test/framework.h"
+
+namespace pw {
+namespace {
+
+#define EMPTY_ARG
+
+TEST(HasArgs, WithoutArguments) {
+  static_assert(PW_HAS_ARGS() == 0);
+  static_assert(PW_HAS_ARGS(/**/) == 0);
+  static_assert(PW_HAS_ARGS(/* uhm, hi */) == 0);
+  static_assert(PW_HAS_ARGS(EMPTY_ARG) == 0);
+
+  // Test how the macro handles whitespace and comments.
+  // clang-format off
+  static_assert(PW_HAS_ARGS(     ) == 0);
+  static_assert(PW_HAS_ARGS(
+      ) == 0);
+  static_assert(PW_HAS_ARGS(
+      // wow
+      // This is a comment.
+      ) == 0);
+  // clang-format on
+
+  static_assert(PW_EMPTY_ARGS() == 1);
+  static_assert(PW_EMPTY_ARGS(/* hello */) == 1);
+  static_assert(PW_EMPTY_ARGS(
+                    // hello
+                    /* goodbye */) == 1);
+}
+
+TEST(HasArgs, WithArguments) {
+  static_assert(PW_HAS_ARGS(()) == 1);
+  static_assert(PW_HAS_ARGS(0) == 1);
+  static_assert(PW_HAS_ARGS(, ) == 1);
+  static_assert(PW_HAS_ARGS(a, b, c) == 1);
+  static_assert(PW_HAS_ARGS(PW_HAS_ARGS) == 1);
+  static_assert(PW_HAS_ARGS(PW_HAS_ARGS()) == 1);
+
+  static_assert(PW_EMPTY_ARGS(0) == 0);
+  static_assert(PW_EMPTY_ARGS(, ) == 0);
+  static_assert(PW_EMPTY_ARGS(a, b, c) == 0);
+  static_assert(PW_EMPTY_ARGS(PW_HAS_ARGS) == 0);
+  static_assert(PW_EMPTY_ARGS(PW_HAS_ARGS()) == 0);
+}
+
+constexpr int TestFunc(int arg, ...) { return arg; }
+
+#define CALL_FUNCTION(arg, ...) TestFunc(arg PW_COMMA_ARGS(__VA_ARGS__))
+
+template <typename T, typename... Args>
+constexpr T TemplateArgCount() {
+  return sizeof...(Args);
+}
+
+#define COUNT_ARGS_TEMPLATE(...) \
+  TemplateArgCount<int PW_COMMA_ARGS(__VA_ARGS__)>()
+
+TEST(CommaVarargs, NoArguments) {
+  static_assert(TestFunc(0 PW_COMMA_ARGS()) == 0);
+  static_assert(TestFunc(1 /* whoa */ PW_COMMA_ARGS(
+                    /* this macro */) /* is cool! */) == 1);
+
+  static_assert(TemplateArgCount<int PW_COMMA_ARGS()>() == 0);
+  static_assert(TemplateArgCount<int PW_COMMA_ARGS(/* nothing */)>() == 0);
+
+  static_assert(CALL_FUNCTION(2) == 2);
+  static_assert(CALL_FUNCTION(3, ) == 3);
+  static_assert(CALL_FUNCTION(4, /* nothing */) == 4);
+
+  static_assert(COUNT_ARGS_TEMPLATE() == 0);
+  static_assert(COUNT_ARGS_TEMPLATE(/* nothing */) == 0);
+}
+
+TEST(CommaVarargs, WithArguments) {
+  static_assert(TestFunc(0 PW_COMMA_ARGS(1)) == 0);
+  static_assert(TestFunc(1 PW_COMMA_ARGS(1, 2)) == 1);
+  static_assert(TestFunc(2 PW_COMMA_ARGS(1, 2, "three")) == 2);
+
+  static_assert(TemplateArgCount<int PW_COMMA_ARGS(bool)>() == 1);
+  static_assert(TemplateArgCount<int PW_COMMA_ARGS(char, const char*)>() == 2);
+  static_assert(TemplateArgCount<int PW_COMMA_ARGS(int, char, const char*)>() ==
+                3);
+
+  static_assert(CALL_FUNCTION(3) == 3);
+  static_assert(CALL_FUNCTION(4, ) == 4);
+  static_assert(CALL_FUNCTION(5, /* nothing */) == 5);
+
+  static_assert(COUNT_ARGS_TEMPLATE(int) == 1);
+  static_assert(COUNT_ARGS_TEMPLATE(int, int) == 2);
+  static_assert(COUNT_ARGS_TEMPLATE(int, int, int) == 3);
+}
+
+TEST(CommaVarargs, EmptyFinalArgument) {
+  static_assert(COUNT_ARGS_TEMPLATE(EMPTY_ARG) == 0);
+  static_assert(COUNT_ARGS_TEMPLATE(int, ) == 1);
+  static_assert(COUNT_ARGS_TEMPLATE(int, EMPTY_ARG) == 1);
+  static_assert(COUNT_ARGS_TEMPLATE(int, /* EMPTY_ARG */) == 1);
+  static_assert(COUNT_ARGS_TEMPLATE(int, int, ) == 2);
+  static_assert(COUNT_ARGS_TEMPLATE(int, int, int, ) == 3);
+  static_assert(COUNT_ARGS_TEMPLATE(int, int, int, EMPTY_ARG) == 3);
+}
+
+// This test demonstrates that PW_COMMA_ARGS behaves unexpectedly when it is
+// used when invoking another macro. DO NOT use PW_COMMA_ARGS when invoking
+// another macro!
+#define BAD_DEMO(fmt, ...) _BAD_DEMO_ADD_123(fmt PW_COMMA_ARGS(__VA_ARGS__))
+
+#define _BAD_DEMO_ADD_123(fmt, ...) \
+  _BAD_DEMO_CAPTURE_ARGS("%d: " fmt, 123 PW_COMMA_ARGS(__VA_ARGS__))
+
+#define _BAD_DEMO_CAPTURE_ARGS(...) std::make_tuple(__VA_ARGS__)
+
+TEST(CommaVarargs, MisbehavesWithMacroToMacroUse_NoArgs_ArgsAreOkay) {
+  auto [a1, a2] = BAD_DEMO("Hello world");
+  EXPECT_STREQ(a1, "%d: Hello world");
+  EXPECT_EQ(a2, 123);
+}
+
+TEST(CommaVarargs, MisbehavesWithMacroToMacroUse_WithArgs_ArgsOutOfOrder) {
+  // If there is an additional argument, the order is incorrect! The 123
+  // argument should go before the "world?" argument, but it is inserted after.
+  // This would be a compilation error if these arguments were passed to printf.
+  // What's worse is that this can silently fail if the arguments happen to be
+  // compatible types.
+  const auto [a1, a2, a3] = BAD_DEMO("Hello %s", "world?");
+  EXPECT_STREQ(a1, "%d: Hello %s");
+  EXPECT_STREQ(a2, "world?");
+  EXPECT_EQ(a3, 123);
+}
+
+TEST(CountArgs, Zero) {
+  static_assert(PW_MACRO_ARG_COUNT() == 0);
+  static_assert(PW_MACRO_ARG_COUNT(/**/) == 0);
+  static_assert(PW_MACRO_ARG_COUNT(/* uhm, hi */) == 0);
+
+  // clang-format off
+  static_assert(PW_MACRO_ARG_COUNT(     ) == 0);
+  static_assert(PW_MACRO_ARG_COUNT(
+      ) == 0);
+  static_assert(PW_MACRO_ARG_COUNT(
+      // wow
+      // This is a comment.
+      ) == 0);
+  // clang-format on
+}
+
+TEST(CountArgs, Commas) {
+  // clang-format off
+  static_assert(PW_MACRO_ARG_COUNT(,) == 2);
+  static_assert(PW_MACRO_ARG_COUNT(,,) == 3);
+  static_assert(PW_MACRO_ARG_COUNT(,,,) == 4);
+  // clang-format on
+  static_assert(PW_MACRO_ARG_COUNT(, ) == 2);
+  static_assert(PW_MACRO_ARG_COUNT(, , ) == 3);
+  static_assert(PW_MACRO_ARG_COUNT(, , , ) == 4);
+}
+
+TEST(CountArgs, Parentheses) {
+  static_assert(PW_MACRO_ARG_COUNT(()) == 1);
+  static_assert(PW_MACRO_ARG_COUNT((1, 2, 3, 4)) == 1);
+  static_assert(PW_MACRO_ARG_COUNT((1, 2, 3), (1, 2, 3, 4)) == 2);
+  static_assert(PW_MACRO_ARG_COUNT((), ()) == 2);
+  static_assert(PW_MACRO_ARG_COUNT((-), (o)) == 2);
+  static_assert(PW_MACRO_ARG_COUNT((, , (, , ), ), (123, 4)) == 2);
+  static_assert(PW_MACRO_ARG_COUNT(1, (2, 3, 4), (<5, 6>)) == 3);
+}
+
+template <typename... Args>
+constexpr size_t FunctionArgCount(Args...) {
+  return sizeof...(Args);
+}
+
+static_assert(FunctionArgCount() == 0);
+static_assert(FunctionArgCount(1) == 1);
+static_assert(FunctionArgCount(1, 2) == 2);
+
+TEST(CountFunctionArgs, NonEmptyLastArg) {
+  static_assert(PW_FUNCTION_ARG_COUNT(a) == 1);
+  static_assert(PW_FUNCTION_ARG_COUNT(1, 2) == 2);
+  static_assert(PW_FUNCTION_ARG_COUNT(1, 2, 3) == 3);
+}
+
+TEST(CountFunctionArgs, EmptyLastArg) {
+  static_assert(PW_FUNCTION_ARG_COUNT() == 0);
+  static_assert(PW_FUNCTION_ARG_COUNT(a, ) == 1);
+  static_assert(PW_FUNCTION_ARG_COUNT(1, 2, ) == 2);
+  static_assert(PW_FUNCTION_ARG_COUNT(1, 2, 3, ) == 3);
+
+  static_assert(PW_FUNCTION_ARG_COUNT(a, EMPTY_ARG) == 1);
+  static_assert(PW_FUNCTION_ARG_COUNT(1, 2, EMPTY_ARG) == 2);
+  static_assert(PW_FUNCTION_ARG_COUNT(1, 2, 3, EMPTY_ARG) == 3);
+}
+
+constexpr const char* Value(const char* str = nullptr) { return str; }
+
+TEST(LastArg, NonEmptyLastArg) {
+  constexpr const char* last = "last!";
+  static_assert(Value(PW_LAST_ARG(last)) == last);
+  static_assert(Value(PW_LAST_ARG(1, last)) == last);
+  static_assert(Value(PW_LAST_ARG(1, 2, last)) == last);
+}
+
+TEST(LastArg, EmptyLastArg) {
+  static_assert(Value(PW_LAST_ARG()) == nullptr);
+  static_assert(Value(PW_LAST_ARG(1, )) == nullptr);
+  static_assert(Value(PW_LAST_ARG(1, 2, )) == nullptr);
+  static_assert(Value(PW_LAST_ARG(1, 2, 3, )) == nullptr);
+}
+
+TEST(DropLastArg, NonEmptyLastArg) {
+  static_assert(FunctionArgCount(PW_DROP_LAST_ARG(1)) == 0);
+  static_assert(FunctionArgCount(PW_DROP_LAST_ARG(1, 2)) == 1);
+  static_assert(FunctionArgCount(PW_DROP_LAST_ARG(1, 2, 3)) == 2);
+}
+
+TEST(DropLastArg, EmptyLastArg) {
+  static_assert(FunctionArgCount(PW_DROP_LAST_ARG()) == 0);
+  static_assert(FunctionArgCount(PW_DROP_LAST_ARG(1, )) == 1);
+  static_assert(FunctionArgCount(PW_DROP_LAST_ARG(1, 2, )) == 2);
+  static_assert(FunctionArgCount(PW_DROP_LAST_ARG(1, 2, 3, )) == 3);
+}
+
+TEST(DropLastArgIfEmpty, NonEmptyLastArg) {
+  static_assert(FunctionArgCount(PW_DROP_LAST_ARG_IF_EMPTY(1)) == 1);
+  static_assert(FunctionArgCount(PW_DROP_LAST_ARG_IF_EMPTY(1, 2)) == 2);
+  static_assert(FunctionArgCount(PW_DROP_LAST_ARG_IF_EMPTY(1, 2, 3)) == 3);
+}
+
+TEST(DropLastArgIfEmpty, EmptyLastArg) {
+  static_assert(FunctionArgCount(PW_DROP_LAST_ARG_IF_EMPTY()) == 0);
+  static_assert(FunctionArgCount(PW_DROP_LAST_ARG_IF_EMPTY(1, )) == 1);
+  static_assert(FunctionArgCount(PW_DROP_LAST_ARG_IF_EMPTY(1, 2, )) == 2);
+  static_assert(FunctionArgCount(PW_DROP_LAST_ARG_IF_EMPTY(1, 2, 3, )) == 3);
+}
+
+#define SOME_VARIADIC_MACRO(...) PW_MACRO_ARG_COUNT(__VA_ARGS__)
+
+#define ANOTHER_VARIADIC_MACRO(arg, ...) SOME_VARIADIC_MACRO(__VA_ARGS__)
+
+#define ALWAYS_ONE_ARG(...) SOME_VARIADIC_MACRO((__VA_ARGS__))
+
+TEST(CountArgs, NestedMacros) {
+  static_assert(SOME_VARIADIC_MACRO() == 0);
+  static_assert(SOME_VARIADIC_MACRO(X1) == 1);
+  static_assert(SOME_VARIADIC_MACRO(X1, X2) == 2);
+  static_assert(SOME_VARIADIC_MACRO(X1, X2, X3) == 3);
+  static_assert(SOME_VARIADIC_MACRO(X1, X2, X3, X4) == 4);
+  static_assert(SOME_VARIADIC_MACRO(X1, X2, X3, X4, X5) == 5);
+
+  static_assert(ANOTHER_VARIADIC_MACRO() == 0);
+  static_assert(ANOTHER_VARIADIC_MACRO(X0) == 0);
+  static_assert(ANOTHER_VARIADIC_MACRO(X0, X1) == 1);
+  static_assert(ANOTHER_VARIADIC_MACRO(X0, X1, X2) == 2);
+  static_assert(ANOTHER_VARIADIC_MACRO(X0, X1, X2, X3) == 3);
+  static_assert(ANOTHER_VARIADIC_MACRO(X0, X1, X2, X3, X4) == 4);
+  static_assert(ANOTHER_VARIADIC_MACRO(X0, X1, X2, X3, X4, X5) == 5);
+
+  static_assert(ALWAYS_ONE_ARG() == 1);
+  static_assert(ALWAYS_ONE_ARG(X0) == 1);
+  static_assert(ALWAYS_ONE_ARG(X0, X1) == 1);
+  static_assert(ALWAYS_ONE_ARG(X0, X1, X2) == 1);
+  static_assert(ALWAYS_ONE_ARG(X0, X1, X2, X3) == 1);
+  static_assert(ALWAYS_ONE_ARG(X0, X1, X2, X3, X4) == 1);
+  static_assert(ALWAYS_ONE_ARG(X0, X1, X2, X3, X4, X5) == 1);
+}
+
+/* Tests all supported arg counts. This test was generated by the following
+   Python 3 code:
+for i in range(64 + 1):
+  args = [f'X{x}' for x in range(1, i + 1)]
+  print(f'  static_assert(PW_MACRO_ARG_COUNT({", ".join(args)}) == {i});')
+*/
+TEST(CountArgs, AllSupported) {
+  // clang-format off
+  static_assert(PW_MACRO_ARG_COUNT() == 0);
+  static_assert(PW_MACRO_ARG_COUNT(X1) == 1);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2) == 2);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3) == 3);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4) == 4);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5) == 5);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6) == 6);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7) == 7);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8) == 8);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9) == 9);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10) == 10);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11) == 11);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12) == 12);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13) == 13);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14) == 14);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15) == 15);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16) == 16);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17) == 17);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18) == 18);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19) == 19);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20) == 20);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21) == 21);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22) == 22);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23) == 23);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24) == 24);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25) == 25);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26) == 26);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27) == 27);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28) == 28);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29) == 29);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30) == 30);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31) == 31);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32) == 32);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33) == 33);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34) == 34);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35) == 35);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36) == 36);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37) == 37);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38) == 38);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39) == 39);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40) == 40);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41) == 41);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42) == 42);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43) == 43);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44) == 44);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45) == 45);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45, X46) == 46);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45, X46, X47) == 47);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45, X46, X47, X48) == 48);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45, X46, X47, X48, X49) == 49);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45, X46, X47, X48, X49, X50) == 50);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45, X46, X47, X48, X49, X50, X51) == 51);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45, X46, X47, X48, X49, X50, X51, X52) == 52);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45, X46, X47, X48, X49, X50, X51, X52, X53) == 53);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45, X46, X47, X48, X49, X50, X51, X52, X53, X54) == 54);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45, X46, X47, X48, X49, X50, X51, X52, X53, X54, X55) == 55);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45, X46, X47, X48, X49, X50, X51, X52, X53, X54, X55, X56) == 56);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45, X46, X47, X48, X49, X50, X51, X52, X53, X54, X55, X56, X57) == 57);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45, X46, X47, X48, X49, X50, X51, X52, X53, X54, X55, X56, X57, X58) == 58);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45, X46, X47, X48, X49, X50, X51, X52, X53, X54, X55, X56, X57, X58, X59) == 59);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45, X46, X47, X48, X49, X50, X51, X52, X53, X54, X55, X56, X57, X58, X59, X60) == 60);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45, X46, X47, X48, X49, X50, X51, X52, X53, X54, X55, X56, X57, X58, X59, X60, X61) == 61);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45, X46, X47, X48, X49, X50, X51, X52, X53, X54, X55, X56, X57, X58, X59, X60, X61, X62) == 62);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45, X46, X47, X48, X49, X50, X51, X52, X53, X54, X55, X56, X57, X58, X59, X60, X61, X62, X63) == 63);
+  static_assert(PW_MACRO_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45, X46, X47, X48, X49, X50, X51, X52, X53, X54, X55, X56, X57, X58, X59, X60, X61, X62, X63, X64) == 64);
+  // clang-format on
+}
+
+TEST(DelegateByArgCount, WithoutAndWithoutArguments) {
+#define TEST_SUM0() (0)
+#define TEST_SUM1(a) (a)
+#define TEST_SUM2(a, b) ((a) + (b))
+#define TEST_SUM3(a, b, c) ((a) + (b) + (c))
+
+  static_assert(PW_DELEGATE_BY_ARG_COUNT(TEST_SUM) == 0);
+  static_assert(PW_DELEGATE_BY_ARG_COUNT(TEST_SUM, 5) == 5);
+  static_assert(PW_DELEGATE_BY_ARG_COUNT(TEST_SUM, 1, 2) == 3);
+  static_assert(PW_DELEGATE_BY_ARG_COUNT(TEST_SUM, 1, 2, 3) == 6);
+}
+
+}  // namespace
+}  // namespace pw
diff --git a/pw_preprocessor/docs.rst b/pw_preprocessor/docs.rst
index ae08b13..9602a76 100644
--- a/pw_preprocessor/docs.rst
+++ b/pw_preprocessor/docs.rst
@@ -1,8 +1,4 @@
-.. _chapter-pw-preprocessor:
-
-.. default-domain:: cpp
-
-.. highlight:: sh
+.. _module-pw_preprocessor:
 
 ---------------
 pw_preprocessor
@@ -17,6 +13,51 @@
 =======
 The preprocessor module provides several headers.
 
+pw_preprocessor/arguments.h
+---------------------------
+Defines macros for handling variadic arguments to function-like macros. Macros
+include the following:
+
+.. c:function:: PW_DELEGATE_BY_ARG_COUNT(name, ...)
+
+  Selects and invokes a macro based on the number of arguments provided. Expands
+  to ``<name><arg_count>(...)``. For example,
+  ``PW_DELEGATE_BY_ARG_COUNT(foo_, 1, 2, 3)`` expands to ``foo_3(1, 2, 3)``.
+
+  This example shows how ``PW_DELEGATE_BY_ARG_COUNT`` could be used to log a
+  customized message based on the number of arguments provided.
+
+  .. code-block:: cpp
+
+      #define ARG_PRINT(...)  PW_DELEGATE_BY_ARG_COUNT(_ARG_PRINT, __VA_ARGS__)
+      #define _ARG_PRINT_0(a)        LOG_INFO("nothing!")
+      #define _ARG_PRINT_1(a)        LOG_INFO("1 arg: %s", a)
+      #define _ARG_PRINT_2(a, b)     LOG_INFO("2 args: %s, %s", a, b)
+      #define _ARG_PRINT_3(a, b, c)  LOG_INFO("3 args: %s, %s, %s", a, b, c)
+
+  When used, ``ARG_PRINT`` expands to the ``_ARG_PRINT_#`` macro corresponding
+  to the number of arguments.
+
+  .. code-block:: cpp
+
+      ARG_PRINT();               // Outputs: nothing!
+      ARG_PRINT("a");            // Outputs: 1 arg: a
+      ARG_PRINT("a", "b");       // Outputs: 2 args: a, b
+      ARG_PRINT("a", "b", "c");  // Outputs: 3 args: a, b, c
+
+.. c:function:: PW_COMMA_ARGS(...)
+
+  Expands to a comma followed by the arguments if any arguments are provided.
+  Otherwise, expands to nothing. If the final argument is empty, it is omitted.
+  This is useful when passing ``__VA_ARGS__`` to a variadic function or template
+  parameter list, since it removes the extra comma when no arguments are
+  provided. ``PW_COMMA_ARGS`` must NOT be used when invoking a macro from
+  another macro.
+
+  For example, ``PW_COMMA_ARGS(1, 2, 3)`` expands to ``, 1, 2, 3``, while
+  ``PW_COMMA_ARGS()`` expands to nothing. ``PW_COMMA_ARGS(1, 2, )`` expands to
+  ``, 1, 2``.
+
 pw_preprocessor/boolean.h
 -------------------------
 Defines macros for boolean logic on literal 1s and 0s. This is useful for
@@ -32,19 +73,6 @@
 macros and token pastes the results. This can be used for building names of
 classes, variables, macros, etc.
 
-pw_preprocessor/macro_arg_count.h
----------------------------------
-Defines the ``PW_ARG_COUNT(...)`` macro, which counts the number of arguments it
-was passed. It can be invoked directly or with ``__VA_ARGS__`` in another macro.
-``PW_ARG_COUNT(...)``  evaluates to a literal of the number of arguments which
-can be used directly or concatenated to build other names. Unlike many common
-implementations, this macro correctly evaluates to ``0`` when it is invoked
-without arguments.
-
-This header also defines ``PW_HAS_ARGS(...)`` and ``PW_HAS_NO_ARGS(...)``,
-which evaluate to ``1`` or ``0`` depending on whether they are invoked with
-arguments.
-
 pw_preprocessor/util.h
 ----------------------
 General purpose, useful macros.
diff --git a/pw_preprocessor/macro_arg_count_test.cc b/pw_preprocessor/macro_arg_count_test.cc
deleted file mode 100644
index 0ce8831..0000000
--- a/pw_preprocessor/macro_arg_count_test.cc
+++ /dev/null
@@ -1,267 +0,0 @@
-// Copyright 2019 The Pigweed Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-//     https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-//
-// All of these tests are static asserts. If the test compiles, it has already
-// passed. The TEST functions are used for organization only.
-
-#include "pw_preprocessor/macro_arg_count.h"
-
-#include "pw_unit_test/framework.h"
-
-namespace pw {
-namespace {
-
-TEST(HasArgs, WithoutArguments) {
-  static_assert(PW_HAS_ARGS() == 0);
-  static_assert(PW_HAS_ARGS(/**/) == 0);
-  static_assert(PW_HAS_ARGS(/* uhm, hi */) == 0);
-
-  // Test how the macro handles whitespace and comments.
-  // clang-format off
-  static_assert(PW_HAS_ARGS(     ) == 0);  // NOLINT
-  static_assert(PW_HAS_ARGS(
-      ) == 0);  // NOLINT
-  static_assert(PW_HAS_ARGS(
-      // wow
-      // This is a comment.
-      ) == 0);  // NOLINT
-  // clang-format on
-
-  static_assert(PW_HAS_NO_ARGS() == 1);
-  static_assert(PW_HAS_NO_ARGS(/* hello */) == 1);
-  static_assert(PW_HAS_NO_ARGS(
-                    // hello
-                    /* goodbye */) == 1);
-}
-
-TEST(HasArgs, WithArguments) {
-  static_assert(PW_HAS_ARGS(()) == 1);
-  static_assert(PW_HAS_ARGS(0) == 1);
-  static_assert(PW_HAS_ARGS(, ) == 1);  // NOLINT
-  static_assert(PW_HAS_ARGS(a, b, c) == 1);
-  static_assert(PW_HAS_ARGS(PW_HAS_ARGS) == 1);
-  static_assert(PW_HAS_ARGS(PW_HAS_ARGS()) == 1);
-
-  static_assert(PW_HAS_NO_ARGS(0) == 0);
-  static_assert(PW_HAS_NO_ARGS(, ) == 0);  // NOLINT
-  static_assert(PW_HAS_NO_ARGS(a, b, c) == 0);
-  static_assert(PW_HAS_NO_ARGS(PW_HAS_ARGS) == 0);
-  static_assert(PW_HAS_NO_ARGS(PW_HAS_ARGS()) == 0);
-}
-
-constexpr int TestFunc(int arg, ...) { return arg; }
-
-#define CALL_FUNCTION(arg, ...) TestFunc(arg PW_COMMA_ARGS(__VA_ARGS__))
-
-template <typename T, typename... Args>
-constexpr T TemplateArgCount() {
-  return sizeof...(Args);
-}
-
-#define COUNT_ARGS_TEMPLATE(...) \
-  TemplateArgCount<int PW_COMMA_ARGS(__VA_ARGS__)>()
-
-TEST(CommaVarargs, NoArguments) {
-  static_assert(TestFunc(0 PW_COMMA_ARGS()) == 0);
-  static_assert(TestFunc(1 /* whoa */ PW_COMMA_ARGS(
-                    /* this macro */) /* is cool! */) == 1);
-
-  static_assert(TemplateArgCount<int PW_COMMA_ARGS()>() == 0);
-  static_assert(TemplateArgCount<int PW_COMMA_ARGS(/* nothing */)>() == 0);
-
-  static_assert(CALL_FUNCTION(2) == 2);
-  static_assert(CALL_FUNCTION(3, ) == 3);
-  static_assert(CALL_FUNCTION(4, /* nothing */) == 4);
-
-  static_assert(COUNT_ARGS_TEMPLATE() == 0);
-  static_assert(COUNT_ARGS_TEMPLATE(/* nothing */) == 0);
-}
-
-TEST(CommaVarargs, WithArguments) {
-  static_assert(TestFunc(0 PW_COMMA_ARGS(1)) == 0);
-  static_assert(TestFunc(1 PW_COMMA_ARGS(1, 2)) == 1);
-  static_assert(TestFunc(2 PW_COMMA_ARGS(1, 2, "three")) == 2);
-
-  static_assert(TemplateArgCount<int PW_COMMA_ARGS(bool)>() == 1);
-  static_assert(TemplateArgCount<int PW_COMMA_ARGS(char, const char*)>() == 2);
-  static_assert(TemplateArgCount<int PW_COMMA_ARGS(int, char, const char*)>() ==
-                3);
-
-  static_assert(CALL_FUNCTION(3) == 3);
-  static_assert(CALL_FUNCTION(4, ) == 4);
-  static_assert(CALL_FUNCTION(5, /* nothing */) == 5);
-
-  static_assert(COUNT_ARGS_TEMPLATE(int) == 1);
-  static_assert(COUNT_ARGS_TEMPLATE(int, int) == 2);
-  static_assert(COUNT_ARGS_TEMPLATE(int, int, int) == 3);
-}
-
-TEST(CountArgs, Zero) {
-  static_assert(PW_ARG_COUNT() == 0);
-  static_assert(PW_ARG_COUNT(/**/) == 0);
-  static_assert(PW_ARG_COUNT(/* uhm, hi */) == 0);
-
-  // clang-format off
-  static_assert(PW_ARG_COUNT(     ) == 0);  // NOLINT
-  static_assert(PW_ARG_COUNT(
-      ) == 0);  // NOLINT
-  static_assert(PW_ARG_COUNT(
-      // wow
-      // This is a comment.
-      ) == 0);  // NOLINT
-  // clang-format on
-}
-
-TEST(CountArgs, Commas) {
-  // clang-format off
-  static_assert(PW_ARG_COUNT(,) == 2);    // NOLINT
-  static_assert(PW_ARG_COUNT(,,) == 3);   // NOLINT
-  static_assert(PW_ARG_COUNT(,,,) == 4);  // NOLINT
-  // clang-format on
-  static_assert(PW_ARG_COUNT(, ) == 2);      // NOLINT
-  static_assert(PW_ARG_COUNT(, , ) == 3);    // NOLINT
-  static_assert(PW_ARG_COUNT(, , , ) == 4);  // NOLINT
-}
-
-TEST(CountArgs, Parentheses) {
-  static_assert(PW_ARG_COUNT(()) == 1);
-  static_assert(PW_ARG_COUNT((1, 2, 3, 4)) == 1);
-  static_assert(PW_ARG_COUNT((1, 2, 3), (1, 2, 3, 4)) == 2);
-  static_assert(PW_ARG_COUNT((), ()) == 2);
-  static_assert(PW_ARG_COUNT((-), (o)) == 2);
-  static_assert(PW_ARG_COUNT((, , (, , ), ), (123, 4)) == 2);  // NOLINT
-  static_assert(PW_ARG_COUNT(1, (2, 3, 4), (<5, 6>)) == 3);
-}
-
-#define SOME_VARIADIC_MACRO(...) PW_ARG_COUNT(__VA_ARGS__)
-
-#define ANOTHER_VARIADIC_MACRO(arg, ...) SOME_VARIADIC_MACRO(__VA_ARGS__)
-
-#define ALWAYS_ONE_ARG(...) SOME_VARIADIC_MACRO((__VA_ARGS__))
-
-TEST(CountArgs, NestedMacros) {
-  static_assert(SOME_VARIADIC_MACRO() == 0);
-  static_assert(SOME_VARIADIC_MACRO(X1) == 1);
-  static_assert(SOME_VARIADIC_MACRO(X1, X2) == 2);
-  static_assert(SOME_VARIADIC_MACRO(X1, X2, X3) == 3);
-  static_assert(SOME_VARIADIC_MACRO(X1, X2, X3, X4) == 4);
-  static_assert(SOME_VARIADIC_MACRO(X1, X2, X3, X4, X5) == 5);
-
-  static_assert(ANOTHER_VARIADIC_MACRO() == 0);
-  static_assert(ANOTHER_VARIADIC_MACRO(X0) == 0);
-  static_assert(ANOTHER_VARIADIC_MACRO(X0, X1) == 1);
-  static_assert(ANOTHER_VARIADIC_MACRO(X0, X1, X2) == 2);
-  static_assert(ANOTHER_VARIADIC_MACRO(X0, X1, X2, X3) == 3);
-  static_assert(ANOTHER_VARIADIC_MACRO(X0, X1, X2, X3, X4) == 4);
-  static_assert(ANOTHER_VARIADIC_MACRO(X0, X1, X2, X3, X4, X5) == 5);
-
-  static_assert(ALWAYS_ONE_ARG() == 1);
-  static_assert(ALWAYS_ONE_ARG(X0) == 1);
-  static_assert(ALWAYS_ONE_ARG(X0, X1) == 1);
-  static_assert(ALWAYS_ONE_ARG(X0, X1, X2) == 1);
-  static_assert(ALWAYS_ONE_ARG(X0, X1, X2, X3) == 1);
-  static_assert(ALWAYS_ONE_ARG(X0, X1, X2, X3, X4) == 1);
-  static_assert(ALWAYS_ONE_ARG(X0, X1, X2, X3, X4, X5) == 1);
-}
-
-/* Tests all supported arg counts. This test was generated by the following
-   Python 3 code:
-for i in range(64 + 1):
-  args = [f'X{x}' for x in range(1, i + 1)]
-  print(f'  static_assert(PW_ARG_COUNT({", ".join(args)}) == {i})  // NOLINT')
-*/
-TEST(CountArgs, AllSupported) {
-  // clang-format off
-  static_assert(PW_ARG_COUNT() == 0);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1) == 1);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2) == 2);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3) == 3);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4) == 4);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5) == 5);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6) == 6);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7) == 7);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8) == 8);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9) == 9);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10) == 10);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11) == 11);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12) == 12);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13) == 13);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14) == 14);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15) == 15);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16) == 16);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17) == 17);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18) == 18);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19) == 19);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20) == 20);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21) == 21);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22) == 22);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23) == 23);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24) == 24);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25) == 25);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26) == 26);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27) == 27);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28) == 28);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29) == 29);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30) == 30);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31) == 31);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32) == 32);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33) == 33);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34) == 34);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35) == 35);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36) == 36);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37) == 37);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38) == 38);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39) == 39);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40) == 40);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41) == 41);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42) == 42);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43) == 43);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44) == 44);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45) == 45);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45, X46) == 46);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45, X46, X47) == 47);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45, X46, X47, X48) == 48);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45, X46, X47, X48, X49) == 49);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45, X46, X47, X48, X49, X50) == 50);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45, X46, X47, X48, X49, X50, X51) == 51);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45, X46, X47, X48, X49, X50, X51, X52) == 52);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45, X46, X47, X48, X49, X50, X51, X52, X53) == 53);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45, X46, X47, X48, X49, X50, X51, X52, X53, X54) == 54);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45, X46, X47, X48, X49, X50, X51, X52, X53, X54, X55) == 55);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45, X46, X47, X48, X49, X50, X51, X52, X53, X54, X55, X56) == 56);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45, X46, X47, X48, X49, X50, X51, X52, X53, X54, X55, X56, X57) == 57);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45, X46, X47, X48, X49, X50, X51, X52, X53, X54, X55, X56, X57, X58) == 58);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45, X46, X47, X48, X49, X50, X51, X52, X53, X54, X55, X56, X57, X58, X59) == 59);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45, X46, X47, X48, X49, X50, X51, X52, X53, X54, X55, X56, X57, X58, X59, X60) == 60);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45, X46, X47, X48, X49, X50, X51, X52, X53, X54, X55, X56, X57, X58, X59, X60, X61) == 61);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45, X46, X47, X48, X49, X50, X51, X52, X53, X54, X55, X56, X57, X58, X59, X60, X61, X62) == 62);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45, X46, X47, X48, X49, X50, X51, X52, X53, X54, X55, X56, X57, X58, X59, X60, X61, X62, X63) == 63);  // NOLINT
-  static_assert(PW_ARG_COUNT(X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30, X31, X32, X33, X34, X35, X36, X37, X38, X39, X40, X41, X42, X43, X44, X45, X46, X47, X48, X49, X50, X51, X52, X53, X54, X55, X56, X57, X58, X59, X60, X61, X62, X63, X64) == 64);  // NOLINT
-  // clang-format on
-}
-
-TEST(DelegateByArgCount, WithoutAndWithoutArguments) {
-#define TEST_SUM0() (0)
-#define TEST_SUM1(a) (a)
-#define TEST_SUM2(a, b) ((a) + (b))
-#define TEST_SUM3(a, b, c) ((a) + (b) + (c))
-
-  static_assert(PW_DELEGATE_BY_ARG_COUNT(TEST_SUM) == 0);
-  static_assert(PW_DELEGATE_BY_ARG_COUNT(TEST_SUM, 5) == 5);
-  static_assert(PW_DELEGATE_BY_ARG_COUNT(TEST_SUM, 1, 2) == 3);
-  static_assert(PW_DELEGATE_BY_ARG_COUNT(TEST_SUM, 1, 2, 3) == 6);
-}
-
-}  // namespace
-}  // namespace pw
diff --git a/pw_preprocessor/public/pw_preprocessor/arguments.h b/pw_preprocessor/public/pw_preprocessor/arguments.h
new file mode 100644
index 0000000..bb7e80b
--- /dev/null
+++ b/pw_preprocessor/public/pw_preprocessor/arguments.h
@@ -0,0 +1,194 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Macros for working with arguments to function-like macros.
+#pragma once
+
+#include "pw_preprocessor/boolean.h"
+#include "pw_preprocessor/internal/arg_count_impl.h"
+
+// Expands to a comma followed by __VA_ARGS__, if __VA_ARGS__ is non-empty.
+// Otherwise, expands to nothing. If the final argument is empty, it is omitted.
+// This is useful when passing __VA_ARGS__ to a variadic function or template
+// parameter list, since it removes the extra comma when no arguments are
+// provided. PW_COMMA_ARGS must NOT be used when invoking a macro from another
+// macro.
+//
+// This is a more flexible, standard-compliant version of ##__VA_ARGS__. Unlike
+// ##__VA_ARGS__, this can be used to eliminate an unwanted comma when
+// __VA_ARGS__ expands to an empty argument because an outer macro was called
+// with __VA_ARGS__ instead of ##__VA_ARGS__. Also, since PW_COMMA_ARGS drops
+// the last argument if it is empty, both MY_MACRO(1, 2) and MY_MACRO(1, 2, )
+// can work correctly.
+//
+// PW_COMMA_ARGS must NOT be used to conditionally include a comma when invoking
+// a macro from another macro. PW_COMMA_ARGS only functions correctly when the
+// macro expands to C or C++ code! Using it with intermediate macros can result
+// in out-of-order parameters. When invoking one macro from another, simply pass
+// __VA_ARGS__. Only the final macro that expands to C/C++ code should use
+// PW_COMMA_ARGS.
+//
+// For example, the following does NOT work:
+/*
+     #define MY_MACRO(fmt, ...) \
+         NESTED_MACRO(fmt PW_COMMA_ARGS(__VA_ARGS__))  // BAD! Do not do this!
+*/
+// Instead, only use PW_COMMA_ARGS when the macro expands to C/C++ code:
+/*
+     #define MY_MACRO(fmt, ...) \
+         NESTED_MACRO(fmt, __VA_ARGS__)  // Pass __VA_ARGS__ to nested macros
+
+     #define NESTED_MACRO(fmt, ...) \
+         printf(fmt PW_COMMA_ARGS(__VA_ARGS__))  // PW_COMMA_ARGS is OK here
+*/
+#define PW_COMMA_ARGS(...)                                       \
+  _PW_IF(PW_EMPTY_ARGS(__VA_ARGS__), _PW_EXPAND, _PW_COMMA_ARGS) \
+  (PW_DROP_LAST_ARG_IF_EMPTY(__VA_ARGS__))
+
+#define _PW_COMMA_ARGS(...) , __VA_ARGS__
+
+// Allows calling a different function-like macros based on the number of
+// arguments. For example:
+//
+//   #define ARG_PRINT(...)  PW_DELEGATE_BY_ARG_COUNT(_ARG_PRINT, __VA_ARGS__)
+//   #define _ARG_PRINT1(a)        LOG_INFO("1 arg: %s", a)
+//   #define _ARG_PRINT2(a, b)     LOG_INFO("2 args: %s, %s", a, b)
+//   #define _ARG_PRINT3(a, b, c)  LOG_INFO("3 args: %s, %s, %s", a, b, c)
+//
+// This can the be called from C/C++ code:
+//
+//    ARG_PRINT("a");            // Outputs: 1 arg: a
+//    ARG_PRINT("a", "b");       // Outputs: 2 args: a, b
+//    ARG_PRINT("a", "b", "c");  // Outputs: 3 args: a, b, c
+//
+#define PW_DELEGATE_BY_ARG_COUNT(function, ...)                 \
+  _PW_DELEGATE_BY_ARG_COUNT(                                    \
+      _PW_PASTE2(function, PW_FUNCTION_ARG_COUNT(__VA_ARGS__)), \
+      PW_DROP_LAST_ARG_IF_EMPTY(__VA_ARGS__))
+
+#define _PW_DELEGATE_BY_ARG_COUNT(function, ...) function(__VA_ARGS__)
+
+// PW_MACRO_ARG_COUNT counts the number of arguments it was called with. It
+// evalulates to an integer literal in the range 0 to 64. Counting more than 64
+// arguments is not currently supported.
+//
+// PW_MACRO_ARG_COUNT is most commonly used to count __VA_ARGS__ in a variadic
+// macro. For example, the following code counts the number of arguments passed
+// to a logging macro:
+//
+/*   #define LOG_INFO(format, ...) {                                   \
+         static const int kArgCount = PW_MACRO_ARG_COUNT(__VA_ARGS__); \
+         SendLog(kArgCount, format, ##__VA_ARGS__);                    \
+       }
+*/
+// clang-format off
+#define PW_MACRO_ARG_COUNT(...)                      \
+  _PW_MACRO_ARG_COUNT_IMPL(__VA_ARGS__,              \
+                     64, 63, 62, 61, 60, 59, 58, 57, \
+                     56, 55, 54, 53, 52, 51, 50, 49, \
+                     48, 47, 46, 45, 44, 43, 42, 41, \
+                     40, 39, 38, 37, 36, 35, 34, 33, \
+                     32, 31, 30, 29, 28, 27, 26, 25, \
+                     24, 23, 22, 21, 20, 19, 18, 17, \
+                     16, 15, 14, 13, 12, 11, 10,  9, \
+                      8,  7,  6,  5, 4,  3,  2,  PW_HAS_ARGS(__VA_ARGS__))
+// clang-format on
+
+// Argument count for using with a C/C++ function or template parameter list.
+// The difference from PW_MACRO_ARG_COUNT is that the last argument is not
+// counted if it is empty. This makes it easier to drop the final comma when
+// expanding to C/C++ code.
+#define PW_FUNCTION_ARG_COUNT(...) \
+  _PW_FUNCTION_ARG_COUNT(PW_LAST_ARG(__VA_ARGS__), __VA_ARGS__)
+
+#define _PW_FUNCTION_ARG_COUNT(last_arg, ...) \
+  _PW_PASTE2(_PW_FUNCTION_ARG_COUNT_, PW_EMPTY_ARGS(last_arg))(__VA_ARGS__)
+
+#define _PW_FUNCTION_ARG_COUNT_0 PW_MACRO_ARG_COUNT
+#define _PW_FUNCTION_ARG_COUNT_1(...) \
+  PW_MACRO_ARG_COUNT(PW_DROP_LAST_ARG(__VA_ARGS__))
+
+// Evaluates to the last argument in the provided arguments.
+#define PW_LAST_ARG(...) \
+  _PW_PASTE2(_PW_LAST_ARG_, PW_MACRO_ARG_COUNT(__VA_ARGS__))(__VA_ARGS__)
+
+// Evaluates to the provided arguments, excluding the final argument.
+#define PW_DROP_LAST_ARG(...) \
+  _PW_PASTE2(_PW_DROP_LAST_ARG_, PW_MACRO_ARG_COUNT(__VA_ARGS__))(__VA_ARGS__)
+
+// Evaluates to the arguments, excluding the final argument if it is empty.
+#define PW_DROP_LAST_ARG_IF_EMPTY(...)                                       \
+  _PW_IF(                                                                    \
+      PW_EMPTY_ARGS(PW_LAST_ARG(__VA_ARGS__)), PW_DROP_LAST_ARG, _PW_EXPAND) \
+  (__VA_ARGS__)
+
+// Expands to 1 if one or more arguments are provided, 0 otherwise.
+#define PW_HAS_ARGS(...) PW_NOT(PW_EMPTY_ARGS(__VA_ARGS__))
+
+// Expands to 0 if one or more arguments are provided, 1 otherwise. This
+// approach is from Jens Gustedt's blog:
+//   https://gustedt.wordpress.com/2010/06/08/detect-empty-macro-arguments/
+//
+// Normally, with a standard-compliant C preprocessor, it's impossible to tell
+// whether a variadic macro was called with no arguments or with one argument.
+// A macro invoked with no arguments is actually passed one empty argument.
+//
+// This macro works by checking for the presence of a comma in four situations.
+// These situations give the following information about __VA_ARGS__:
+//
+//   1. It is two or more variadic arguments.
+//   2. It expands to one argument surrounded by parentheses.
+//   3. It is a function-like macro that produces a comma when invoked.
+//   4. It does not interfere with calling a macro when placed between it and
+//      parentheses.
+//
+// If a comma is not present in 1, 2, 3, but is present in 4, then __VA_ARGS__
+// is empty. For this case (0001), and only this case, a corresponding macro
+// that expands to a comma is defined. The presence of this comma determines
+// whether any arguments were passed in.
+//
+// C++20 introduces __VA_OPT__, which would greatly simplify this macro.
+#define PW_EMPTY_ARGS(...)                                             \
+  _PW_HAS_NO_ARGS(_PW_HAS_COMMA(__VA_ARGS__),                          \
+                  _PW_HAS_COMMA(_PW_MAKE_COMMA_IF_CALLED __VA_ARGS__), \
+                  _PW_HAS_COMMA(__VA_ARGS__()),                        \
+                  _PW_HAS_COMMA(_PW_MAKE_COMMA_IF_CALLED __VA_ARGS__()))
+
+// clang-format off
+
+#define _PW_HAS_COMMA(...)                                           \
+  _PW_MACRO_ARG_COUNT_IMPL(__VA_ARGS__,                              \
+                     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
+                     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
+                     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
+                     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0)
+
+#define _PW_MACRO_ARG_COUNT_IMPL(a64, a63, a62, a61, a60, a59, a58, a57, \
+                                 a56, a55, a54, a53, a52, a51, a50, a49, \
+                                 a48, a47, a46, a45, a44, a43, a42, a41, \
+                                 a40, a39, a38, a37, a36, a35, a34, a33, \
+                                 a32, a31, a30, a29, a28, a27, a26, a25, \
+                                 a24, a23, a22, a21, a20, a19, a18, a17, \
+                                 a16, a15, a14, a13, a12, a11, a10, a09, \
+                                 a08, a07, a06, a05, a04, a03, a02, a01, \
+                                 count, ...)                             \
+  count
+
+// clang-format on
+
+#define _PW_HAS_NO_ARGS(a1, a2, a3, a4) \
+  _PW_HAS_COMMA(_PW_PASTE_RESULTS(a1, a2, a3, a4))
+#define _PW_PASTE_RESULTS(a1, a2, a3, a4) _PW_HAS_COMMA_CASE_##a1##a2##a3##a4
+#define _PW_HAS_COMMA_CASE_0001 ,
+#define _PW_MAKE_COMMA_IF_CALLED(...) ,
diff --git a/pw_preprocessor/public/pw_preprocessor/compiler.h b/pw_preprocessor/public/pw_preprocessor/compiler.h
index 550109e..8d002f9 100644
--- a/pw_preprocessor/public/pw_preprocessor/compiler.h
+++ b/pw_preprocessor/public/pw_preprocessor/compiler.h
@@ -49,7 +49,7 @@
 
 // When compiling for host using MinGW, use gnu_printf() rather than printf()
 // to support %z format specifiers.
-#if __USE_MINGW_ANSI_STDIO
+#ifdef __USE_MINGW_ANSI_STDIO
 #define _PW_PRINTF_FORMAT_TYPE gnu_printf
 #else
 #define _PW_PRINTF_FORMAT_TYPE printf
@@ -62,7 +62,7 @@
 // to keep the variable, even if it is not used. Depending on the linker
 // options, the linker may still remove this section if it is not declared in
 // the linker script and marked KEEP.
-#if __APPLE__
+#ifdef __APPLE__
 #define PW_KEEP_IN_SECTION(name) __attribute__((section("__DATA," name), used))
 #else
 #define PW_KEEP_IN_SECTION(name) __attribute__((section(name), used))
@@ -70,10 +70,13 @@
 
 // Indicate to the compiler that the annotated function won't return. Example:
 //
-//   void HandleAssertFailure(ErrorCode error_code) PW_NO_RETURN;
+//   PW_NO_RETURN void HandleAssertFailure(ErrorCode error_code);
 //
 #define PW_NO_RETURN __attribute__((noreturn))
 
+// Prevents the compiler from inlining a fuction.
+#define PW_NO_INLINE __attribute__((noinline))
+
 // Indicate to the compiler that the given section of code will not be reached.
 // Example:
 //
@@ -98,7 +101,7 @@
 //     }
 //     return hash;
 //   }
-#if __clang__
+#ifdef __clang__
 #define PW_NO_SANITIZE(check) __attribute__((no_sanitize(check)))
 #else
 #define PW_NO_SANITIZE(check)
diff --git a/pw_preprocessor/public/pw_preprocessor/concat.h b/pw_preprocessor/public/pw_preprocessor/concat.h
index 30f37ce..0d9bc11 100644
--- a/pw_preprocessor/public/pw_preprocessor/concat.h
+++ b/pw_preprocessor/public/pw_preprocessor/concat.h
@@ -14,14 +14,15 @@
 
 #pragma once
 
-#include "pw_preprocessor/macro_arg_count.h"
+#include "pw_preprocessor/arguments.h"
 
 // Expands macros and concatenates the results using preprocessor ##
 // concatentation. Supports up to 32 arguments.
-#define PW_CONCAT(...) _PW_CONCAT_IMPL1(PW_ARG_COUNT(__VA_ARGS__), __VA_ARGS__)
+#define PW_CONCAT(...) \
+  _PW_CONCAT_IMPL1(PW_MACRO_ARG_COUNT(__VA_ARGS__), __VA_ARGS__)
 
-// Expand the macro to allow PW_ARG_COUNT and any caller-provided macros to be
-// evaluated before concatenating the tokens.
+// Expand the macro to allow PW_MACRO_ARG_COUNT and any caller-provided macros
+// to be evaluated before concatenating the tokens.
 #define _PW_CONCAT_IMPL1(count, ...) _PW_CONCAT_IMPL2(count, __VA_ARGS__)
 #define _PW_CONCAT_IMPL2(count, ...) _PW_CONCAT_##count(__VA_ARGS__)
 
diff --git a/pw_preprocessor/public/pw_preprocessor/internal/arg_count_impl.h b/pw_preprocessor/public/pw_preprocessor/internal/arg_count_impl.h
new file mode 100644
index 0000000..7cd822a
--- /dev/null
+++ b/pw_preprocessor/public/pw_preprocessor/internal/arg_count_impl.h
@@ -0,0 +1,109 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+// Expands to the passed arguments.
+#define _PW_EXPAND(...) __VA_ARGS__
+
+// If-like macro for internal use.
+#define _PW_IF(boolean, true_expr, false_expr) \
+  _PW_PASTE2(_PW_IF_, boolean)(true_expr, false_expr)
+
+#define _PW_IF_0(true_expr, false_expr) false_expr
+#define _PW_IF_1(true_expr, false_expr) true_expr
+
+// Token pasting macro that doesn't rely on concat.h
+#define _PW_PASTE2(a1, a2) _PW_PASTE2_EXPANDED(a1, a2)
+#define _PW_PASTE2_EXPANDED(a1, a2) _PW_PASTE2_IMPL(a1, a2)
+#define _PW_PASTE2_IMPL(a1, a2) a1##a2
+
+/*
+for i in range(2, 33):
+  args = ', '.join(f'a{arg}' for arg in range(1, i))
+  print(f'#define _PW_LAST_ARG_{i}({args}, a{i}) a{i}')
+*/
+// clang-format off
+#define _PW_LAST_ARG_0()
+#define _PW_LAST_ARG_1(a1) a1
+#define _PW_LAST_ARG_2(a1, a2) a2
+#define _PW_LAST_ARG_3(a1, a2, a3) a3
+#define _PW_LAST_ARG_4(a1, a2, a3, a4) a4
+#define _PW_LAST_ARG_5(a1, a2, a3, a4, a5) a5
+#define _PW_LAST_ARG_6(a1, a2, a3, a4, a5, a6) a6
+#define _PW_LAST_ARG_7(a1, a2, a3, a4, a5, a6, a7) a7
+#define _PW_LAST_ARG_8(a1, a2, a3, a4, a5, a6, a7, a8) a8
+#define _PW_LAST_ARG_9(a1, a2, a3, a4, a5, a6, a7, a8, a9) a9
+#define _PW_LAST_ARG_10(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) a10
+#define _PW_LAST_ARG_11(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) a11
+#define _PW_LAST_ARG_12(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) a12
+#define _PW_LAST_ARG_13(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) a13
+#define _PW_LAST_ARG_14(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) a14
+#define _PW_LAST_ARG_15(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15) a15
+#define _PW_LAST_ARG_16(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16) a16
+#define _PW_LAST_ARG_17(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17) a17
+#define _PW_LAST_ARG_18(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18) a18
+#define _PW_LAST_ARG_19(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19) a19
+#define _PW_LAST_ARG_20(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20) a20
+#define _PW_LAST_ARG_21(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21) a21
+#define _PW_LAST_ARG_22(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22) a22
+#define _PW_LAST_ARG_23(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23) a23
+#define _PW_LAST_ARG_24(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24) a24
+#define _PW_LAST_ARG_25(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25) a25
+#define _PW_LAST_ARG_26(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26) a26
+#define _PW_LAST_ARG_27(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27) a27
+#define _PW_LAST_ARG_28(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28) a28
+#define _PW_LAST_ARG_29(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29) a29
+#define _PW_LAST_ARG_30(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30) a30
+#define _PW_LAST_ARG_31(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, a31) a31
+#define _PW_LAST_ARG_32(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, a31, a32) a32
+
+/*
+for i in range(2, 33):
+  args = ', '.join(f'a{arg}' for arg in range(1, i))
+  print(f'#define _PW_DROP_LAST_ARG_{i}({args}, a{i}) {args}')
+*/
+#define _PW_DROP_LAST_ARG_0()
+#define _PW_DROP_LAST_ARG_1(a1)
+#define _PW_DROP_LAST_ARG_2(a1, a2) a1
+#define _PW_DROP_LAST_ARG_3(a1, a2, a3) a1, a2
+#define _PW_DROP_LAST_ARG_4(a1, a2, a3, a4) a1, a2, a3
+#define _PW_DROP_LAST_ARG_5(a1, a2, a3, a4, a5) a1, a2, a3, a4
+#define _PW_DROP_LAST_ARG_6(a1, a2, a3, a4, a5, a6) a1, a2, a3, a4, a5
+#define _PW_DROP_LAST_ARG_7(a1, a2, a3, a4, a5, a6, a7) a1, a2, a3, a4, a5, a6
+#define _PW_DROP_LAST_ARG_8(a1, a2, a3, a4, a5, a6, a7, a8) a1, a2, a3, a4, a5, a6, a7
+#define _PW_DROP_LAST_ARG_9(a1, a2, a3, a4, a5, a6, a7, a8, a9) a1, a2, a3, a4, a5, a6, a7, a8
+#define _PW_DROP_LAST_ARG_10(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) a1, a2, a3, a4, a5, a6, a7, a8, a9
+#define _PW_DROP_LAST_ARG_11(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) a1, a2, a3, a4, a5, a6, a7, a8, a9, a10
+#define _PW_DROP_LAST_ARG_12(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11
+#define _PW_DROP_LAST_ARG_13(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12
+#define _PW_DROP_LAST_ARG_14(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13
+#define _PW_DROP_LAST_ARG_15(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15) a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14
+#define _PW_DROP_LAST_ARG_16(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16) a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15
+#define _PW_DROP_LAST_ARG_17(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17) a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16
+#define _PW_DROP_LAST_ARG_18(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18) a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17
+#define _PW_DROP_LAST_ARG_19(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19) a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18
+#define _PW_DROP_LAST_ARG_20(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20) a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19
+#define _PW_DROP_LAST_ARG_21(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21) a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20
+#define _PW_DROP_LAST_ARG_22(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22) a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21
+#define _PW_DROP_LAST_ARG_23(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23) a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22
+#define _PW_DROP_LAST_ARG_24(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24) a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23
+#define _PW_DROP_LAST_ARG_25(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25) a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24
+#define _PW_DROP_LAST_ARG_26(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26) a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25
+#define _PW_DROP_LAST_ARG_27(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27) a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26
+#define _PW_DROP_LAST_ARG_28(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28) a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27
+#define _PW_DROP_LAST_ARG_29(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29) a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28
+#define _PW_DROP_LAST_ARG_30(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30) a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29
+#define _PW_DROP_LAST_ARG_31(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, a31) a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30
+#define _PW_DROP_LAST_ARG_32(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, a31, a32) a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, a31
+// clang-format on
diff --git a/pw_preprocessor/public/pw_preprocessor/macro_arg_count.h b/pw_preprocessor/public/pw_preprocessor/macro_arg_count.h
deleted file mode 100644
index 163eb47..0000000
--- a/pw_preprocessor/public/pw_preprocessor/macro_arg_count.h
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2019 The Pigweed Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-//     https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-//
-// Macros for counting the number of arguments passed to a variadic
-// function-like macro.
-#pragma once
-
-#include "pw_preprocessor/boolean.h"
-
-// PW_ARG_COUNT counts the number of arguments it was called with. It evalulates
-// to an integer literal in the range 0 to 64. Counting more than 64 arguments
-// is not currently supported.
-//
-// PW_ARG_COUNT is most commonly used to count __VA_ARGS__ in a variadic macro.
-// For example, the following code counts the number of arguments passed to a
-// logging macro:
-//
-/*   #define LOG_INFO(format, ...) {                             \
-         static const int kArgCount = PW_ARG_COUNT(__VA_ARGS__); \
-         SendLog(kArgCount, format, ##__VA_ARGS__);              \
-       }
-*/
-// clang-format off
-#define PW_ARG_COUNT(...)                            \
-  _PW_ARG_COUNT_IMPL(__VA_ARGS__,                    \
-                     64, 63, 62, 61, 60, 59, 58, 57, \
-                     56, 55, 54, 53, 52, 51, 50, 49, \
-                     48, 47, 46, 45, 44, 43, 42, 41, \
-                     40, 39, 38, 37, 36, 35, 34, 33, \
-                     32, 31, 30, 29, 28, 27, 26, 25, \
-                     24, 23, 22, 21, 20, 19, 18, 17, \
-                     16, 15, 14, 13, 12, 11, 10,  9, \
-                      8,  7,  6,  5, 4,  3,  2,  PW_HAS_ARGS(__VA_ARGS__))
-
-// Expands to 1 if one or more arguments are provided, 0 otherwise.
-#define PW_HAS_ARGS(...) PW_NOT(PW_HAS_NO_ARGS(__VA_ARGS__))
-
-// Expands to 0 if one or more arguments are provided, 1 otherwise. This
-// approach is from Jens Gustedt's blog:
-//   https://gustedt.wordpress.com/2010/06/08/detect-empty-macro-arguments/
-//
-// Normally, with a standard-compliant C preprocessor, it's impossible to tell
-// whether a variadic macro was called with no arguments or with one argument.
-// A macro invoked with no arguments is actually passed one empty argument.
-//
-// This macro works by checking for the presence of a comma in four situations.
-// These situations give the following information about __VA_ARGS__:
-//
-//   1. It is two or more variadic arguments.
-//   2. It expands to one argument surrounded by parentheses.
-//   3. It is a function-like macro that produces a comma when invoked.
-//   4. It does not interfere with calling a macro when placed between it and
-//      parentheses.
-//
-// If a comma is not present in 1, 2, 3, but is present in 4, then __VA_ARGS__
-// is empty. For this case (0001), and only this case, a corresponding macro
-// that expands to a comma is defined. The presence of this comma determines
-// whether any arguments were passed in.
-//
-// C++20 introduces __VA_OPT__, which would greatly simplify this macro.
-#define PW_HAS_NO_ARGS(...)                                            \
-  _PW_HAS_NO_ARGS(_PW_HAS_COMMA(__VA_ARGS__),                          \
-                  _PW_HAS_COMMA(_PW_MAKE_COMMA_IF_CALLED __VA_ARGS__), \
-                  _PW_HAS_COMMA(__VA_ARGS__()),                        \
-                  _PW_HAS_COMMA(_PW_MAKE_COMMA_IF_CALLED __VA_ARGS__()))
-
-#define _PW_HAS_COMMA(...)                                           \
-  _PW_ARG_COUNT_IMPL(__VA_ARGS__,                                    \
-                     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
-                     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
-                     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
-                     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0)
-
-#define _PW_ARG_COUNT_IMPL(a64, a63, a62, a61, a60, a59, a58, a57, \
-                           a56, a55, a54, a53, a52, a51, a50, a49, \
-                           a48, a47, a46, a45, a44, a43, a42, a41, \
-                           a40, a39, a38, a37, a36, a35, a34, a33, \
-                           a32, a31, a30, a29, a28, a27, a26, a25, \
-                           a24, a23, a22, a21, a20, a19, a18, a17, \
-                           a16, a15, a14, a13, a12, a11, a10, a09, \
-                           a08, a07, a06, a05, a04, a03, a02, a01, \
-                           count, ...)                             \
-  count
-
-// clang-format on
-#define _PW_HAS_NO_ARGS(a1, a2, a3, a4) \
-  _PW_HAS_COMMA(_PW_PASTE_RESULTS(a1, a2, a3, a4))
-#define _PW_PASTE_RESULTS(a1, a2, a3, a4) _PW_HAS_COMMA_CASE_##a1##a2##a3##a4
-#define _PW_HAS_COMMA_CASE_0001 ,
-#define _PW_MAKE_COMMA_IF_CALLED(...) ,
-
-// Expands to a comma followed by __VA_ARGS__, if __VA_ARGS__ is non-empty.
-// Otherwise, expands to nothing. This is useful when passing __VA_ARGS__ to a
-// variadic function or template parameter list, since it removes the extra
-// comma when no arguments are provided. PW_COMMA_ARGS must NOT be used when
-// invoking a macro from another macro.
-//
-// This is a more flexible, standard-compliant version of ##__VA_ARGS__. Unlike
-// ##__VA_ARGS__, this can be used to eliminate an unwanted comma when
-// __VA_ARGS__ expands to an empty argument because an outer macro was called
-// with __VA_ARGS__ instead of ##__VA_ARGS__.
-//
-// PW_COMMA_ARGS must NOT be used to conditionally include a comma when invoking
-// a macro from another macro. PW_COMMA_ARGS only functions correctly when the
-// macro expands to C or C++ code! When invoking one macro from another, simply
-// pass __VA_ARGS__. Only the final macro that expands to C/C++ code should use
-// PW_COMMA_ARGS.
-//
-// For example, the following does NOT work:
-/*
-     #define MY_MACRO(fmt, ...) \
-         NESTED_MACRO(fmt PW_COMMA_ARGS(__VA_ARGS__))  // BAD! Do not do this!
-
-   Instead, only use PW_COMMA_ARGS when the macro expands to C/C++ code:
-
-     #define MY_MACRO(fmt, ...) \
-         NESTED_MACRO(fmt, __VA_ARGS__)  // Pass __VA_ARGS__ to nested macros
-
-     #define NESTED_MACRO(fmt, ...) \
-         printf(fmt PW_COMMA_ARGS(__VA_ARGS__))  // PW_COMMA_ARGS is OK here
-*/
-#define PW_COMMA_ARGS(...) _PW_COMMA_ARGS(PW_HAS_ARGS(__VA_ARGS__), __VA_ARGS__)
-
-#define _PW_COMMA_ARGS(has_args, ...) _PW_COMMA_ARGS_X(has_args, __VA_ARGS__)
-#define _PW_COMMA_ARGS_X(has_args, ...) _PW_COMMA_ARGS_##has_args(__VA_ARGS__)
-#define _PW_COMMA_ARGS_0(...)                // no args, no comma
-#define _PW_COMMA_ARGS_1(...) , __VA_ARGS__  // comma, followed by args
-
-// Allows calling a different function-like macro based on the number of
-// arguments.  For example:
-//
-//   #define ARG_PRINT(...)  PW_DELEGATE_BY_ARG_COUNT(_ARG_PRINT, __VA_ARGS__)
-//   #define _ARG_PRINT1(a)        LOG_INFO("1 arg: %s", a)
-//   #define _ARG_PRINT2(a, b)     LOG_INFO("2 args: %s, %s", a, b)
-//   #define _ARG_PRINT3(a, b, c)  LOG_INFO("3 args: %s, %s, %s", a, b, c)
-//
-// This can the be called in code:
-//    ARG_PRINT("a");            // Outputs: 1 arg: a
-//    ARG_PRINT("a", "b");       // Outputs: 2 arg: a, b
-//    ARG_PRINT("a", "b", "c");  // Outputs: 3 arg: a, b, c
-//
-#define PW_DELEGATE_BY_ARG_COUNT(func, ...) \
-  _PW_DELEGATE_BY_ARG_COUNT(func, PW_ARG_COUNT(__VA_ARGS__))(__VA_ARGS__)
-#define _PW_DELEGATE_BY_ARG_COUNT_EXPANDED(name, n) name##n
-#define _PW_DELEGATE_BY_ARG_COUNT(name, n) \
-  _PW_DELEGATE_BY_ARG_COUNT_EXPANDED(name, n)
diff --git a/pw_presubmit/BUILD.gn b/pw_presubmit/BUILD.gn
index abf0562..35fa9ff 100644
--- a/pw_presubmit/BUILD.gn
+++ b/pw_presubmit/BUILD.gn
@@ -12,13 +12,14 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_docgen/docs.gni")
+
 pw_doc_group("docs") {
   sources = [ "docs.rst" ]
   inputs = [
+    "docs/pw_presubmit_demo.gif",
     "py/pw_presubmit/cli.py",
     "py/pw_presubmit/presubmit.py",
   ]
diff --git a/pw_presubmit/docs.rst b/pw_presubmit/docs.rst
index 427f15c..921cb35 100644
--- a/pw_presubmit/docs.rst
+++ b/pw_presubmit/docs.rst
@@ -1,8 +1,4 @@
-.. _chapter-pw-presubmit:
-
-.. default-domain:: cpp
-
-.. highlight:: sh
+.. _module-pw_presubmit:
 
 ============
 pw_presubmit
@@ -23,7 +19,7 @@
 configurations defined by your project. ``pw format`` leverages existing tools
 like ``clang-format``, and it’s simple to add support for new languages.
 
-.. image:: ../docs/images/pw_presubmit_demo.gif
+.. image:: docs/pw_presubmit_demo.gif
    :alt: ``pw format`` demo
    :align: left
 
@@ -53,7 +49,7 @@
 Python script that uses the ``pw_presubmit`` package.
 
 A project's presubmit script can be registered as a
-:ref:`pw_cli <chapter-pw-cli>` plugin, so that it can be run as ``pw
+:ref:`pw_cli <module-pw_cli>` plugin, so that it can be run as ``pw
 presubmit``.
 
 Setting up the command-line interface
diff --git a/docs/images/pw_presubmit_demo.gif b/pw_presubmit/docs/pw_presubmit_demo.gif
similarity index 100%
rename from docs/images/pw_presubmit_demo.gif
rename to pw_presubmit/docs/pw_presubmit_demo.gif
Binary files differ
diff --git a/pw_presubmit/py/BUILD.gn b/pw_presubmit/py/BUILD.gn
new file mode 100644
index 0000000..a9b96e7
--- /dev/null
+++ b/pw_presubmit/py/BUILD.gn
@@ -0,0 +1,42 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/python.gni")
+
+pw_python_package("py") {
+  setup = [ "setup.py" ]
+  sources = [
+    "pw_presubmit/__init__.py",
+    "pw_presubmit/build.py",
+    "pw_presubmit/cli.py",
+    "pw_presubmit/environment.py",
+    "pw_presubmit/format_code.py",
+    "pw_presubmit/git_repo.py",
+    "pw_presubmit/install_hook.py",
+    "pw_presubmit/pigweed_presubmit.py",
+    "pw_presubmit/presubmit.py",
+    "pw_presubmit/python_checks.py",
+    "pw_presubmit/tools.py",
+  ]
+  tests = [
+    "presubmit_test.py",
+    "tools_test.py",
+  ]
+  python_deps = [
+    "$dir_pw_cli/py",
+    "$dir_pw_package/py",
+  ]
+}
diff --git a/pw_presubmit/py/pw_presubmit/build.py b/pw_presubmit/py/pw_presubmit/build.py
index 618f3a2..4a370e3 100644
--- a/pw_presubmit/py/pw_presubmit/build.py
+++ b/pw_presubmit/py/pw_presubmit/build.py
@@ -20,14 +20,46 @@
 import re
 from typing import Container, Dict, Iterable, List, Mapping, Set, Tuple
 
+from pw_package import package_manager
 from pw_presubmit import call, log_run, plural, PresubmitFailure, tools
 
 _LOG = logging.getLogger(__name__)
 
 
+def install_package(root: Path, name: str) -> None:
+    """Install package with given name in given path."""
+    mgr = package_manager.PackageManager(root)
+
+    if not mgr.list():
+        raise PresubmitFailure(
+            'no packages configured, please import your pw_package '
+            'configuration module')
+
+    if not mgr.status(name):
+        mgr.install(name)
+
+
 def gn_args(**kwargs) -> str:
-    """Builds a string to use for the --args argument to gn gen."""
-    return '--args=' + ' '.join(f'{arg}={val}' for arg, val in kwargs.items())
+    """Builds a string to use for the --args argument to gn gen.
+
+    Currently supports bool, int, and str values. In the case of str values,
+    quotation marks will be added automatically, unless the string already
+    contains one or more double quotation marks, or starts with a { or [
+    character, in which case it will be passed through as-is.
+    """
+    transformed_args = []
+    for arg, val in kwargs.items():
+        if isinstance(val, bool):
+            transformed_args.append(f'{arg}={str(val).lower()}')
+            continue
+        if (isinstance(val, str) and '"' not in val and not val.startswith("{")
+                and not val.startswith("[")):
+            transformed_args.append(f'{arg}="{val}"')
+            continue
+        # Fall-back case handles integers as well as strings that already
+        # contain double quotation marks, or look like scopes or lists.
+        transformed_args.append(f'{arg}={val}')
+    return '--args=' + ' '.join(transformed_args)
 
 
 def gn_gen(gn_source_dir: Path,
@@ -56,9 +88,18 @@
 
 def cmake(source_dir: Path,
           output_dir: Path,
+          *args: str,
           env: Mapping['str', 'str'] = None) -> None:
     """Runs CMake for Ninja on the given source and output directories."""
-    call('cmake', '-B', output_dir, '-S', source_dir, '-G', 'Ninja', env=env)
+    call('cmake',
+         '-B',
+         output_dir,
+         '-S',
+         source_dir,
+         '-G',
+         'Ninja',
+         *args,
+         env=env)
 
 
 def env_with_clang_vars() -> Mapping[str, str]:
@@ -106,7 +147,8 @@
 
 
 def check_builds_for_files(
-        extensions_to_check: Container[str],
+        bazel_extensions_to_check: Container[str],
+        gn_extensions_to_check: Container[str],
         files: Iterable[Path],
         bazel_dirs: Iterable[Path] = (),
         gn_dirs: Iterable[Tuple[Path, Path]] = (),
@@ -115,7 +157,8 @@
     """Checks that source files are in the GN and Bazel builds.
 
     Args:
-        extensions_to_check: which file suffixes to look for
+        bazel_extensions_to_check: which file suffixes to look for in Bazel
+        gn_extensions_to_check: which file suffixes to look for in GN
         files: the files that should be checked
         bazel_dirs: directories in which to run bazel query
         gn_dirs: (source_dir, output_dir) tuples with which to run gn desc
@@ -144,13 +187,18 @@
 
     missing: Dict[str, List[Path]] = collections.defaultdict(list)
 
-    for path in (p for p in files if p.suffix in extensions_to_check):
-        if bazel_dirs and path.suffix != '.rst' and path not in bazel_builds:
-            # TODO(pwbug/176) Replace this workaround for fuzzers.
-            if 'fuzz' not in str(path):
-                missing['Bazel'].append(path)
-        if (gn_dirs or gn_build_files) and path not in gn_builds:
-            missing['GN'].append(path)
+    if bazel_dirs:
+        for path in (p for p in files
+                     if p.suffix in bazel_extensions_to_check):
+            if path not in bazel_builds:
+                # TODO(pwbug/176) Replace this workaround for fuzzers.
+                if 'fuzz' not in str(path):
+                    missing['Bazel'].append(path)
+
+    if gn_dirs or gn_build_files:
+        for path in (p for p in files if p.suffix in gn_extensions_to_check):
+            if path not in gn_builds:
+                missing['GN'].append(path)
 
     for builder, paths in missing.items():
         _LOG.warning('%s missing from the %s build:\n%s',
diff --git a/pw_presubmit/py/pw_presubmit/cli.py b/pw_presubmit/py/pw_presubmit/cli.py
index 37d1a57..7b0fdd5 100644
--- a/pw_presubmit/py/pw_presubmit/cli.py
+++ b/pw_presubmit/py/pw_presubmit/cli.py
@@ -80,8 +80,8 @@
             if values not in all_steps:
                 raise parser.error(
                     f'argument --step: {values} is not the name of a '
-                    'presubmit check\n\n'
-                    f'Valid values for --step:\n{{{",".join(all_steps)}}}')
+                    'presubmit check\n\nValid values for --step:\n'
+                    f'{{{",".join(sorted(all_steps))}}}')
 
             namespace.program.append(all_steps[values])
 
@@ -108,6 +108,11 @@
         type=Path,
         help='Output directory (default: <repo root>/.presubmit)',
     )
+    parser.add_argument(
+        '--package-root',
+        type=Path,
+        help='Package root directory (default: <output directory>/packages)',
+    )
 
     exclusive = parser.add_mutually_exclusive_group()
     exclusive.add_argument(
@@ -126,7 +131,8 @@
 
 def run(
         program: Sequence[Callable],
-        output_directory: Path,
+        output_directory: Optional[Path],
+        package_root: Path,
         clear: bool,
         root: Path = None,
         repositories: Collection[Path] = (),
@@ -141,6 +147,7 @@
           defaults to the root of the current directory's repository
       program: from the --program option
       output_directory: from --output-directory option
+      package_root: from --package-root option
       clear: from the --clear option
       **other_args: remaining arguments defined by by add_arguments
 
@@ -153,9 +160,12 @@
     if not repositories:
         repositories = [root]
 
-    if not output_directory:
+    if output_directory is None:
         output_directory = root / '.presubmit'
 
+    if not package_root:
+        package_root = output_directory / 'packages'
+
     _LOG.debug('Using environment at %s', output_directory)
 
     if clear:
@@ -171,6 +181,7 @@
                      root,
                      repositories,
                      output_directory=output_directory,
+                     package_root=package_root,
                      **other_args):
         return 0
 
diff --git a/pw_presubmit/py/pw_presubmit/environment.py b/pw_presubmit/py/pw_presubmit/environment.py
index 372b299..d0d7f37 100644
--- a/pw_presubmit/py/pw_presubmit/environment.py
+++ b/pw_presubmit/py/pw_presubmit/environment.py
@@ -63,26 +63,30 @@
 def init_virtualenv(
         pigweed_root: Path,
         output_directory: Path,
-        setup_py_roots: Iterable[Union[Path, str]] = (),
+        requirements: Iterable[Union[Path, str]] = (),
+        gn_targets: Iterable[str] = (),
 ) -> None:
     """Sets up a virtualenv, assumes recent Python 3 is already installed."""
     virtualenv_source = pigweed_root.joinpath('pw_env_setup', 'py',
                                               'pw_env_setup',
                                               'virtualenv_setup')
 
-    # TODO(pwbug/138): find way to support dependent project requirements.
-
-    # For speed, don't build the venv if it exists. Use --clean to rebuild.
-    if not output_directory.joinpath('pyvenv.cfg').is_file():
-        call(
-            'python3',
-            virtualenv_source,
-            f'--venv_path={output_directory}',
-            f'--requirements={virtualenv_source / "requirements.txt"}',
-            *(f'--setup-py-root={p}' for p in [pigweed_root, *setup_py_roots]),
-        )
-
+    # Need to set VIRTUAL_ENV before invoking GN because the GN targets install
+    # directly to the current virtual env.
+    os.environ['VIRTUAL_ENV'] = str(output_directory)
     os.environ['PATH'] = os.pathsep.join((
         str(output_directory.joinpath('bin')),
         os.environ['PATH'],
     ))
+
+    if not gn_targets:
+        gn_targets = (f'{os.environ["PW_ROOT"]}#:python.install', )
+
+    call(
+        'python3',
+        virtualenv_source,
+        f'--venv_path={output_directory}',
+        f'--requirements={virtualenv_source / "requirements.txt"}',
+        *(f'--requirements={x}' for x in requirements),
+        *(f'--gn-target={t}' for t in gn_targets),
+    )
diff --git a/pw_presubmit/py/pw_presubmit/git_repo.py b/pw_presubmit/py/pw_presubmit/git_repo.py
index 8b1c8ee..0daaaea 100644
--- a/pw_presubmit/py/pw_presubmit/git_repo.py
+++ b/pw_presubmit/py/pw_presubmit/git_repo.py
@@ -13,14 +13,15 @@
 # the License.
 """Helpful commands for working with a Git repository."""
 
-import collections
+import logging
 from pathlib import Path
 import subprocess
-from typing import Collection, Dict, Iterable, List, Optional
-from typing import Pattern, Union
+from typing import Collection, Iterable, Iterator, List, NamedTuple, Optional
+from typing import Pattern, Set, Tuple, Union
 
 from pw_presubmit.tools import log_run, plural
 
+_LOG = logging.getLogger(__name__)
 PathOrStr = Union[Path, str]
 
 
@@ -143,7 +144,7 @@
     return Path(
         git_stdout('rev-parse',
                    '--show-toplevel',
-                   repo=repo_path,
+                   repo=repo_path if repo_path.is_dir() else repo_path.parent,
                    show_stderr=show_stderr))
 
 
@@ -167,25 +168,73 @@
     return root(repo).joinpath(repo_path, *additional_repo_paths)
 
 
-def find_python_packages(python_paths: Iterable[PathOrStr],
-                         repo: PathOrStr = '.') -> Dict[Path, List[Path]]:
-    """Returns Python package directories for the files in python_paths."""
-    setup_pys = [
-        file.parent.as_posix()
+class PythonPackage(NamedTuple):
+    root: Path  # Path to the file containing the setup.py
+    package: Path  # Path to the main package directory
+    packaged_files: Tuple[Path, ...]  # All sources in the main package dir
+    other_files: Tuple[Path, ...]  # Other Python files under root
+
+    def all_files(self) -> Tuple[Path, ...]:
+        return self.packaged_files + self.other_files
+
+
+def all_python_packages(repo: PathOrStr = '.') -> Iterator[PythonPackage]:
+    """Finds all Python packages in the repo based on setup.py locations."""
+    root_py_dirs = [
+        file.parent
         for file in _ls_files(['setup.py', '*/setup.py'], Path(repo))
     ]
 
-    package_dirs: Dict[Path, List[Path]] = collections.defaultdict(list)
+    for py_dir in root_py_dirs:
+        all_packaged_files = _ls_files([py_dir / '*' / '*.py'], repo=py_dir)
+        common_dir: Optional[str] = None
 
-    for python_path in (Path(p).resolve().as_posix() for p in python_paths):
-        try:
-            setup_dir = max(setup for setup in setup_pys
-                            if python_path.startswith(setup))
-            package_dirs[Path(setup_dir).resolve()].append(Path(python_path))
-        except ValueError:
-            continue
+        # Make there is only one package directory with Python files in it.
+        for file in all_packaged_files:
+            package_dir = file.relative_to(py_dir).parts[0]
 
-    return package_dirs
+            if common_dir is None:
+                common_dir = package_dir
+            elif common_dir != package_dir:
+                _LOG.warning(
+                    'There are multiple Python package directories in %s: %s '
+                    'and %s. This is not supported by pw presubmit. Each '
+                    'setup.py should correspond with a single Python package',
+                    py_dir, common_dir, package_dir)
+                break
+
+        if common_dir is not None:
+            packaged_files = tuple(_ls_files(['*/*.py'], repo=py_dir))
+            other_files = tuple(
+                f for f in _ls_files(['*.py'], repo=py_dir)
+                if f.name != 'setup.py' and f not in packaged_files)
+
+            yield PythonPackage(py_dir, py_dir / common_dir, packaged_files,
+                                other_files)
+
+
+def python_packages_containing(
+        python_paths: Iterable[Path],
+        repo: PathOrStr = '.') -> Tuple[List[PythonPackage], List[Path]]:
+    """Finds all Python packages containing the provided Python paths.
+
+    Returns:
+      ([packages], [files_not_in_packages])
+    """
+    all_packages = list(all_python_packages(repo))
+
+    packages: Set[PythonPackage] = set()
+    files_not_in_packages: List[Path] = []
+
+    for python_path in python_paths:
+        for package in all_packages:
+            if package.root in python_path.parents:
+                packages.add(package)
+                break
+        else:
+            files_not_in_packages.append(python_path)
+
+    return list(packages), files_not_in_packages
 
 
 def commit_message(commit: str = 'HEAD', repo: PathOrStr = '.') -> str:
diff --git a/pw_presubmit/py/pw_presubmit/pigweed_presubmit.py b/pw_presubmit/py/pw_presubmit/pigweed_presubmit.py
index 6f604fe..794967d 100755
--- a/pw_presubmit/py/pw_presubmit/pigweed_presubmit.py
+++ b/pw_presubmit/py/pw_presubmit/pigweed_presubmit.py
@@ -33,7 +33,6 @@
     import pw_presubmit
 
 from pw_presubmit import build, cli, environment, format_code, git_repo
-from pw_presubmit import python_checks
 from pw_presubmit import call, filter_paths, plural, PresubmitContext
 from pw_presubmit import PresubmitFailure, Programs
 from pw_presubmit.install_hook import install_hook
@@ -49,7 +48,24 @@
 
 
 def init_virtualenv(ctx: PresubmitContext):
-    environment.init_cipd(ctx.root, ctx.output_dir)
+    environment.init_virtualenv(
+        ctx.root,
+        ctx.output_dir,
+        gn_targets=(
+            f'{ctx.root}#:python.install',
+            f'{ctx.root}#:target_support_packages.install',
+        ),
+    )
+
+
+# Trigger builds if files with these extensions change.
+_BUILD_EXTENSIONS = ('.py', '.rst', '.gn', '.gni',
+                     *format_code.C_FORMAT.extensions)
+
+
+def _at_all_optimization_levels(target):
+    for level in ['debug', 'size_optimized', 'speed_optimized']:
+        yield f'{target}_{level}'
 
 
 #
@@ -57,25 +73,54 @@
 #
 def gn_clang_build(ctx: PresubmitContext):
     build.gn_gen(ctx.root, ctx.output_dir)
-    build.ninja(ctx.output_dir, "host_clang")
+    build.ninja(ctx.output_dir, *_at_all_optimization_levels('host_clang'))
 
 
-@filter_paths(endswith=format_code.C_FORMAT.extensions)
+@filter_paths(endswith=_BUILD_EXTENSIONS)
+def gn_quick_build_check(ctx: PresubmitContext):
+    build.gn_gen(ctx.root, ctx.output_dir)
+    build.ninja(ctx.output_dir, 'host_clang_size_optimized',
+                'stm32f429i_size_optimized', 'python.tests', 'python.lint')
+
+
+@filter_paths(endswith=_BUILD_EXTENSIONS)
 def gn_gcc_build(ctx: PresubmitContext):
     build.gn_gen(ctx.root, ctx.output_dir)
-    build.ninja(ctx.output_dir, "host_gcc")
+
+    # Skip optimized host GCC builds for now, since GCC sometimes emits spurious
+    # warnings.
+    #
+    #   -02: GCC 9.3 emits spurious maybe-uninitialized warnings
+    #   -0s: GCC 8.1 (Mingw-w64) emits a spurious nonnull warning
+    #
+    # TODO(pwbug/255): Enable optimized GCC builds when this is fixed.
+    build.ninja(ctx.output_dir, 'host_gcc_debug')
 
 
-@filter_paths(endswith=format_code.C_FORMAT.extensions)
+@filter_paths(endswith=_BUILD_EXTENSIONS)
 def gn_arm_build(ctx: PresubmitContext):
     build.gn_gen(ctx.root, ctx.output_dir)
-    build.ninja(ctx.output_dir, "stm32f429i")
+    build.ninja(ctx.output_dir, *_at_all_optimization_levels('stm32f429i'))
 
 
-@filter_paths(endswith=format_code.C_FORMAT.extensions)
+@filter_paths(endswith=_BUILD_EXTENSIONS)
+def gn_nanopb_build(ctx: PresubmitContext):
+    build.install_package(ctx.package_root, 'nanopb')
+    build.gn_gen(ctx.root,
+                 ctx.output_dir,
+                 dir_pw_third_party_nanopb='"{}"'.format(ctx.package_root /
+                                                         'nanopb'))
+    build.ninja(
+        ctx.output_dir,
+        *_at_all_optimization_levels('stm32f429i'),
+        *_at_all_optimization_levels('host_clang'),
+    )
+
+
+@filter_paths(endswith=_BUILD_EXTENSIONS)
 def gn_qemu_build(ctx: PresubmitContext):
     build.gn_gen(ctx.root, ctx.output_dir)
-    build.ninja(ctx.output_dir, "qemu")
+    build.ninja(ctx.output_dir, *_at_all_optimization_levels('qemu'))
 
 
 def gn_docs_build(ctx: PresubmitContext):
@@ -84,7 +129,7 @@
 
 
 def gn_host_tools(ctx: PresubmitContext):
-    build.gn_gen(ctx.root, ctx.output_dir, pw_build_HOST_TOOLS='true')
+    build.gn_gen(ctx.root, ctx.output_dir, pw_build_HOST_TOOLS=True)
     build.ninja(ctx.output_dir)
 
 
@@ -92,15 +137,32 @@
 def oss_fuzz_build(ctx: PresubmitContext):
     build.gn_gen(ctx.root,
                  ctx.output_dir,
-                 pw_toolchain_OSS_FUZZ_ENABLED='true',
-                 pw_toolchain_SANITIZER='"address"')
+                 pw_toolchain_OSS_FUZZ_ENABLED=True,
+                 pw_toolchain_SANITIZER="address")
     build.ninja(ctx.output_dir, "host_clang")
 
 
+@filter_paths(endswith='.py')
+def python_checks(ctx: PresubmitContext):
+    build.gn_gen(ctx.root, ctx.output_dir)
+    build.ninja(
+        ctx.output_dir,
+        ':python.lint',
+        ':python.tests',
+        ':target_support_packages.lint',
+        ':target_support_packages.tests',
+    )
+
+
 @filter_paths(endswith=(*format_code.C_FORMAT.extensions, '.cmake',
                         'CMakeLists.txt'))
 def cmake_tests(ctx: PresubmitContext):
-    build.cmake(ctx.root, ctx.output_dir, env=build.env_with_clang_vars())
+    toolchain = ctx.root / 'pw_toolchain' / 'host_clang' / 'toolchain.cmake'
+
+    build.cmake(ctx.root,
+                ctx.output_dir,
+                f'-DCMAKE_TOOLCHAIN_FILE={toolchain}',
+                env=build.env_with_clang_vars())
     build.ninja(ctx.output_dir, 'pw_run_tests.modules')
 
 
@@ -188,26 +250,31 @@
 """.splitlines())
 
 _EXCLUDE_FROM_COPYRIGHT_NOTICE: Sequence[str] = (
+    # Configuration
     r'^(?:.+/)?\..+$',
+    r'\bPW_PLUGINS$',
+    # Metadata
     r'^docker/tag$',
     r'\bAUTHORS$',
     r'\bLICENSE$',
     r'\bOWNERS$',
-    r'\bPW_PLUGINS$',
-    r'\.elf$',
-    r'\.gif$',
-    r'\.jpg$',
-    r'\.json$',
-    r'\.md$',
-    r'\.png$',
-    r'\.rst$',
     r'\brequirements.txt$',
     r'\bgo.(mod|sum)$',
     r'\bpackage.json$',
     r'\byarn.lock$',
-    r'\bpw_web_ui/types/serial.d.ts$',
+    # Data files
+    r'\.elf$',
+    r'\.gif$',
+    r'\.jpg$',
+    r'\.json$',
+    r'\.png$',
+    # Documentation
+    r'\.md$',
+    r'\.rst$',
+    # Generated protobuf files
     r'\.pb\.h$',
     r'\.pb\.c$',
+    r'\_pb2.pyi?$',
 )
 
 
@@ -259,18 +326,21 @@
     errors = []
 
     for path in ctx.paths:
-        _LOG.debug('Checking %s', path)
-        with open(path) as file:
+
+        if path.stat().st_size == 0:
+            continue  # Skip empty files
+
+        with path.open() as file:
             (comment, end_block_comment,
              line) = copyright_read_first_line(file)
 
             if not line:
-                _LOG.debug('%s: invalid first line', path)
+                _LOG.warning('%s: invalid first line', path)
                 errors.append(path)
                 continue
 
             if not (comment or end_block_comment):
-                _LOG.debug('%s: invalid first line %r', path, line)
+                _LOG.warning('%s: invalid first line %r', path, line)
                 errors.append(path)
                 continue
 
@@ -286,8 +356,8 @@
                     expected_line = (comment + ' ' + expected).rstrip() + '\n'
 
                 if expected_line != actual:
-                    _LOG.debug('  bad line: %r', actual)
-                    _LOG.debug('  expected: %r', expected_line)
+                    _LOG.warning('  bad line: %r', actual)
+                    _LOG.warning('  expected: %r', expected_line)
                     errors.append(path)
                     break
 
@@ -297,14 +367,16 @@
         raise PresubmitFailure
 
 
-_SOURCES_IN_BUILD = '.rst', *format_code.C_FORMAT.extensions
+_BAZEL_SOURCES_IN_BUILD = tuple(format_code.C_FORMAT.extensions)
+_GN_SOURCES_IN_BUILD = '.rst', '.py', *_BAZEL_SOURCES_IN_BUILD
 
 
-@filter_paths(endswith=(*_SOURCES_IN_BUILD, 'BUILD', '.bzl', '.gn', '.gni'))
+@filter_paths(endswith=(*_GN_SOURCES_IN_BUILD, 'BUILD', '.bzl', '.gn', '.gni'))
 def source_is_in_build_files(ctx: PresubmitContext):
     """Checks that source files are in the GN and Bazel builds."""
     missing = build.check_builds_for_files(
-        _SOURCES_IN_BUILD,
+        _BAZEL_SOURCES_IN_BUILD,
+        _GN_SOURCES_IN_BUILD,
         ctx.paths,
         bazel_dirs=[ctx.root],
         gn_build_files=git_repo.list_files(
@@ -337,15 +409,20 @@
     """Checks that the top commit's message is correctly formatted."""
     lines = git_repo.commit_message().splitlines()
 
+    # Show limits and current commit message in log.
+    _LOG.debug('%-25s%+25s%+22s', 'Line limits', '72|', '72|')
+    for line in lines:
+        _LOG.debug(line)
+
     if not lines:
         _LOG.error('The commit message is too short!')
         raise PresubmitFailure
 
     errors = 0
 
-    if len(lines[0]) > 50:
+    if len(lines[0]) > 72:
         _LOG.warning("The commit message's first line must be no longer than "
-                     '50 characters.')
+                     '72 characters.')
         _LOG.warning('The first line is %d characters:\n  %s', len(lines[0]),
                      lines[0])
         errors += 1
@@ -388,49 +465,47 @@
 BROKEN = (
     # TODO(pwbug/45): Remove clang-tidy from BROKEN when it passes.
     clang_tidy,
-    # Host tools are not broken but take long on slow internet connections.
-    # They're still run in CQ, but not in 'pw presubmit'.
-    gn_host_tools,
-    # QEMU build. Currently doesn't have test runners, and can't build one
-    # of the fuzzing targets.
+    # QEMU build. Currently doesn't have test runners.
     gn_qemu_build,
     # Build that attempts to duplicate the build OSS-Fuzz does. Currently
     # failing.
     oss_fuzz_build,
+    bazel_test,
+    cmake_tests,
+    gn_nanopb_build,
 )
 
 QUICK = (
     commit_message_format,
     init_cipd,
     init_virtualenv,
+    source_is_in_build_files,
+    copyright_notice,
+    format_code.presubmit_checks(),
+    pw_presubmit.pragma_once,
+    gn_quick_build_check,
+    # TODO(pwbug/141): Re-enable CMake and Bazel for Mac after we have fixed the
+    # the clang issues. The problem is that all clang++ invocations need the
+    # two extra flags: "-nostdc++" and "${clang_prefix}/../lib/libc++.a".
+    cmake_tests if sys.platform != 'darwin' else (),
+)
+
+FULL = (
+    commit_message_format,
+    init_cipd,
+    init_virtualenv,
     copyright_notice,
     format_code.presubmit_checks(),
     pw_presubmit.pragma_once,
     gn_clang_build,
     gn_arm_build,
-    source_is_in_build_files,
-    python_checks.all_checks(),
-)
-
-FULL = (
-    init_cipd,
-    init_virtualenv,
-    copyright_notice,
-    format_code.presubmit_checks(),
-    pw_presubmit.pragma_once,
-    gn_clang_build,
-    gn_arm_build,
     gn_docs_build,
+    gn_host_tools,
     # On Mac OS, system 'gcc' is a symlink to 'clang' by default, so skip GCC
     # host builds on Mac for now.
     gn_gcc_build if sys.platform != 'darwin' else (),
-    # TODO(pwbug/141): Re-enable CMake and Bazel for Mac after we have fixed the
-    # the clang issues. The problem is that all clang++ invocations need the
-    # two extra flags: "-nostdc++" and "${clang_prefix}../lib/libc++.a".
-    cmake_tests if sys.platform != 'darwin' else (),
-    bazel_test if sys.platform != 'darwin' else (),
     source_is_in_build_files,
-    python_checks.all_checks(),
+    python_checks,
     build_env_setup,
 )
 
diff --git a/pw_presubmit/py/pw_presubmit/presubmit.py b/pw_presubmit/py/pw_presubmit/presubmit.py
index 3f7b532..a572a9a 100644
--- a/pw_presubmit/py/pw_presubmit/presubmit.py
+++ b/pw_presubmit/py/pw_presubmit/presubmit.py
@@ -49,8 +49,8 @@
 import re
 import subprocess
 import time
-from typing import Callable, Collection, Dict, Iterable, Iterator, List
-from typing import NamedTuple, Optional, Pattern, Sequence, Set, Tuple
+from typing import (Callable, Collection, Dict, Iterable, Iterator, List,
+                    NamedTuple, Optional, Pattern, Sequence, Set, Tuple, Union)
 
 from pw_presubmit import git_repo, tools
 from pw_presubmit.tools import plural
@@ -177,11 +177,20 @@
     repos: Tuple[Path, ...]
     output_dir: Path
     paths: Tuple[Path, ...]
+    package_root: Path
 
-    def relative_paths(self, start: Optional[Path] = None):
+    def relative_paths(self, start: Optional[Path] = None) -> Tuple[Path, ...]:
         return tuple(
             tools.relative_paths(self.paths, start if start else self.root))
 
+    def paths_by_repo(self) -> Dict[Path, List[Path]]:
+        repos = collections.defaultdict(list)
+
+        for path in self.paths:
+            repos[git_repo.root(path)].append(path)
+
+        return repos
+
 
 class _Filter(NamedTuple):
     endswith: Tuple[str, ...] = ('', )
@@ -195,13 +204,15 @@
 class Presubmit:
     """Runs a series of presubmit checks on a list of files."""
     def __init__(self, root: Path, repos: Sequence[Path],
-                 output_directory: Path, paths: Sequence[Path]):
+                 output_directory: Path, paths: Sequence[Path],
+                 package_root: Path):
         self._root = root.resolve()
         self._repos = tuple(repos)
         self._output_directory = output_directory.resolve()
         self._paths = tuple(paths)
         self._relative_paths = tuple(
             tools.relative_paths(self._paths, self._root))
+        self._package_root = package_root.resolve()
 
     def run(self, program: Program, keep_going: bool = False) -> bool:
         """Executes a series of presubmit checks on the paths."""
@@ -309,6 +320,7 @@
                 repos=self._repos,
                 output_dir=output_directory,
                 paths=paths,
+                package_root=self._package_root,
             )
 
         finally:
@@ -373,6 +385,7 @@
         paths: Sequence[str] = (),
         exclude: Sequence[Pattern] = (),
         output_directory: Optional[Path] = None,
+        package_root: Path = None,
         keep_going: bool = False) -> bool:
     """Lists files in the current Git repo and runs a Presubmit with them.
 
@@ -395,6 +408,7 @@
         paths: optional list of Git pathspecs to run the checks against
         exclude: regular expressions for Posix-style paths to exclude
         output_directory: where to place output files
+        package_root: where to place package files
         keep_going: whether to continue running checks if an error occurs
 
     Returns:
@@ -422,7 +436,16 @@
     if output_directory is None:
         output_directory = root / '.presubmit'
 
-    presubmit = Presubmit(root, repos, output_directory, files)
+    if package_root is None:
+        package_root = output_directory / 'packages'
+
+    presubmit = Presubmit(
+        root=root,
+        repos=repos,
+        output_directory=output_directory,
+        paths=files,
+        package_root=package_root,
+    )
 
     if not isinstance(program, Program):
         program = Program('', program)
@@ -529,7 +552,7 @@
 
 
 def filter_paths(endswith: Iterable[str] = (''),
-                 exclude: Iterable[str] = (),
+                 exclude: Iterable[Union[Pattern[str], str]] = (),
                  always_run: bool = False) -> Callable[[Callable], _Check]:
     """Decorator for filtering the paths list for a presubmit check function.
 
diff --git a/pw_presubmit/py/pw_presubmit/py.typed b/pw_presubmit/py/pw_presubmit/py.typed
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pw_presubmit/py/pw_presubmit/py.typed
diff --git a/pw_presubmit/py/pw_presubmit/python_checks.py b/pw_presubmit/py/pw_presubmit/python_checks.py
index 22083f2..1f1e0cd 100644
--- a/pw_presubmit/py/pw_presubmit/python_checks.py
+++ b/pw_presubmit/py/pw_presubmit/python_checks.py
@@ -32,7 +32,8 @@
     import pw_presubmit
 
 from pw_presubmit import call, filter_paths
-from pw_presubmit.git_repo import find_python_packages, list_files
+from pw_presubmit.git_repo import python_packages_containing, list_files
+from pw_presubmit.git_repo import PythonPackage
 
 _LOG = logging.getLogger(__name__)
 
@@ -41,27 +42,29 @@
     return call('python', '-m', *args, **kwargs)
 
 
+TEST_PATTERNS = ('*_test.py', )
+
+
 @filter_paths(endswith='.py')
 def test_python_packages(ctx: pw_presubmit.PresubmitContext,
-                         patterns: Iterable[str] = '*_test.py') -> None:
+                         patterns: Iterable[str] = TEST_PATTERNS) -> None:
     """Finds and runs test files in Python package directories.
 
     Finds the Python packages containing the affected paths, then searches
     within that package for test files. All files matching the provided patterns
     are executed with Python.
     """
-    test_globs = [patterns] if isinstance(patterns, str) else list(patterns)
-
-    packages: List[Path] = []
+    packages: List[PythonPackage] = []
     for repo in ctx.repos:
-        packages += find_python_packages(ctx.paths, repo=repo)
+        packages += python_packages_containing(ctx.paths, repo=repo)[0]
 
     if not packages:
         _LOG.info('No Python packages were found.')
         return
 
     for package in packages:
-        for test in list_files(pathspecs=test_globs, repo_path=package):
+        for test in list_files(pathspecs=tuple(patterns),
+                               repo_path=package.root):
             call('python', test)
 
 
@@ -87,17 +90,27 @@
 
 @filter_paths(endswith='.py')
 def mypy(ctx: pw_presubmit.PresubmitContext) -> None:
+    """Runs mypy on all paths and their packages."""
+    packages: List[PythonPackage] = []
+    other_files: List[Path] = []
+
+    for repo, paths in ctx.paths_by_repo().items():
+        new_packages, files = python_packages_containing(paths, repo=repo)
+        packages += new_packages
+        other_files += files
+
+        for package in new_packages:
+            other_files += package.other_files
+
     # Under some circumstances, mypy cannot check multiple Python files with the
     # same module name. Group filenames so that no duplicates occur in the same
     # mypy invocation. Also, omit setup.py from mypy checks.
     filename_sets: List[Set[str]] = [set()]
-    path_sets: List[List[Path]] = [[]]
+    path_sets: List[List[Path]] = [list(p.package for p in packages)]
 
-    duplicates_ok = '__init__.py', '__main__.py'
-
-    for path in (p for p in ctx.paths if p.name != 'setup.py'):
+    for path in (p for p in other_files if p.name != 'setup.py'):
         for filenames, paths in zip(filename_sets, path_sets):
-            if path.name in duplicates_ok or path.name not in filenames:
+            if path.name not in filenames:
                 paths.append(path)
                 filenames.add(path.name)
                 break
@@ -118,7 +131,8 @@
             '--color-output',
             '--show-error-codes',
             # TODO(pwbug/146): Some imports from installed packages fail. These
-            # imports should be fixed and this option removed.
+            # imports should be fixed and this option removed. See
+            # https://mypy.readthedocs.io/en/stable/installed_packages.html
             '--ignore-missing-imports',
             env=env)
 
diff --git a/pw_presubmit/py/pw_presubmit/tools.py b/pw_presubmit/py/pw_presubmit/tools.py
index 6cb804f..bff329e 100644
--- a/pw_presubmit/py/pw_presubmit/tools.py
+++ b/pw_presubmit/py/pw_presubmit/tools.py
@@ -13,7 +13,7 @@
 # the License.
 """General purpose tools for running presubmit checks."""
 
-import collections
+import collections.abc
 from collections import Counter, defaultdict
 import logging
 import os
@@ -169,7 +169,7 @@
 
     for item in items:
         if isinstance(item, collections.abc.Iterable) and not isinstance(
-                item, (str, bytes)):
+                item, (str, bytes, bytearray)):
             yield from flatten(*item)
         else:
             yield item
diff --git a/pw_presubmit/py/setup.py b/pw_presubmit/py/setup.py
index 791691a..a738a74 100644
--- a/pw_presubmit/py/setup.py
+++ b/pw_presubmit/py/setup.py
@@ -13,7 +13,7 @@
 # the License.
 """The pw_presubmit package."""
 
-import setuptools
+import setuptools  # type: ignore
 
 setuptools.setup(
     name='pw_presubmit',
@@ -22,9 +22,12 @@
     author_email='pigweed-developers@googlegroups.com',
     description='Presubmit tools and a presubmit script for Pigweed',
     install_requires=[
-        'mypy==0.770',
-        'pylint==2.5.2',
+        'mypy==0.790',
+        'pylint==2.6.0',
         'yapf==0.30.0',
+        'pw_package',
     ],
     packages=setuptools.find_packages(),
+    package_data={'pw_presubmit': ['py.typed']},
+    zip_safe=False,
 )
diff --git a/pw_protobuf/BUILD b/pw_protobuf/BUILD
index 9935c01..54b4d3a 100644
--- a/pw_protobuf/BUILD
+++ b/pw_protobuf/BUILD
@@ -34,6 +34,7 @@
         "public/pw_protobuf/decoder.h",
         "public/pw_protobuf/encoder.h",
         "public/pw_protobuf/find.h",
+        "public/pw_protobuf/serialized_size.h",
         "public/pw_protobuf/wire_format.h",
     ],
     includes = ["public"],
diff --git a/pw_protobuf/BUILD.gn b/pw_protobuf/BUILD.gn
index 99d2270..00eae35 100644
--- a/pw_protobuf/BUILD.gn
+++ b/pw_protobuf/BUILD.gn
@@ -12,7 +12,6 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/input_group.gni")
@@ -21,6 +20,7 @@
 import("$dir_pw_fuzzer/fuzzer.gni")
 import("$dir_pw_protobuf_compiler/proto.gni")
 import("$dir_pw_unit_test/test.gni")
+
 config("default_config") {
   include_dirs = [ "public" ]
 }
@@ -28,14 +28,17 @@
 pw_source_set("pw_protobuf") {
   public_configs = [ ":default_config" ]
   public_deps = [
-    "$dir_pw_status",
-    "$dir_pw_varint",
+    dir_pw_bytes,
+    dir_pw_result,
+    dir_pw_status,
+    dir_pw_varint,
   ]
   public = [
     "public/pw_protobuf/codegen.h",
     "public/pw_protobuf/decoder.h",
     "public/pw_protobuf/encoder.h",
     "public/pw_protobuf/find.h",
+    "public/pw_protobuf/serialized_size.h",
     "public/pw_protobuf/wire_format.h",
   ]
   sources = [
@@ -43,7 +46,6 @@
     "encoder.cc",
     "find.cc",
   ]
-  sources += public
 }
 
 pw_doc_group("docs") {
@@ -57,20 +59,6 @@
   ]
 }
 
-# Entrypoint for pw_protobuf's protoc plugin.
-pw_input_group("codegen_protoc_plugin") {
-  inputs = [ "py/pw_protobuf/plugin.py" ]
-  deps = [ ":codegen_protoc_lib" ]
-}
-
-# Source files for pw_protobuf's protoc plugin.
-pw_input_group("codegen_protoc_lib") {
-  inputs = [
-    "py/pw_protobuf/codegen_pwpb.py",
-    "py/pw_protobuf/proto_tree.py",
-  ]
-}
-
 pw_test_group("tests") {
   tests = [
     ":codegen_test",
@@ -97,7 +85,7 @@
 }
 
 pw_test("codegen_test") {
-  deps = [ ":codegen_test_protos_pwpb" ]
+  deps = [ ":codegen_test_protos.pwpb" ]
   sources = [ "codegen_test.cc" ]
 }
 
diff --git a/pw_protobuf/CMakeLists.txt b/pw_protobuf/CMakeLists.txt
new file mode 100644
index 0000000..66fc333
--- /dev/null
+++ b/pw_protobuf/CMakeLists.txt
@@ -0,0 +1,36 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+include($ENV{PW_ROOT}/pw_protobuf_compiler/proto.cmake)
+
+pw_auto_add_simple_module(pw_protobuf
+  PUBLIC_DEPS
+    pw_bytes
+    pw_result
+    pw_status
+    pw_varint
+  TEST_DEPS
+    pw_protobuf.codegen_test_protos.pwpb
+)
+
+pw_proto_library(pw_protobuf.codegen_test_protos
+  SOURCES
+    pw_protobuf_protos/test_protos/full_test.proto
+    pw_protobuf_protos/test_protos/imported.proto
+    pw_protobuf_protos/test_protos/importer.proto
+    pw_protobuf_protos/test_protos/non_pw_package.proto
+    pw_protobuf_protos/test_protos/proto2.proto
+    pw_protobuf_protos/test_protos/repeated.proto
+)
diff --git a/pw_protobuf/codegen_test.cc b/pw_protobuf/codegen_test.cc
index c9845c9..72e11a7 100644
--- a/pw_protobuf/codegen_test.cc
+++ b/pw_protobuf/codegen_test.cc
@@ -166,10 +166,11 @@
   };
   // clang-format on
 
-  std::span<const std::byte> proto;
-  EXPECT_EQ(encoder.Encode(&proto), Status::OK);
-  EXPECT_EQ(proto.size(), sizeof(expected_proto));
-  EXPECT_EQ(std::memcmp(proto.data(), expected_proto, sizeof(expected_proto)),
+  Result result = encoder.Encode();
+  ASSERT_EQ(result.status(), Status::Ok());
+  EXPECT_EQ(result.value().size(), sizeof(expected_proto));
+  EXPECT_EQ(std::memcmp(
+                result.value().data(), expected_proto, sizeof(expected_proto)),
             0);
 }
 
@@ -185,10 +186,11 @@
   constexpr uint8_t expected_proto[] = {
       0x08, 0x00, 0x08, 0x10, 0x08, 0x20, 0x08, 0x30};
 
-  std::span<const std::byte> proto;
-  EXPECT_EQ(encoder.Encode(&proto), Status::OK);
-  EXPECT_EQ(proto.size(), sizeof(expected_proto));
-  EXPECT_EQ(std::memcmp(proto.data(), expected_proto, sizeof(expected_proto)),
+  Result result = encoder.Encode();
+  ASSERT_EQ(result.status(), Status::Ok());
+  EXPECT_EQ(result.value().size(), sizeof(expected_proto));
+  EXPECT_EQ(std::memcmp(
+                result.value().data(), expected_proto, sizeof(expected_proto)),
             0);
 }
 
@@ -201,10 +203,11 @@
   repeated_test.WriteUint32s(values);
 
   constexpr uint8_t expected_proto[] = {0x0a, 0x04, 0x00, 0x10, 0x20, 0x30};
-  std::span<const std::byte> proto;
-  EXPECT_EQ(encoder.Encode(&proto), Status::OK);
-  EXPECT_EQ(proto.size(), sizeof(expected_proto));
-  EXPECT_EQ(std::memcmp(proto.data(), expected_proto, sizeof(expected_proto)),
+  Result result = encoder.Encode();
+  ASSERT_EQ(result.status(), Status::Ok());
+  EXPECT_EQ(result.value().size(), sizeof(expected_proto));
+  EXPECT_EQ(std::memcmp(
+                result.value().data(), expected_proto, sizeof(expected_proto)),
             0);
 }
 
@@ -221,10 +224,11 @@
   constexpr uint8_t expected_proto[] = {
       0x1a, 0x03, 't', 'h', 'e', 0x1a, 0x5, 'q',  'u', 'i', 'c', 'k',
       0x1a, 0x5,  'b', 'r', 'o', 'w',  'n', 0x1a, 0x3, 'f', 'o', 'x'};
-  std::span<const std::byte> proto;
-  EXPECT_EQ(encoder.Encode(&proto), Status::OK);
-  EXPECT_EQ(proto.size(), sizeof(expected_proto));
-  EXPECT_EQ(std::memcmp(proto.data(), expected_proto, sizeof(expected_proto)),
+  Result result = encoder.Encode();
+  ASSERT_EQ(result.status(), Status::Ok());
+  EXPECT_EQ(result.value().size(), sizeof(expected_proto));
+  EXPECT_EQ(std::memcmp(
+                result.value().data(), expected_proto, sizeof(expected_proto)),
             0);
 }
 
@@ -245,10 +249,11 @@
     0x01, 0x10, 0x02, 0x2a, 0x04, 0x08, 0x02, 0x10, 0x04};
   // clang-format on
 
-  std::span<const std::byte> proto;
-  EXPECT_EQ(encoder.Encode(&proto), Status::OK);
-  EXPECT_EQ(proto.size(), sizeof(expected_proto));
-  EXPECT_EQ(std::memcmp(proto.data(), expected_proto, sizeof(expected_proto)),
+  Result result = encoder.Encode();
+  ASSERT_EQ(result.status(), Status::Ok());
+  EXPECT_EQ(result.value().size(), sizeof(expected_proto));
+  EXPECT_EQ(std::memcmp(
+                result.value().data(), expected_proto, sizeof(expected_proto)),
             0);
 }
 
@@ -269,10 +274,11 @@
   constexpr uint8_t expected_proto[] = {
       0x08, 0x03, 0x1a, 0x06, 0x0a, 0x04, 0xde, 0xad, 0xbe, 0xef};
 
-  std::span<const std::byte> proto;
-  EXPECT_EQ(encoder.Encode(&proto), Status::OK);
-  EXPECT_EQ(proto.size(), sizeof(expected_proto));
-  EXPECT_EQ(std::memcmp(proto.data(), expected_proto, sizeof(expected_proto)),
+  Result result = encoder.Encode();
+  ASSERT_EQ(result.status(), Status::Ok());
+  EXPECT_EQ(result.value().size(), sizeof(expected_proto));
+  EXPECT_EQ(std::memcmp(
+                result.value().data(), expected_proto, sizeof(expected_proto)),
             0);
 }
 
@@ -293,8 +299,7 @@
     end.WriteNanoseconds(490367432);
   }
 
-  std::span<const std::byte> proto;
-  EXPECT_EQ(encoder.Encode(&proto), Status::OK);
+  EXPECT_EQ(encoder.Encode().status(), Status::Ok());
 }
 
 TEST(Codegen, NonPigweedPackage) {
@@ -306,8 +311,7 @@
   packed.WriteRep(std::span<const int64_t>(repeated));
   packed.WritePacked("packed");
 
-  std::span<const std::byte> proto;
-  EXPECT_EQ(encoder.Encode(&proto), Status::OK);
+  EXPECT_EQ(encoder.Encode().status(), Status::Ok());
 }
 
 }  // namespace
diff --git a/pw_protobuf/decoder.cc b/pw_protobuf/decoder.cc
index 72127be..70880ad 100644
--- a/pw_protobuf/decoder.cc
+++ b/pw_protobuf/decoder.cc
@@ -27,24 +27,24 @@
     }
   }
   if (proto_.empty()) {
-    return Status::OUT_OF_RANGE;
+    return Status::OutOfRange();
   }
   previous_field_consumed_ = false;
-  return FieldSize() == 0 ? Status::DATA_LOSS : Status::OK;
+  return FieldSize() == 0 ? Status::DataLoss() : Status::Ok();
 }
 
 Status Decoder::SkipField() {
   if (proto_.empty()) {
-    return Status::OUT_OF_RANGE;
+    return Status::OutOfRange();
   }
 
   size_t bytes_to_skip = FieldSize();
   if (bytes_to_skip == 0) {
-    return Status::DATA_LOSS;
+    return Status::DataLoss();
   }
 
   proto_ = proto_.subspan(bytes_to_skip);
-  return proto_.empty() ? Status::OUT_OF_RANGE : Status::OK;
+  return proto_.empty() ? Status::OutOfRange() : Status::Ok();
 }
 
 uint32_t Decoder::FieldNumber() const {
@@ -60,10 +60,10 @@
     return status;
   }
   if (value > std::numeric_limits<uint32_t>::max()) {
-    return Status::OUT_OF_RANGE;
+    return Status::OutOfRange();
   }
   *out = value;
-  return Status::OK;
+  return Status::Ok();
 }
 
 Status Decoder::ReadSint32(int32_t* out) {
@@ -73,10 +73,10 @@
     return status;
   }
   if (value > std::numeric_limits<int32_t>::max()) {
-    return Status::OUT_OF_RANGE;
+    return Status::OutOfRange();
   }
   *out = value;
-  return Status::OK;
+  return Status::Ok();
 }
 
 Status Decoder::ReadSint64(int64_t* out) {
@@ -86,7 +86,7 @@
     return status;
   }
   *out = varint::ZigZagDecode(value);
-  return Status::OK;
+  return Status::Ok();
 }
 
 Status Decoder::ReadBool(bool* out) {
@@ -96,7 +96,7 @@
     return status;
   }
   *out = value;
-  return Status::OK;
+  return Status::Ok();
 }
 
 Status Decoder::ReadString(std::string_view* out) {
@@ -107,7 +107,7 @@
   }
   *out = std::string_view(reinterpret_cast<const char*>(bytes.data()),
                           bytes.size());
-  return Status::OK;
+  return Status::Ok();
 }
 
 size_t Decoder::FieldSize() const {
@@ -159,17 +159,17 @@
   uint64_t key;
   size_t bytes_read = varint::Decode(proto_, &key);
   if (bytes_read == 0) {
-    return Status::FAILED_PRECONDITION;
+    return Status::FailedPrecondition();
   }
 
   WireType wire_type = static_cast<WireType>(key & kWireTypeMask);
   if (wire_type != expected_type) {
-    return Status::FAILED_PRECONDITION;
+    return Status::FailedPrecondition();
   }
 
   // Advance past the key.
   proto_ = proto_.subspan(bytes_read);
-  return Status::OK;
+  return Status::Ok();
 }
 
 Status Decoder::ReadVarint(uint64_t* out) {
@@ -179,13 +179,13 @@
 
   size_t bytes_read = varint::Decode(proto_, out);
   if (bytes_read == 0) {
-    return Status::DATA_LOSS;
+    return Status::DataLoss();
   }
 
   // Advance to the next field.
   proto_ = proto_.subspan(bytes_read);
   previous_field_consumed_ = true;
-  return Status::OK;
+  return Status::Ok();
 }
 
 Status Decoder::ReadFixed(std::byte* out, size_t size) {
@@ -197,14 +197,14 @@
   }
 
   if (proto_.size() < size) {
-    return Status::DATA_LOSS;
+    return Status::DataLoss();
   }
 
   std::memcpy(out, proto_.data(), size);
   proto_ = proto_.subspan(size);
   previous_field_consumed_ = true;
 
-  return Status::OK;
+  return Status::Ok();
 }
 
 Status Decoder::ReadDelimited(std::span<const std::byte>* out) {
@@ -216,24 +216,24 @@
   uint64_t length;
   size_t bytes_read = varint::Decode(proto_, &length);
   if (bytes_read == 0) {
-    return Status::DATA_LOSS;
+    return Status::DataLoss();
   }
 
   proto_ = proto_.subspan(bytes_read);
   if (proto_.size() < length) {
-    return Status::DATA_LOSS;
+    return Status::DataLoss();
   }
 
   *out = proto_.first(length);
   proto_ = proto_.subspan(length);
   previous_field_consumed_ = true;
 
-  return Status::OK;
+  return Status::Ok();
 }
 
 Status CallbackDecoder::Decode(std::span<const std::byte> proto) {
   if (handler_ == nullptr || state_ != kReady) {
-    return Status::FAILED_PRECONDITION;
+    return Status::FailedPrecondition();
   }
 
   state_ = kDecodeInProgress;
@@ -242,7 +242,7 @@
   // Iterate the proto, calling the handler with each field number.
   while (state_ == kDecodeInProgress) {
     if (Status status = decoder_.Next(); !status.ok()) {
-      if (status == Status::OUT_OF_RANGE) {
+      if (status == Status::OutOfRange()) {
         // Reached the end of the proto.
         break;
       }
@@ -253,7 +253,7 @@
 
     Status status = handler_->ProcessField(*this, decoder_.FieldNumber());
     if (!status.ok()) {
-      state_ = status == Status::CANCELLED ? kDecodeCancelled : kDecodeFailed;
+      state_ = status == Status::Cancelled() ? kDecodeCancelled : kDecodeFailed;
       return status;
     }
 
@@ -265,11 +265,11 @@
   }
 
   if (state_ != kDecodeInProgress) {
-    return Status::DATA_LOSS;
+    return Status::DataLoss();
   }
 
   state_ = kReady;
-  return Status::OK;
+  return Status::Ok();
 }
 
 }  // namespace pw::protobuf
diff --git a/pw_protobuf/decoder_test.cc b/pw_protobuf/decoder_test.cc
index 7dad4b2..0b4f6ff 100644
--- a/pw_protobuf/decoder_test.cc
+++ b/pw_protobuf/decoder_test.cc
@@ -50,7 +50,7 @@
     }
 
     called = true;
-    return Status::OK;
+    return Status::Ok();
   }
 
   bool called = false;
@@ -83,45 +83,45 @@
   Decoder decoder(std::as_bytes(std::span(encoded_proto)));
 
   int32_t v1 = 0;
-  EXPECT_EQ(decoder.Next(), Status::OK);
+  EXPECT_EQ(decoder.Next(), Status::Ok());
   ASSERT_EQ(decoder.FieldNumber(), 1u);
-  EXPECT_EQ(decoder.ReadInt32(&v1), Status::OK);
+  EXPECT_EQ(decoder.ReadInt32(&v1), Status::Ok());
   EXPECT_EQ(v1, 42);
 
   int32_t v2 = 0;
-  EXPECT_EQ(decoder.Next(), Status::OK);
+  EXPECT_EQ(decoder.Next(), Status::Ok());
   ASSERT_EQ(decoder.FieldNumber(), 2u);
-  EXPECT_EQ(decoder.ReadSint32(&v2), Status::OK);
+  EXPECT_EQ(decoder.ReadSint32(&v2), Status::Ok());
   EXPECT_EQ(v2, -13);
 
   bool v3 = true;
-  EXPECT_EQ(decoder.Next(), Status::OK);
+  EXPECT_EQ(decoder.Next(), Status::Ok());
   ASSERT_EQ(decoder.FieldNumber(), 3u);
-  EXPECT_EQ(decoder.ReadBool(&v3), Status::OK);
+  EXPECT_EQ(decoder.ReadBool(&v3), Status::Ok());
   EXPECT_FALSE(v3);
 
   double v4 = 0;
-  EXPECT_EQ(decoder.Next(), Status::OK);
+  EXPECT_EQ(decoder.Next(), Status::Ok());
   ASSERT_EQ(decoder.FieldNumber(), 4u);
-  EXPECT_EQ(decoder.ReadDouble(&v4), Status::OK);
+  EXPECT_EQ(decoder.ReadDouble(&v4), Status::Ok());
   EXPECT_EQ(v4, 3.14159);
 
   uint32_t v5 = 0;
-  EXPECT_EQ(decoder.Next(), Status::OK);
+  EXPECT_EQ(decoder.Next(), Status::Ok());
   ASSERT_EQ(decoder.FieldNumber(), 5u);
-  EXPECT_EQ(decoder.ReadFixed32(&v5), Status::OK);
+  EXPECT_EQ(decoder.ReadFixed32(&v5), Status::Ok());
   EXPECT_EQ(v5, 0xdeadbeef);
 
   std::string_view v6;
   char buffer[16];
-  EXPECT_EQ(decoder.Next(), Status::OK);
+  EXPECT_EQ(decoder.Next(), Status::Ok());
   ASSERT_EQ(decoder.FieldNumber(), 6u);
-  EXPECT_EQ(decoder.ReadString(&v6), Status::OK);
+  EXPECT_EQ(decoder.ReadString(&v6), Status::Ok());
   std::memcpy(buffer, v6.data(), v6.size());
   buffer[v6.size()] = '\0';
   EXPECT_STREQ(buffer, "Hello world");
 
-  EXPECT_EQ(decoder.Next(), Status::OUT_OF_RANGE);
+  EXPECT_EQ(decoder.Next(), Status::OutOfRange());
 }
 
 TEST(Decoder, Decode_SkipsUnusedFields) {
@@ -146,14 +146,14 @@
 
   // Don't process any fields except for the fourth. Next should still iterate
   // correctly despite field values not being consumed.
-  EXPECT_EQ(decoder.Next(), Status::OK);
-  EXPECT_EQ(decoder.Next(), Status::OK);
-  EXPECT_EQ(decoder.Next(), Status::OK);
-  EXPECT_EQ(decoder.Next(), Status::OK);
+  EXPECT_EQ(decoder.Next(), Status::Ok());
+  EXPECT_EQ(decoder.Next(), Status::Ok());
+  EXPECT_EQ(decoder.Next(), Status::Ok());
+  EXPECT_EQ(decoder.Next(), Status::Ok());
   ASSERT_EQ(decoder.FieldNumber(), 4u);
-  EXPECT_EQ(decoder.Next(), Status::OK);
-  EXPECT_EQ(decoder.Next(), Status::OK);
-  EXPECT_EQ(decoder.Next(), Status::OUT_OF_RANGE);
+  EXPECT_EQ(decoder.Next(), Status::Ok());
+  EXPECT_EQ(decoder.Next(), Status::Ok());
+  EXPECT_EQ(decoder.Next(), Status::OutOfRange());
 }
 
 TEST(CallbackDecoder, Decode) {
@@ -179,7 +179,7 @@
 
   decoder.set_handler(&handler);
   EXPECT_EQ(decoder.Decode(std::as_bytes(std::span(encoded_proto))),
-            Status::OK);
+            Status::Ok());
   EXPECT_TRUE(handler.called);
   EXPECT_EQ(handler.test_int32, 42);
   EXPECT_EQ(handler.test_sint32, -13);
@@ -206,7 +206,7 @@
 
   decoder.set_handler(&handler);
   EXPECT_EQ(decoder.Decode(std::as_bytes(std::span(encoded_proto))),
-            Status::OK);
+            Status::Ok());
   EXPECT_TRUE(handler.called);
   EXPECT_EQ(handler.test_int32, 44);
 }
@@ -216,7 +216,7 @@
   TestDecodeHandler handler;
 
   decoder.set_handler(&handler);
-  EXPECT_EQ(decoder.Decode(std::span<std::byte>()), Status::OK);
+  EXPECT_EQ(decoder.Decode(std::span<std::byte>()), Status::Ok());
   EXPECT_FALSE(handler.called);
   EXPECT_EQ(handler.test_int32, 0);
   EXPECT_EQ(handler.test_sint32, 0);
@@ -231,7 +231,7 @@
 
   decoder.set_handler(&handler);
   EXPECT_EQ(decoder.Decode(std::as_bytes(std::span(encoded_proto))),
-            Status::DATA_LOSS);
+            Status::DataLoss());
 }
 
 // Only processes fields numbered 1 or 3.
@@ -241,10 +241,10 @@
                       uint32_t field_number) override {
     switch (field_number) {
       case 1:
-        EXPECT_EQ(decoder.ReadInt32(&field_one), Status::OK);
+        EXPECT_EQ(decoder.ReadInt32(&field_one), Status::Ok());
         break;
       case 3:
-        EXPECT_EQ(decoder.ReadInt32(&field_three), Status::OK);
+        EXPECT_EQ(decoder.ReadInt32(&field_three), Status::Ok());
         break;
       default:
         // Do nothing.
@@ -252,7 +252,7 @@
     }
 
     called = true;
-    return Status::OK;
+    return Status::Ok();
   }
 
   bool called = false;
@@ -286,7 +286,7 @@
 
   decoder.set_handler(&handler);
   EXPECT_EQ(decoder.Decode(std::as_bytes(std::span(encoded_proto))),
-            Status::OK);
+            Status::Ok());
   EXPECT_TRUE(handler.called);
   EXPECT_EQ(handler.field_one, 42);
   EXPECT_EQ(handler.field_three, 99);
@@ -299,17 +299,17 @@
                       uint32_t field_number) override {
     switch (field_number) {
       case 1:
-        EXPECT_EQ(decoder.ReadInt32(&field_one), Status::OK);
-        return Status::CANCELLED;
+        EXPECT_EQ(decoder.ReadInt32(&field_one), Status::Ok());
+        return Status::Cancelled();
       case 3:
-        EXPECT_EQ(decoder.ReadInt32(&field_three), Status::OK);
+        EXPECT_EQ(decoder.ReadInt32(&field_three), Status::Ok());
         break;
       default:
         // Do nothing.
         break;
     }
 
-    return Status::OK;
+    return Status::Ok();
   }
 
   int32_t field_one = 0;
@@ -336,7 +336,7 @@
 
   decoder.set_handler(&handler);
   EXPECT_EQ(decoder.Decode(std::as_bytes(std::span(encoded_proto))),
-            Status::CANCELLED);
+            Status::Cancelled());
   EXPECT_EQ(handler.field_one, 42);
   EXPECT_EQ(handler.field_three, 1111);
 }
diff --git a/pw_protobuf/decoding.rst b/pw_protobuf/decoding.rst
index d0dbea3..7120f60 100644
--- a/pw_protobuf/decoding.rst
+++ b/pw_protobuf/decoding.rst
@@ -1,8 +1,4 @@
-.. default-domain:: cpp
-
-.. highlight:: sh
-
-.. _chapter-protobuf-decoder:
+.. _module-pw_protobuf-decoding:
 
 --------
 Decoding
diff --git a/pw_protobuf/docs.rst b/pw_protobuf/docs.rst
index df6042c..ffc13bc 100644
--- a/pw_protobuf/docs.rst
+++ b/pw_protobuf/docs.rst
@@ -1,8 +1,4 @@
-.. default-domain:: cpp
-
-.. highlight:: sh
-
-.. _chapter-protobuf:
+.. _module-pw_protobuf:
 
 -----------
 pw_protobuf
@@ -36,7 +32,7 @@
 ``pw_protobuf`` splits wire format encoding and decoding operations. Links to
 the design and APIs of each are listed in below.
 
-See also :ref:`chapter-pw-protobuf-compiler` for details on ``pw_protobuf``'s
+See also :ref:`module-pw_protobuf_compiler` for details on ``pw_protobuf``'s
 build system integration.
 
 **pw_protobuf functionality**
diff --git a/pw_protobuf/encoder.cc b/pw_protobuf/encoder.cc
index 96b9b7d..66174fc 100644
--- a/pw_protobuf/encoder.cc
+++ b/pw_protobuf/encoder.cc
@@ -32,18 +32,18 @@
 
   std::span varint_buf = buffer_.last(RemainingSize());
   if (varint_buf.empty()) {
-    encode_status_ = Status::RESOURCE_EXHAUSTED;
+    encode_status_ = Status::ResourceExhausted();
     return encode_status_;
   }
 
   size_t written = pw::varint::EncodeLittleEndianBase128(value, varint_buf);
   if (written == 0) {
-    encode_status_ = Status::RESOURCE_EXHAUSTED;
+    encode_status_ = Status::ResourceExhausted();
     return encode_status_;
   }
 
   cursor_ += written;
-  return Status::OK;
+  return Status::Ok();
 }
 
 Status Encoder::WriteRawBytes(const std::byte* ptr, size_t size) {
@@ -52,7 +52,7 @@
   }
 
   if (size > RemainingSize()) {
-    encode_status_ = Status::RESOURCE_EXHAUSTED;
+    encode_status_ = Status::ResourceExhausted();
     return encode_status_;
   }
 
@@ -61,7 +61,7 @@
   std::memmove(cursor_, ptr, size);
 
   cursor_ += size;
-  return Status::OK;
+  return Status::Ok();
 }
 
 Status Encoder::Push(uint32_t field_number) {
@@ -70,7 +70,7 @@
   }
 
   if (blob_count_ == blob_locations_.size() || depth_ == blob_stack_.size()) {
-    encode_status_ = Status::RESOURCE_EXHAUSTED;
+    encode_status_ = Status::ResourceExhausted();
     return encode_status_;
   }
 
@@ -85,7 +85,7 @@
   if (sizeof(SizeType) > RemainingSize()) {
     // Rollback if there isn't enough space.
     cursor_ = original_cursor;
-    encode_status_ = Status::RESOURCE_EXHAUSTED;
+    encode_status_ = Status::ResourceExhausted();
     return encode_status_;
   }
 
@@ -105,7 +105,7 @@
   blob_stack_[depth_++] = size_cursor;
 
   cursor_ += sizeof(*size_cursor);
-  return Status::OK;
+  return Status::Ok();
 }
 
 Status Encoder::Pop() {
@@ -114,7 +114,7 @@
   }
 
   if (depth_ == 0) {
-    encode_status_ = Status::FAILED_PRECONDITION;
+    encode_status_ = Status::FailedPrecondition();
     return encode_status_;
   }
 
@@ -123,19 +123,17 @@
   SizeType child_size = *blob_stack_[--depth_];
   IncreaseParentSize(child_size + VarintSizeBytes(child_size));
 
-  return Status::OK;
+  return Status::Ok();
 }
 
-Status Encoder::Encode(std::span<const std::byte>* out) {
+Result<ConstByteSpan> Encoder::Encode() {
   if (!encode_status_.ok()) {
-    *out = std::span<const std::byte>();
     return encode_status_;
   }
 
   if (blob_count_ == 0) {
     // If there are no nested blobs, the buffer already contains a valid proto.
-    *out = buffer_.first(EncodedSize());
-    return Status::OK;
+    return Result<ConstByteSpan>(buffer_.first(EncodedSize()));
   }
 
   union {
@@ -179,8 +177,7 @@
 
   // Point the cursor to the end of the encoded proto.
   cursor_ = write_cursor;
-  *out = buffer_.first(EncodedSize());
-  return Status::OK;
+  return Result<ConstByteSpan>(buffer_.first(EncodedSize()));
 }
 
 }  // namespace pw::protobuf
diff --git a/pw_protobuf/encoder_fuzzer.cc b/pw_protobuf/encoder_fuzzer.cc
index e0991d0..1ede047 100644
--- a/pw_protobuf/encoder_fuzzer.cc
+++ b/pw_protobuf/encoder_fuzzer.cc
@@ -133,7 +133,6 @@
   ASAN_POISON_MEMORY_REGION(poisoned, poisoned_length);
 
   pw::protobuf::NestedEncoder encoder(unpoisoned);
-  std::span<const std::byte> out;
 
   // Storage for generated spans
   std::vector<uint32_t> u32s;
@@ -154,7 +153,7 @@
     switch (provider.ConsumeEnum<FieldType>()) {
       case kEncodeAndClear:
         // Special "field". Encode all the fields so far and reset the encoder.
-        encoder.Encode(&out);
+        encoder.Encode();
         encoder.Clear();
         break;
       case kUint32:
@@ -278,7 +277,7 @@
     }
   }
   // Ensure we call `Encode` at least once.
-  encoder.Encode(&out);
+  encoder.Encode();
 
   // Don't forget to unpoison for the next iteration!
   ASAN_UNPOISON_MEMORY_REGION(poisoned, poisoned_length);
diff --git a/pw_protobuf/encoder_test.cc b/pw_protobuf/encoder_test.cc
index 8a86057..605467b 100644
--- a/pw_protobuf/encoder_test.cc
+++ b/pw_protobuf/encoder_test.cc
@@ -85,18 +85,20 @@
   std::byte encode_buffer[32];
   NestedEncoder encoder(encode_buffer);
 
-  EXPECT_EQ(encoder.WriteUint32(kTestProtoMagicNumberField, 42), Status::OK);
-  EXPECT_EQ(encoder.WriteSint32(kTestProtoZiggyField, -13), Status::OK);
+  EXPECT_EQ(encoder.WriteUint32(kTestProtoMagicNumberField, 42), Status::Ok());
+  EXPECT_EQ(encoder.WriteSint32(kTestProtoZiggyField, -13), Status::Ok());
   EXPECT_EQ(encoder.WriteFixed64(kTestProtoCyclesField, 0xdeadbeef8badf00d),
-            Status::OK);
-  EXPECT_EQ(encoder.WriteFloat(kTestProtoRatioField, 1.618034), Status::OK);
+            Status::Ok());
+  EXPECT_EQ(encoder.WriteFloat(kTestProtoRatioField, 1.618034), Status::Ok());
   EXPECT_EQ(encoder.WriteString(kTestProtoErrorMessageField, "broken 💩"),
-            Status::OK);
+            Status::Ok());
 
-  std::span<const std::byte> encoded;
-  EXPECT_EQ(encoder.Encode(&encoded), Status::OK);
-  EXPECT_EQ(encoded.size(), sizeof(encoded_proto));
-  EXPECT_EQ(std::memcmp(encoded.data(), encoded_proto, encoded.size()), 0);
+  Result result = encoder.Encode();
+  ASSERT_EQ(result.status(), Status::Ok());
+  EXPECT_EQ(result.value().size(), sizeof(encoded_proto));
+  EXPECT_EQ(
+      std::memcmp(result.value().data(), encoded_proto, sizeof(encoded_proto)),
+      0);
 }
 
 TEST(Encoder, EncodeInsufficientSpace) {
@@ -104,38 +106,34 @@
   NestedEncoder encoder(encode_buffer);
 
   // 2 bytes.
-  EXPECT_EQ(encoder.WriteUint32(kTestProtoMagicNumberField, 42), Status::OK);
+  EXPECT_EQ(encoder.WriteUint32(kTestProtoMagicNumberField, 42), Status::Ok());
   // 2 bytes.
-  EXPECT_EQ(encoder.WriteSint32(kTestProtoZiggyField, -13), Status::OK);
+  EXPECT_EQ(encoder.WriteSint32(kTestProtoZiggyField, -13), Status::Ok());
   // 9 bytes; not enough space! The encoder will start writing the field but
   // should rollback when it realizes it doesn't have enough space.
   EXPECT_EQ(encoder.WriteFixed64(kTestProtoCyclesField, 0xdeadbeef8badf00d),
-            Status::RESOURCE_EXHAUSTED);
+            Status::ResourceExhausted());
   // Any further write operations should fail.
   EXPECT_EQ(encoder.WriteFloat(kTestProtoRatioField, 1.618034),
-            Status::RESOURCE_EXHAUSTED);
+            Status::ResourceExhausted());
 
-  std::span<const std::byte> encoded;
-  EXPECT_EQ(encoder.Encode(&encoded), Status::RESOURCE_EXHAUSTED);
-  EXPECT_EQ(encoded.size(), 0u);
+  ASSERT_EQ(encoder.Encode().status(), Status::ResourceExhausted());
 }
 
 TEST(Encoder, EncodeInvalidArguments) {
   std::byte encode_buffer[12];
   NestedEncoder encoder(encode_buffer);
 
-  EXPECT_EQ(encoder.WriteUint32(kTestProtoMagicNumberField, 42), Status::OK);
+  EXPECT_EQ(encoder.WriteUint32(kTestProtoMagicNumberField, 42), Status::Ok());
   // Invalid proto field numbers.
-  EXPECT_EQ(encoder.WriteUint32(0, 1337), Status::INVALID_ARGUMENT);
+  EXPECT_EQ(encoder.WriteUint32(0, 1337), Status::InvalidArgument());
   encoder.Clear();
 
-  EXPECT_EQ(encoder.WriteString(1u << 31, "ha"), Status::INVALID_ARGUMENT);
+  EXPECT_EQ(encoder.WriteString(1u << 31, "ha"), Status::InvalidArgument());
   encoder.Clear();
 
-  EXPECT_EQ(encoder.WriteBool(19091, false), Status::INVALID_ARGUMENT);
-  std::span<const std::byte> encoded;
-  EXPECT_EQ(encoder.Encode(&encoded), Status::INVALID_ARGUMENT);
-  EXPECT_EQ(encoded.size(), 0u);
+  EXPECT_EQ(encoder.WriteBool(19091, false), Status::InvalidArgument());
+  ASSERT_EQ(encoder.Encode().status(), Status::InvalidArgument());
 }
 
 TEST(Encoder, Nested) {
@@ -144,47 +142,48 @@
 
   // TestProto test_proto;
   // test_proto.magic_number = 42;
-  EXPECT_EQ(encoder.WriteUint32(kTestProtoMagicNumberField, 42), Status::OK);
+  EXPECT_EQ(encoder.WriteUint32(kTestProtoMagicNumberField, 42), Status::Ok());
 
   {
     // NestedProto& nested_proto = test_proto.nested;
-    EXPECT_EQ(encoder.Push(kTestProtoNestedField), Status::OK);
+    EXPECT_EQ(encoder.Push(kTestProtoNestedField), Status::Ok());
     // nested_proto.hello = "world";
-    EXPECT_EQ(encoder.WriteString(kNestedProtoHelloField, "world"), Status::OK);
+    EXPECT_EQ(encoder.WriteString(kNestedProtoHelloField, "world"),
+              Status::Ok());
     // nested_proto.id = 999;
-    EXPECT_EQ(encoder.WriteUint32(kNestedProtoIdField, 999), Status::OK);
+    EXPECT_EQ(encoder.WriteUint32(kNestedProtoIdField, 999), Status::Ok());
 
     {
       // DoubleNestedProto& double_nested_proto = nested_proto.append_pair();
-      EXPECT_EQ(encoder.Push(kNestedProtoPairField), Status::OK);
+      EXPECT_EQ(encoder.Push(kNestedProtoPairField), Status::Ok());
       // double_nested_proto.key = "version";
       EXPECT_EQ(encoder.WriteString(kDoubleNestedProtoKeyField, "version"),
-                Status::OK);
+                Status::Ok());
       // double_nested_proto.value = "2.9.1";
       EXPECT_EQ(encoder.WriteString(kDoubleNestedProtoValueField, "2.9.1"),
-                Status::OK);
+                Status::Ok());
 
-      EXPECT_EQ(encoder.Pop(), Status::OK);
+      EXPECT_EQ(encoder.Pop(), Status::Ok());
     }  // end DoubleNestedProto
 
     {
       // DoubleNestedProto& double_nested_proto = nested_proto.append_pair();
-      EXPECT_EQ(encoder.Push(kNestedProtoPairField), Status::OK);
+      EXPECT_EQ(encoder.Push(kNestedProtoPairField), Status::Ok());
       // double_nested_proto.key = "device";
       EXPECT_EQ(encoder.WriteString(kDoubleNestedProtoKeyField, "device"),
-                Status::OK);
+                Status::Ok());
       // double_nested_proto.value = "left-soc";
       EXPECT_EQ(encoder.WriteString(kDoubleNestedProtoValueField, "left-soc"),
-                Status::OK);
+                Status::Ok());
 
-      EXPECT_EQ(encoder.Pop(), Status::OK);
+      EXPECT_EQ(encoder.Pop(), Status::Ok());
     }  // end DoubleNestedProto
 
-    EXPECT_EQ(encoder.Pop(), Status::OK);
+    EXPECT_EQ(encoder.Pop(), Status::Ok());
   }  // end NestedProto
 
   // test_proto.ziggy = -13;
-  EXPECT_EQ(encoder.WriteSint32(kTestProtoZiggyField, -13), Status::OK);
+  EXPECT_EQ(encoder.WriteSint32(kTestProtoZiggyField, -13), Status::Ok());
 
   // clang-format off
   constexpr uint8_t encoded_proto[] = {
@@ -213,10 +212,12 @@
   };
   // clang-format on
 
-  std::span<const std::byte> encoded;
-  EXPECT_EQ(encoder.Encode(&encoded), Status::OK);
-  EXPECT_EQ(encoded.size(), sizeof(encoded_proto));
-  EXPECT_EQ(std::memcmp(encoded.data(), encoded_proto, encoded.size()), 0);
+  Result result = encoder.Encode();
+  ASSERT_EQ(result.status(), Status::Ok());
+  EXPECT_EQ(result.value().size(), sizeof(encoded_proto));
+  EXPECT_EQ(
+      std::memcmp(result.value().data(), encoded_proto, sizeof(encoded_proto)),
+      0);
 }
 
 TEST(Encoder, NestedDepthLimit) {
@@ -224,16 +225,16 @@
   NestedEncoder<2, 10> encoder(encode_buffer);
 
   // One level of nesting.
-  EXPECT_EQ(encoder.Push(2), Status::OK);
+  EXPECT_EQ(encoder.Push(2), Status::Ok());
   // Two levels of nesting.
-  EXPECT_EQ(encoder.Push(1), Status::OK);
+  EXPECT_EQ(encoder.Push(1), Status::Ok());
   // Three levels of nesting: error!
-  EXPECT_EQ(encoder.Push(1), Status::RESOURCE_EXHAUSTED);
+  EXPECT_EQ(encoder.Push(1), Status::ResourceExhausted());
 
   // Further operations should fail.
-  EXPECT_EQ(encoder.Pop(), Status::RESOURCE_EXHAUSTED);
-  EXPECT_EQ(encoder.Pop(), Status::RESOURCE_EXHAUSTED);
-  EXPECT_EQ(encoder.Pop(), Status::RESOURCE_EXHAUSTED);
+  EXPECT_EQ(encoder.Pop(), Status::ResourceExhausted());
+  EXPECT_EQ(encoder.Pop(), Status::ResourceExhausted());
+  EXPECT_EQ(encoder.Pop(), Status::ResourceExhausted());
 }
 
 TEST(Encoder, NestedBlobLimit) {
@@ -241,23 +242,23 @@
   NestedEncoder<5, 3> encoder(encode_buffer);
 
   // Write first blob.
-  EXPECT_EQ(encoder.Push(1), Status::OK);
-  EXPECT_EQ(encoder.Pop(), Status::OK);
+  EXPECT_EQ(encoder.Push(1), Status::Ok());
+  EXPECT_EQ(encoder.Pop(), Status::Ok());
 
   // Write second blob.
-  EXPECT_EQ(encoder.Push(2), Status::OK);
+  EXPECT_EQ(encoder.Push(2), Status::Ok());
 
   // Write nested third blob.
-  EXPECT_EQ(encoder.Push(3), Status::OK);
-  EXPECT_EQ(encoder.Pop(), Status::OK);
+  EXPECT_EQ(encoder.Push(3), Status::Ok());
+  EXPECT_EQ(encoder.Pop(), Status::Ok());
 
   // End second blob.
-  EXPECT_EQ(encoder.Pop(), Status::OK);
+  EXPECT_EQ(encoder.Pop(), Status::Ok());
 
   // Write fourth blob: error!.
-  EXPECT_EQ(encoder.Push(4), Status::RESOURCE_EXHAUSTED);
+  EXPECT_EQ(encoder.Push(4), Status::ResourceExhausted());
   // Nothing to pop.
-  EXPECT_EQ(encoder.Pop(), Status::RESOURCE_EXHAUSTED);
+  EXPECT_EQ(encoder.Pop(), Status::ResourceExhausted());
 }
 
 TEST(Encoder, RepeatedField) {
@@ -273,10 +274,12 @@
   constexpr uint8_t encoded_proto[] = {
       0x08, 0x00, 0x08, 0x32, 0x08, 0x64, 0x08, 0x96, 0x01, 0x08, 0xc8, 0x01};
 
-  std::span<const std::byte> encoded;
-  EXPECT_EQ(encoder.Encode(&encoded), Status::OK);
-  EXPECT_EQ(encoded.size(), sizeof(encoded_proto));
-  EXPECT_EQ(std::memcmp(encoded.data(), encoded_proto, encoded.size()), 0);
+  Result result = encoder.Encode();
+  ASSERT_EQ(result.status(), Status::Ok());
+  EXPECT_EQ(result.value().size(), sizeof(encoded_proto));
+  EXPECT_EQ(
+      std::memcmp(result.value().data(), encoded_proto, sizeof(encoded_proto)),
+      0);
 }
 
 TEST(Encoder, PackedVarint) {
@@ -291,10 +294,12 @@
       0x0a, 0x07, 0x00, 0x32, 0x64, 0x96, 0x01, 0xc8, 0x01};
   //  key   size  v[0]  v[1]  v[2]  v[3]        v[4]
 
-  std::span<const std::byte> encoded;
-  EXPECT_EQ(encoder.Encode(&encoded), Status::OK);
-  EXPECT_EQ(encoded.size(), sizeof(encoded_proto));
-  EXPECT_EQ(std::memcmp(encoded.data(), encoded_proto, encoded.size()), 0);
+  Result result = encoder.Encode();
+  ASSERT_EQ(result.status(), Status::Ok());
+  EXPECT_EQ(result.value().size(), sizeof(encoded_proto));
+  EXPECT_EQ(
+      std::memcmp(result.value().data(), encoded_proto, sizeof(encoded_proto)),
+      0);
 }
 
 TEST(Encoder, PackedVarintInsufficientSpace) {
@@ -304,9 +309,7 @@
   constexpr uint32_t values[] = {0, 50, 100, 150, 200};
   encoder.WritePackedUint32(1, values);
 
-  std::span<const std::byte> encoded;
-  EXPECT_EQ(encoder.Encode(&encoded), Status::RESOURCE_EXHAUSTED);
-  EXPECT_EQ(encoded.size(), 0u);
+  EXPECT_EQ(encoder.Encode().status(), Status::ResourceExhausted());
 }
 
 TEST(Encoder, PackedFixed) {
@@ -326,10 +329,12 @@
       0x00, 0x00, 0x00, 0x96, 0x00, 0x00, 0x00, 0xc8, 0x00, 0x00, 0x00,
       0x12, 0x08, 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01};
 
-  std::span<const std::byte> encoded;
-  EXPECT_EQ(encoder.Encode(&encoded), Status::OK);
-  EXPECT_EQ(encoded.size(), sizeof(encoded_proto));
-  EXPECT_EQ(std::memcmp(encoded.data(), encoded_proto, encoded.size()), 0);
+  Result result = encoder.Encode();
+  ASSERT_EQ(result.status(), Status::Ok());
+  EXPECT_EQ(result.value().size(), sizeof(encoded_proto));
+  EXPECT_EQ(
+      std::memcmp(result.value().data(), encoded_proto, sizeof(encoded_proto)),
+      0);
 }
 
 TEST(Encoder, PackedZigzag) {
@@ -343,10 +348,12 @@
   constexpr uint8_t encoded_proto[] = {
       0x0a, 0x09, 0xc7, 0x01, 0x31, 0x01, 0x00, 0x02, 0x32, 0xc8, 0x01};
 
-  std::span<const std::byte> encoded;
-  EXPECT_EQ(encoder.Encode(&encoded), Status::OK);
-  EXPECT_EQ(encoded.size(), sizeof(encoded_proto));
-  EXPECT_EQ(std::memcmp(encoded.data(), encoded_proto, encoded.size()), 0);
+  Result result = encoder.Encode();
+  ASSERT_EQ(result.status(), Status::Ok());
+  EXPECT_EQ(result.value().size(), sizeof(encoded_proto));
+  EXPECT_EQ(
+      std::memcmp(result.value().data(), encoded_proto, sizeof(encoded_proto)),
+      0);
 }
 
 }  // namespace
diff --git a/pw_protobuf/find.cc b/pw_protobuf/find.cc
index 923097a..635f50b 100644
--- a/pw_protobuf/find.cc
+++ b/pw_protobuf/find.cc
@@ -20,12 +20,12 @@
                                        uint32_t field_number) {
   if (field_number != field_number_) {
     // Continue to the next field.
-    return Status::OK;
+    return Status::Ok();
   }
 
   found_ = true;
   if (nested_handler_ == nullptr) {
-    return Status::CANCELLED;
+    return Status::Cancelled();
   }
 
   std::span<const std::byte> submessage;
diff --git a/pw_protobuf/public/pw_protobuf/decoder.h b/pw_protobuf/public/pw_protobuf/decoder.h
index 595bc9a..6cb402e 100644
--- a/pw_protobuf/public/pw_protobuf/decoder.h
+++ b/pw_protobuf/public/pw_protobuf/decoder.h
@@ -192,7 +192,7 @@
 //           break;
 //       }
 //
-//       return Status::OK;
+//       return Status::Ok();
 //     }
 //
 //     int bar;
@@ -303,8 +303,8 @@
   // Receives a pointer to the decoder object, allowing the handler to call
   // the appropriate method to extract the field's data.
   //
-  // If the status returned is not Status::OK, the decode operation is exited
-  // with the provided status. Returning Status::CANCELLED allows a convenient
+  // If the status returned is not Status::Ok(), the decode operation is exited
+  // with the provided status. Returning Status::Cancelled() allows a convenient
   // way of stopping a decode early (for example, if a desired field is found).
   virtual Status ProcessField(CallbackDecoder& decoder,
                               uint32_t field_number) = 0;
diff --git a/pw_protobuf/public/pw_protobuf/encoder.h b/pw_protobuf/public/pw_protobuf/encoder.h
index 3e86368..c859009 100644
--- a/pw_protobuf/public/pw_protobuf/encoder.h
+++ b/pw_protobuf/public/pw_protobuf/encoder.h
@@ -17,7 +17,9 @@
 #include <cstring>
 #include <span>
 
+#include "pw_bytes/span.h"
 #include "pw_protobuf/wire_format.h"
+#include "pw_result/result.h"
 #include "pw_status/status.h"
 #include "pw_varint/varint.h"
 
@@ -31,7 +33,7 @@
   // message. This can be templated to minimize the overhead.
   using SizeType = size_t;
 
-  constexpr Encoder(std::span<std::byte> buffer,
+  constexpr Encoder(ByteSpan buffer,
                     std::span<SizeType*> locations,
                     std::span<SizeType*> stack)
       : buffer_(buffer),
@@ -40,7 +42,7 @@
         blob_count_(0),
         blob_stack_(stack),
         depth_(0),
-        encode_status_(Status::OK) {}
+        encode_status_(Status::Ok()) {}
 
   // Disallow copy/assign to avoid confusion about who owns the buffer.
   Encoder(const Encoder& other) = delete;
@@ -225,7 +227,7 @@
   }
 
   // Writes a proto bytes key-value pair.
-  Status WriteBytes(uint32_t field_number, std::span<const std::byte> value) {
+  Status WriteBytes(uint32_t field_number, ConstByteSpan value) {
     std::byte* original_cursor = cursor_;
     WriteFieldKey(field_number, WireType::kDelimited);
     WriteVarint(value.size_bytes());
@@ -259,14 +261,25 @@
   // obtained from Encode().
   void Clear() {
     cursor_ = buffer_.data();
-    encode_status_ = Status::OK;
+    encode_status_ = Status::Ok();
     blob_count_ = 0;
     depth_ = 0;
   }
 
   // Runs a final encoding pass over the intermediary data and returns the
   // encoded protobuf message.
-  Status Encode(std::span<const std::byte>* out);
+  Result<ConstByteSpan> Encode();
+
+  // DEPRECATED. Use Encode() instead.
+  // TODO(frolv): Remove this after all references to it are updated.
+  Status Encode(ConstByteSpan* out) {
+    Result result = Encode();
+    if (!result.ok()) {
+      return result.status();
+    }
+    *out = result.value();
+    return Status::Ok();
+  }
 
  private:
   constexpr bool ValidFieldNumber(uint32_t field_number) const {
@@ -278,7 +291,7 @@
   // Encodes the key for a proto field consisting of its number and wire type.
   Status WriteFieldKey(uint32_t field_number, WireType wire_type) {
     if (!ValidFieldNumber(field_number)) {
-      encode_status_ = Status::INVALID_ARGUMENT;
+      encode_status_ = Status::InvalidArgument();
       return encode_status_;
     }
 
@@ -340,7 +353,7 @@
   }
 
   // The buffer into which the proto is encoded.
-  std::span<std::byte> buffer_;
+  ByteSpan buffer_;
   std::byte* cursor_;
 
   // List of pointers to sub-messages' delimiting size fields.
@@ -359,8 +372,7 @@
 template <size_t kMaxNestedDepth = 1, size_t kMaxBlobs = 1>
 class NestedEncoder : public Encoder {
  public:
-  NestedEncoder(std::span<std::byte> buffer)
-      : Encoder(buffer, blobs_, stack_) {}
+  NestedEncoder(ByteSpan buffer) : Encoder(buffer, blobs_, stack_) {}
 
   // Disallow copy/assign to avoid confusion about who owns the buffer.
   NestedEncoder(const NestedEncoder& other) = delete;
diff --git a/pw_protobuf/public/pw_protobuf/serialized_size.h b/pw_protobuf/public/pw_protobuf/serialized_size.h
new file mode 100644
index 0000000..d671683
--- /dev/null
+++ b/pw_protobuf/public/pw_protobuf/serialized_size.h
@@ -0,0 +1,52 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <cstdint>
+
+#include "pw_protobuf/wire_format.h"
+#include "pw_varint/varint.h"
+
+namespace pw::protobuf {
+
+// Field types that directly map to fixed wire types:
+inline constexpr size_t kMaxSizeBytesFixed32 = 4;
+inline constexpr size_t kMaxSizeBytesFixed64 = 8;
+inline constexpr size_t kMaxSizeBytesSfixed32 = 4;
+inline constexpr size_t kMaxSizeBytesSfixed64 = 8;
+inline constexpr size_t kMaxSizeBytesFloat = kMaxSizeBytesFixed32;
+inline constexpr size_t kMaxSizeBytesDouble = kMaxSizeBytesFixed64;
+
+// Field types that map to varint:
+inline constexpr size_t kMaxSizeBytesUint32 = varint::kMaxVarint32SizeBytes;
+inline constexpr size_t kMaxSizeBytesUint64 = varint::kMaxVarint64SizeBytes;
+inline constexpr size_t kMaxSizeBytesSint32 = varint::kMaxVarint32SizeBytes;
+inline constexpr size_t kMaxSizeBytesSint64 = varint::kMaxVarint64SizeBytes;
+// The int32 field type does not use zigzag encoding, ergo negative values
+// can result in the worst case varint size.
+inline constexpr size_t kMaxSizeBytesInt32 = varint::kMaxVarint64SizeBytes;
+inline constexpr size_t kMaxSizeBytesInt64 = varint::kMaxVarint64SizeBytes;
+// The bool field type is backed by a varint, but has a limited value range.
+inline constexpr size_t kMaxSizeBytesBool = 1;
+
+inline constexpr size_t kMaxSizeOfFieldKey = varint::kMaxVarint32SizeBytes;
+
+inline constexpr size_t kMaxSizeOfLength = varint::kMaxVarint32SizeBytes;
+
+constexpr size_t SizeOfFieldKey(uint32_t field_number) {
+  // The wiretype is ignored as this does not impact the serialized size.
+  return varint::EncodedSize(field_number << kFieldNumberShift);
+}
+
+}  // namespace pw::protobuf
diff --git a/pw_protobuf_compiler/nanopb.gni b/pw_protobuf/py/BUILD.gn
similarity index 65%
copy from pw_protobuf_compiler/nanopb.gni
copy to pw_protobuf/py/BUILD.gn
index 3c3be32..edee3aa 100644
--- a/pw_protobuf_compiler/nanopb.gni
+++ b/pw_protobuf/py/BUILD.gn
@@ -12,8 +12,17 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-declare_args() {
-  # If compiling protos for nanopb, this variable is set to the path to the
-  # nanopb installation.
-  dir_pw_third_party_nanopb = ""
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/python.gni")
+
+pw_python_package("py") {
+  setup = [ "setup.py" ]
+  sources = [
+    "pw_protobuf/__init__.py",
+    "pw_protobuf/codegen_pwpb.py",
+    "pw_protobuf/output_file.py",
+    "pw_protobuf/plugin.py",
+    "pw_protobuf/proto_tree.py",
+  ]
 }
diff --git a/pw_protobuf/py/pw_protobuf/__init__.py b/pw_protobuf/py/pw_protobuf/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pw_protobuf/py/pw_protobuf/__init__.py
diff --git a/pw_protobuf/py/pw_protobuf/codegen_pwpb.py b/pw_protobuf/py/pw_protobuf/codegen_pwpb.py
index 4588753..faf6bb9 100644
--- a/pw_protobuf/py/pw_protobuf/codegen_pwpb.py
+++ b/pw_protobuf/py/pw_protobuf/codegen_pwpb.py
@@ -18,11 +18,13 @@
 import os
 import sys
 from typing import Dict, Iterable, List, Tuple
+from typing import cast
 
 import google.protobuf.descriptor_pb2 as descriptor_pb2
 
 from pw_protobuf.output_file import OutputFile
-from pw_protobuf.proto_tree import ProtoMessageField, ProtoNode
+from pw_protobuf.proto_tree import ProtoEnum, ProtoMessage, ProtoMessageField
+from pw_protobuf.proto_tree import ProtoNode
 from pw_protobuf.proto_tree import build_node_tree
 
 PLUGIN_NAME = 'pw_protobuf'
@@ -113,8 +115,12 @@
     def _relative_type_namespace(self, from_root: bool = False) -> str:
         """Returns relative namespace between method's scope and field type."""
         scope = self._root if from_root else self._scope
-        ancestor = scope.common_ancestor(self._field.type_node())
-        return self._field.type_node().cpp_namespace(ancestor)
+        type_node = self._field.type_node()
+        assert type_node is not None
+        ancestor = scope.common_ancestor(type_node)
+        namespace = type_node.cpp_namespace(ancestor)
+        assert namespace is not None
+        return namespace
 
 
 class SubMessageMethod(ProtoMethod):
@@ -501,7 +507,7 @@
 }
 
 
-def generate_code_for_message(message: ProtoNode, root: ProtoNode,
+def generate_code_for_message(message: ProtoMessage, root: ProtoNode,
                               output: OutputFile) -> None:
     """Creates a C++ class for a protobuf message."""
     assert message.type() == ProtoNode.Type.MESSAGE
@@ -544,7 +550,7 @@
     output.write_line('};')
 
 
-def define_not_in_class_methods(message: ProtoNode, root: ProtoNode,
+def define_not_in_class_methods(message: ProtoMessage, root: ProtoNode,
                                 output: OutputFile) -> None:
     """Defines methods for a message class that were previously declared."""
     assert message.type() == ProtoNode.Type.MESSAGE
@@ -567,7 +573,7 @@
             output.write_line('}')
 
 
-def generate_code_for_enum(enum: ProtoNode, root: ProtoNode,
+def generate_code_for_enum(enum: ProtoEnum, root: ProtoNode,
                            output: OutputFile) -> None:
     """Creates a C++ enum for a proto enum."""
     assert enum.type() == ProtoNode.Type.ENUM
@@ -579,12 +585,9 @@
     output.write_line('};')
 
 
-def forward_declare(node: ProtoNode, root: ProtoNode,
+def forward_declare(node: ProtoMessage, root: ProtoNode,
                     output: OutputFile) -> None:
     """Generates code forward-declaring entities in a message's namespace."""
-    if node.type() != ProtoNode.Type.MESSAGE:
-        return
-
     namespace = node.cpp_namespace(root)
     output.write_line()
     output.write_line(f'namespace {namespace} {{')
@@ -602,7 +605,7 @@
     for child in node.children():
         if child.type() == ProtoNode.Type.ENUM:
             output.write_line()
-            generate_code_for_enum(child, node, output)
+            generate_code_for_enum(cast(ProtoEnum, child), node, output)
 
     output.write_line(f'}}  // namespace {namespace}')
 
@@ -639,25 +642,28 @@
         output.write_line(f'\nnamespace {file_namespace} {{')
 
     for node in package:
-        forward_declare(node, package, output)
+        if node.type() == ProtoNode.Type.MESSAGE:
+            forward_declare(cast(ProtoMessage, node), package, output)
 
     # Define all top-level enums.
     for node in package.children():
         if node.type() == ProtoNode.Type.ENUM:
             output.write_line()
-            generate_code_for_enum(node, package, output)
+            generate_code_for_enum(cast(ProtoEnum, node), package, output)
 
     # Run through all messages in the file, generating a class for each.
     for node in package:
         if node.type() == ProtoNode.Type.MESSAGE:
             output.write_line()
-            generate_code_for_message(node, package, output)
+            generate_code_for_message(cast(ProtoMessage, node), package,
+                                      output)
 
     # Run a second pass through the classes, this time defining all of the
     # methods which were previously only declared.
     for node in package:
         if node.type() == ProtoNode.Type.MESSAGE:
-            define_not_in_class_methods(node, package, output)
+            define_not_in_class_methods(cast(ProtoMessage, node), package,
+                                        output)
 
     if package.cpp_namespace():
         output.write_line(f'\n}}  // namespace {package.cpp_namespace()}')
diff --git a/pw_protobuf/py/pw_protobuf/py.typed b/pw_protobuf/py/pw_protobuf/py.typed
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pw_protobuf/py/pw_protobuf/py.typed
diff --git a/pw_protobuf/py/setup.py b/pw_protobuf/py/setup.py
index 7134f1a..96a8209 100644
--- a/pw_protobuf/py/setup.py
+++ b/pw_protobuf/py/setup.py
@@ -13,7 +13,7 @@
 # the License.
 """pw_protobuf"""
 
-import setuptools
+import setuptools  # type: ignore
 
 setuptools.setup(
     name='pw_protobuf',
@@ -22,6 +22,8 @@
     author_email='pigweed-developers@googlegroups.com',
     description='Lightweight streaming protobuf implementation',
     packages=setuptools.find_packages(),
+    package_data={'pw_protobuf': ['py.typed']},
+    zip_safe=False,
     entry_points={
         'console_scripts': ['pw_protobuf_codegen = pw_protobuf.plugin:main']
     },
diff --git a/pw_protobuf/size_report/BUILD.gn b/pw_protobuf/size_report/BUILD.gn
index 9bccccd..4906c86 100644
--- a/pw_protobuf/size_report/BUILD.gn
+++ b/pw_protobuf/size_report/BUILD.gn
@@ -12,10 +12,10 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_bloat/bloat.gni")
+
 _decoder_full = {
   deps = [
     "$dir_pw_bloat:bloat_this_binary",
diff --git a/pw_protobuf_compiler/BUILD.gn b/pw_protobuf_compiler/BUILD.gn
index 4b3c483..58dce83 100644
--- a/pw_protobuf_compiler/BUILD.gn
+++ b/pw_protobuf_compiler/BUILD.gn
@@ -12,38 +12,27 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/input_group.gni")
 import("$dir_pw_docgen/docs.gni")
 import("$dir_pw_protobuf_compiler/proto.gni")
 import("$dir_pw_unit_test/test.gni")
+
 pw_doc_group("docs") {
   sources = [ "docs.rst" ]
 }
 
-_compiling_for_nanopb = pw_protobuf_GENERATORS + [ "nanopb" ] - [ "nanopb" ] !=
-                        pw_protobuf_GENERATORS
-
 pw_test_group("tests") {
-  tests = []
-  if (_compiling_for_nanopb) {
-    tests += [ ":nanopb_test" ]
-  }
+  tests = [ ":nanopb_test" ]
 }
 
-if (_compiling_for_nanopb) {
-  pw_test("nanopb_test") {
-    deps = [ ":nanopb_test_protos_nanopb" ]
-    sources = [ "nanopb_test.cc" ]
-  }
+pw_test("nanopb_test") {
+  deps = [ ":nanopb_test_protos.nanopb" ]
+  sources = [ "nanopb_test.cc" ]
+  enable_if = dir_pw_third_party_nanopb != ""
+}
 
-  pw_proto_library("nanopb_test_protos") {
-    sources = [ "pw_protobuf_compiler_protos/nanopb_test.proto" ]
-  }
-} else {
-  pw_input_group("not_needed") {
-    inputs = [ "nanopb_test.cc" ]
-  }
+pw_proto_library("nanopb_test_protos") {
+  sources = [ "pw_protobuf_compiler_protos/nanopb_test.proto" ]
 }
diff --git a/pw_protobuf_compiler/docs.rst b/pw_protobuf_compiler/docs.rst
index 4bb4cdc..3e63881 100644
--- a/pw_protobuf_compiler/docs.rst
+++ b/pw_protobuf_compiler/docs.rst
@@ -1,19 +1,13 @@
-.. default-domain:: py
-
-.. highlight:: py
-
-.. _chapter-pw-protobuf-compiler:
+.. _module-pw_protobuf_compiler:
 
 --------------------
 pw_protobuf_compiler
 --------------------
-
 The Protobuf compiler module provides build system integration and wrapper
 scripts for generating source code for Protobuf definitions.
 
 Generator support
 =================
-
 Protobuf code generation is currently supported for the following generators:
 
 +-------------+----------------+-----------------------------------------------+
@@ -28,35 +22,35 @@
 |             |                | ``dir_pw_third_party_nanopb`` must be set to  |
 |             |                | point to a local nanopb installation.         |
 +-------------+----------------+-----------------------------------------------+
-| Nanopb RPC  | ``nanopb_rpc`` | Compiles pw_rpc service code for a nanopb     |
-|             |                | server. Requires the nanopb generator to be   |
-|             |                | configured as well.                           |
+| Nanopb RPC  | ``nanopb_rpc`` | Compiles pw_rpc service and client code for   |
+|             |                | nanopb. Requires a nanopb installation.       |
 +-------------+----------------+-----------------------------------------------+
-
-The build variable ``pw_protobuf_GENERATORS`` tells the module the generators
-for which it should compile code. It is defined as a list of generator codes.
+| Raw RPC     | ``raw_rpc``    | Compiles raw binary pw_rpc service code.      |
++-------------+----------------+-----------------------------------------------+
 
 GN template
 ===========
-
 The ``pw_proto_library`` GN template is provided by the module.
 
-It tells the build system to compile a set of source proto files to a library in
-each chosen generator. A different target is created for each generator, with
-the generator's code appended as a suffix to the template's target name.
+It defines a collection of protobuf files that should be compiled together. The
+template creates a sub-target for each supported generator, named
+``<target_name>.<generator>``. These sub-targets generate their respective
+protobuf code, and expose it to the build system appropriately (e.g. a
+``pw_source_set`` for C/C++).
 
-For example, given the definitions:
+For example, given the following target:
 
-.. code::
-
-  pw_protobuf_GENERATORS = [ "pwpb", "py" ]
+.. code-block::
 
   pw_proto_library("test_protos") {
     sources = [ "test.proto" ]
   }
 
-Two targets are created, named ``test_protos_pwpb`` and ``test_protos_py``,
-containing the generated code from their respective generators.
+``test_protos.pwpb`` compiles code for pw_protobuf, and ``test_protos.nanopb``
+compiles using Nanopb (if it's installed).
+
+Protobuf code is only generated when a generator sub-target is listed as a
+dependency of another GN target.
 
 **Arguments**
 
@@ -78,11 +72,11 @@
 
   pw_proto_library("my_other_protos") {
     sources = [
-      "baz.proto", # imports foo.proto
+      "baz.proto",  # imports foo.proto
     ]
-    deps = [
-      ":my_protos",
-    ]
+
+    # Proto libraries depend on other proto libraries directly.
+    deps = [ ":my_protos" ]
   }
 
   source_set("my_cc_code") {
@@ -91,7 +85,7 @@
       "bar.cc",
       "baz.cc",
     ]
-    deps = [
-      ":my_other_protos_cc",
-    ]
+
+    # When depending on protos in a source_set, specify the generator suffix.
+    deps = [ ":my_other_protos.pwpb" ]
   }
diff --git a/pw_protobuf_compiler/proto.cmake b/pw_protobuf_compiler/proto.cmake
new file mode 100644
index 0000000..bd8584e
--- /dev/null
+++ b/pw_protobuf_compiler/proto.cmake
@@ -0,0 +1,207 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+include_guard(GLOBAL)
+
+# Declares a protocol buffers library. This function creates a library for each
+# supported protocol buffer implementation:
+#
+#   ${NAME}.pwpb - pw_protobuf generated code
+#   ${NAME}.nanopb - Nanopb generated code (requires Nanopb)
+#
+# This function also creates libraries for generating pw_rpc code:
+#
+#   ${NAME}.nanopb_rpc - generates Nanopb pw_rpc code
+#   ${NAME}.raw_rpc - generates raw pw_rpc (no protobuf library) code
+#   ${NAME}.pwpb_rpc - (Not implemented) generates pw_protobuf pw_rpc code
+#
+# Args:
+#
+#   NAME - the base name of the libraries to create
+#   SOURCES - .proto source files
+#   DEPS - dependencies on other pw_proto_library targets
+#
+function(pw_proto_library NAME)
+  cmake_parse_arguments(PARSE_ARGV 1 arg "" "" "SOURCES;DEPS")
+
+  set(out_dir "${CMAKE_CURRENT_BINARY_DIR}/protos")
+
+  # Use INTERFACE libraries to track the proto include paths that are passed to
+  # protoc.
+  set(include_deps "${arg_DEPS}")
+  list(TRANSFORM include_deps APPEND ._includes)
+
+  add_library("${NAME}._includes" INTERFACE)
+  target_include_directories("${NAME}._includes" INTERFACE ".")
+  target_link_libraries("${NAME}._includes" INTERFACE ${include_deps})
+
+  # Generate a file with all include paths needed by protoc.
+  set(include_file "${out_dir}/${NAME}.include_paths.txt")
+  file(GENERATE OUTPUT "${include_file}"
+     CONTENT
+       "$<TARGET_PROPERTY:${NAME}._includes,INTERFACE_INCLUDE_DIRECTORIES>")
+
+  # Create a protobuf target for each supported protobuf library.
+  _pw_pwpb_library(
+      "${NAME}" "${arg_SOURCES}" "${arg_DEPS}" "${include_file}" "${out_dir}")
+  _pw_raw_rpc_library(
+      "${NAME}" "${arg_SOURCES}" "${arg_DEPS}" "${include_file}" "${out_dir}")
+  _pw_nanopb_library(
+      "${NAME}" "${arg_SOURCES}" "${arg_DEPS}" "${include_file}" "${out_dir}")
+  _pw_nanopb_rpc_library(
+      "${NAME}" "${arg_SOURCES}" "${arg_DEPS}" "${include_file}" "${out_dir}")
+endfunction(pw_proto_library)
+
+# Internal function that invokes protoc through generate_protos.py.
+function(_pw_generate_protos
+      TARGET LANGUAGE PLUGIN OUTPUT_EXTS INCLUDE_FILE OUT_DIR SOURCES DEPS)
+  # Determine the names of the output files.
+  foreach(extension IN LISTS OUTPUT_EXTS)
+    foreach(source_file IN LISTS SOURCES)
+      get_filename_component(dir "${source_file}" DIRECTORY)
+      get_filename_component(name "${source_file}" NAME_WE)
+      list(APPEND outputs "${OUT_DIR}/${dir}/${name}${extension}")
+    endforeach()
+  endforeach()
+
+  # Export the output files to the caller's scope so it can use them if needed.
+  set(generated_outputs "${outputs}" PARENT_SCOPE)
+
+  if("${CMAKE_HOST_SYSTEM_NAME}" STREQUAL "Windows")
+      # Use the .bat version of the plugin script on Windows.
+      get_filename_component(dir "${PLUGIN}" DIRECTORY)
+      get_filename_component(name "${PLUGIN}" NAME_WE)
+  endif()
+
+  set(script "$ENV{PW_ROOT}/pw_protobuf_compiler/py/pw_protobuf_compiler/generate_protos.py")
+  add_custom_command(
+    COMMAND
+      python
+      "${script}"
+      --language "${LANGUAGE}"
+      --plugin-path "${PLUGIN}"
+      --module-path "${CMAKE_CURRENT_SOURCE_DIR}"
+      --include-file "${INCLUDE_FILE}"
+      --out-dir "${OUT_DIR}"
+      ${ARGN}
+      ${SOURCES}
+    DEPENDS
+      ${SOURCES}
+      ${script}
+      ${DEPS}
+    OUTPUT
+      ${outputs}
+  )
+  add_custom_target("${TARGET}" DEPENDS ${outputs})
+endfunction(_pw_generate_protos)
+
+# Internal function that creates a pwpb proto library.
+function(_pw_pwpb_library NAME SOURCES DEPS INCLUDE_FILE OUT_DIR)
+  list(TRANSFORM DEPS APPEND .pwpb)
+
+  _pw_generate_protos("${NAME}.generate.pwpb"
+      pwpb
+      "$ENV{PW_ROOT}/pw_protobuf/py/pw_protobuf/plugin.py"
+      ".pwpb.h"
+      "${INCLUDE_FILE}"
+      "${OUT_DIR}"
+      "${SOURCES}"
+      "${DEPS}"
+  )
+
+  # Create the library with the generated source files.
+  add_library("${NAME}.pwpb" INTERFACE)
+  target_include_directories("${NAME}.pwpb" INTERFACE "${OUT_DIR}")
+  target_link_libraries("${NAME}.pwpb" INTERFACE pw_protobuf ${DEPS})
+  add_dependencies("${NAME}.pwpb" "${NAME}.generate.pwpb")
+endfunction(_pw_pwpb_library)
+
+# Internal function that creates a raw_rpc proto library.
+function(_pw_raw_rpc_library NAME SOURCES DEPS INCLUDE_FILE OUT_DIR)
+  list(TRANSFORM DEPS APPEND .raw_rpc)
+
+  _pw_generate_protos("${NAME}.generate.raw_rpc"
+      raw_rpc
+      "$ENV{PW_ROOT}/pw_rpc/py/pw_rpc/plugin_raw.py"
+      ".raw_rpc.pb.h"
+      "${INCLUDE_FILE}"
+      "${OUT_DIR}"
+      "${SOURCES}"
+      "${DEPS}"
+  )
+
+  # Create the library with the generated source files.
+  add_library("${NAME}.raw_rpc" INTERFACE)
+  target_include_directories("${NAME}.raw_rpc" INTERFACE "${OUT_DIR}")
+  target_link_libraries("${NAME}.raw_rpc"
+    INTERFACE
+      pw_rpc.raw
+      pw_rpc.server
+      ${DEPS}
+  )
+  add_dependencies("${NAME}.raw_rpc" "${NAME}.generate.raw_rpc")
+endfunction(_pw_raw_rpc_library)
+
+# Internal function that creates a nanopb proto library.
+function(_pw_nanopb_library NAME SOURCES DEPS INCLUDE_FILE OUT_DIR)
+  list(TRANSFORM DEPS APPEND .nanopb)
+
+  set(nanopb_dir "$<TARGET_PROPERTY:$<IF:$<TARGET_EXISTS:protobuf-nanopb-static>,protobuf-nanopb-static,pw_build.empty>,SOURCE_DIR>")
+  set(nanopb_plugin
+      "$<IF:$<TARGET_EXISTS:protobuf-nanopb-static>,${nanopb_dir}/generator/protoc-gen-nanopb,COULD_NOT_FIND_protobuf-nanopb-static_TARGET_PLEASE_SET_UP_NANOPB>")
+
+  _pw_generate_protos("${NAME}.generate.nanopb"
+      nanopb
+      "${nanopb_plugin}"
+      ".pb.h;.pb.c"
+      "${INCLUDE_FILE}"
+      "${OUT_DIR}"
+      "${SOURCES}"
+      "${DEPS}"
+      --include-paths "${nanopb_dir}/generator/proto"
+  )
+
+  # Create the library with the generated source files.
+  add_library("${NAME}.nanopb" EXCLUDE_FROM_ALL ${generated_outputs})
+  target_include_directories("${NAME}.nanopb" PUBLIC "${OUT_DIR}")
+  target_link_libraries("${NAME}.nanopb" PUBLIC pw_third_party.nanopb ${DEPS})
+  add_dependencies("${NAME}.nanopb" "${NAME}.generate.nanopb")
+endfunction(_pw_nanopb_library)
+
+# Internal function that creates a nanopb_rpc library.
+function(_pw_nanopb_rpc_library NAME SOURCES DEPS INCLUDE_FILE OUT_DIR)
+  # Depend on the .nanopb_rpc sub-targets of the proto library dependencies.
+  list(TRANSFORM DEPS APPEND .nanopb_rpc)
+
+  _pw_generate_protos("${NAME}.generate.nanopb_rpc"
+      nanopb_rpc
+      "$ENV{PW_ROOT}/pw_rpc/py/pw_rpc/plugin_nanopb.py"
+      ".rpc.pb.h"
+      "${INCLUDE_FILE}"
+      "${OUT_DIR}"
+      "${SOURCES}"
+      "${DEPS}"
+  )
+
+  # Create the library with the generated source files.
+  add_library("${NAME}.nanopb_rpc" INTERFACE)
+  target_include_directories("${NAME}.nanopb_rpc" INTERFACE "${OUT_DIR}")
+  target_link_libraries("${NAME}.nanopb_rpc"
+    INTERFACE
+      "${NAME}.nanopb"
+      pw_rpc.nanopb.method_union
+      pw_rpc.server
+      ${DEPS}
+  )
+  add_dependencies("${NAME}.nanopb_rpc" "${NAME}.generate.nanopb_rpc")
+endfunction(_pw_nanopb_rpc_library)
diff --git a/pw_protobuf_compiler/proto.gni b/pw_protobuf_compiler/proto.gni
index d2652b5..f7b1a28 100644
--- a/pw_protobuf_compiler/proto.gni
+++ b/pw_protobuf_compiler/proto.gni
@@ -12,87 +12,100 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
+import("$dir_pw_build/error.gni")
 import("$dir_pw_build/input_group.gni")
-import("$dir_pw_build/python_script.gni")
+import("$dir_pw_build/python_action.gni")
 import("$dir_pw_build/target_types.gni")
-import("nanopb.gni")
-declare_args() {
-  # Generators with which to compile protobuf code. These are used by the
-  # pw_proto_library template to determine which build targets to create.
-  #
-  # Supported generators:
-  #   "pwpb", "nanopb", "nanopb_rpc", "go"
-  pw_protobuf_GENERATORS = [
-    "pwpb",
-    "go",
-  ]
-}
+import("$dir_pw_third_party/nanopb/nanopb.gni")
 
-# Python script that invokes protoc.
-_gen_script_path =
-    "$dir_pw_protobuf_compiler/py/pw_protobuf_compiler/generate_protos.py"
-
+# Variables forwarded from the public pw_proto_library template to the final
+# pw_source_set.
 _forwarded_vars = [
   "testonly",
   "visibility",
 ]
 
+# Internal template that invokes protoc with a pw_python_action. This should not
+# be used outside of this file; use pw_proto_library instead.
+#
+# This creates the internal GN target $target_name.$language._gen that compiles
+# proto files with protoc.
+template("_pw_invoke_protoc") {
+  _output = rebase_path(get_target_outputs(":${invoker.base_target}._metadata"))
+
+  pw_python_action("$target_name._gen") {
+    forward_variables_from(invoker, [ "metadata" ])
+    script =
+        "$dir_pw_protobuf_compiler/py/pw_protobuf_compiler/generate_protos.py"
+
+    deps = [
+             ":${invoker.base_target}._metadata",
+             ":${invoker.base_target}._inputs",
+           ] + invoker.deps
+
+    args = [
+             "--language",
+             invoker.language,
+             "--module-path",
+             rebase_path("."),
+             "--include-file",
+             _output[0],
+             "--out-dir",
+             rebase_path(invoker.gen_dir),
+           ] + rebase_path(invoker.sources)
+
+    inputs = invoker.sources
+
+    if (defined(invoker.plugin)) {
+      inputs += [ invoker.plugin ]
+      args += [ "--plugin-path=" + rebase_path(invoker.plugin) ]
+    }
+
+    if (defined(invoker.include_paths)) {
+      args += [
+        "--include-paths",
+        string_join(";", rebase_path(invoker.include_paths)),
+      ]
+    }
+
+    outputs = []
+    foreach(extension, invoker.output_extensions) {
+      foreach(proto,
+              rebase_path(invoker.sources, get_path_info(".", "abspath"))) {
+        _output = string_replace(proto, ".proto", extension)
+        outputs += [ "${invoker.gen_dir}/$_output" ]
+      }
+    }
+
+    if (outputs == []) {
+      stamp = true
+    }
+
+    visibility = [ ":*" ]
+  }
+}
+
 # Generates pw_protobuf C++ code for proto files, creating a source_set of the
 # generated files. This is internal and should not be used outside of this file.
 # Use pw_proto_library instead.
-#
-# Args:
-#  protos: List of input .proto files.
 template("_pw_pwpb_proto_library") {
-  _proto_gen_dir = "$root_gen_dir/protos"
-  _module_path = get_path_info(".", "abspath")
-  _relative_proto_paths = rebase_path(invoker.protos, _module_path)
-
-  _outputs = []
-  foreach(_proto, _relative_proto_paths) {
-    _output = string_replace(_proto, ".proto", ".pwpb.h")
-    _outputs += [ "$_proto_gen_dir/$_output" ]
-  }
-
-  _gen_target = "${target_name}_gen"
-  pw_python_script(_gen_target) {
-    forward_variables_from(invoker, _forwarded_vars)
-    script = _gen_script_path
-    args = [
-             "--language",
-             "cc",
-             "--module-path",
-             rebase_path(_module_path),
-             "--include-file",
-             rebase_path(invoker.include_file),
-             "--out-dir",
-             rebase_path(_proto_gen_dir),
-           ] + rebase_path(invoker.protos)
-    inputs = invoker.protos
-    outputs = _outputs
-    deps = invoker.deps
-    if (defined(invoker.protoc_deps)) {
-      deps += invoker.protoc_deps
-    }
-  }
-
-  # For C++ proto files, the generated proto directory is added as an include
-  # path for the code. This requires using "all_dependent_configs" to force the
-  # include on any code that transitively depends on the generated protos.
-  _include_config_target = "${target_name}_includes"
-  config(_include_config_target) {
-    include_dirs = [ "$_proto_gen_dir" ]
+  _pw_invoke_protoc(target_name) {
+    forward_variables_from(invoker, "*", _forwarded_vars)
+    language = "pwpb"
+    plugin = "$dir_pw_protobuf/py/pw_protobuf/plugin.py"
+    deps += [ "$dir_pw_protobuf/py" ]
+    output_extensions = [ ".pwpb.h" ]
   }
 
   # Create a library with the generated source files.
   pw_source_set(target_name) {
-    all_dependent_configs = [ ":$_include_config_target" ]
-    deps = [ ":$_gen_target" ]
-    public_deps = [ dir_pw_protobuf ] + invoker.gen_deps
-    sources = get_target_outputs(":$_gen_target")
+    forward_variables_from(invoker, _forwarded_vars)
+    public_configs = [ ":${invoker.base_target}._include_path" ]
+    deps = [ ":$target_name._gen" ]
+    public_deps = [ dir_pw_protobuf ] + invoker.deps
+    sources = get_target_outputs(":$target_name._gen")
     public = filter_include(sources, [ "*.pwpb.h" ])
   }
 }
@@ -100,233 +113,146 @@
 # Generates nanopb RPC code for proto files, creating a source_set of the
 # generated files. This is internal and should not be used outside of this file.
 # Use pw_proto_library instead.
-#
-# Args:
-#  protos: List of input .proto files.
-#
 template("_pw_nanopb_rpc_proto_library") {
-  assert(defined(dir_pw_third_party_nanopb) && dir_pw_third_party_nanopb != "",
-         "\$dir_pw_third_party_nanopb must be set to compile nanopb protobufs")
-
-  _proto_gen_dir = "$root_gen_dir/protos"
-  _module_path = get_path_info(".", "abspath")
-  _relative_proto_paths = rebase_path(invoker.protos, _module_path)
-
-  _outputs = []
-  foreach(_proto, _relative_proto_paths) {
-    _output_h = string_replace(_proto, ".proto", ".rpc.pb.h")
-    _outputs += [ "$_proto_gen_dir/$_output_h" ]
-  }
-
   # Create a target which runs protoc configured with the nanopb_rpc plugin to
   # generate the C++ proto RPC headers.
-  _gen_target = "${target_name}_gen"
-  pw_python_script(_gen_target) {
-    forward_variables_from(invoker, _forwarded_vars)
-    script = _gen_script_path
-    args = [
-             "--language",
-             "nanopb_rpc",
-             "--module-path",
-             rebase_path(_module_path),
-             "--include-paths",
-             rebase_path("$dir_pw_third_party_nanopb/generator/proto"),
-             "--include-file",
-             rebase_path(invoker.include_file),
-             "--out-dir",
-             rebase_path(_proto_gen_dir),
-           ] + rebase_path(invoker.protos)
-    inputs = invoker.protos
-    outputs = _outputs
-
-    deps = invoker.deps
-    if (defined(invoker.protoc_deps)) {
-      deps += invoker.protoc_deps
-    }
-  }
-
-  # For C++ proto files, the generated proto directory is added as an include
-  # path for the code. This requires using "all_dependent_configs" to force the
-  # include on any code that transitively depends on the generated protos.
-  _include_root = rebase_path(get_path_info(".", "abspath"), "//")
-  _include_config_target = "${target_name}_includes"
-  config(_include_config_target) {
-    include_dirs = [
-      "$_proto_gen_dir",
-      "$_proto_gen_dir/$_include_root",
-    ]
+  _pw_invoke_protoc(target_name) {
+    forward_variables_from(invoker, "*", _forwarded_vars)
+    language = "nanopb_rpc"
+    plugin = "$dir_pw_rpc/py/pw_rpc/plugin_nanopb.py"
+    deps += [ "$dir_pw_rpc/py" ]
+    include_paths = [ "$dir_pw_third_party_nanopb/generator/proto" ]
+    output_extensions = [ ".rpc.pb.h" ]
   }
 
   # Create a library with the generated source files.
   pw_source_set(target_name) {
-    all_dependent_configs = [ ":$_include_config_target" ]
-    deps = [ ":$_gen_target" ]
+    forward_variables_from(invoker, _forwarded_vars)
+    public_configs = [ ":${invoker.base_target}._include_path" ]
+    deps = [ ":$target_name._gen" ]
     public_deps = [
-                    dir_pw_third_party_nanopb,
-                    "$dir_pw_rpc:nanopb_server",
-                  ] + invoker.gen_deps
-    public = get_target_outputs(":$_gen_target")
+                    ":${invoker.base_target}.nanopb",
+                    "$dir_pw_rpc:server",
+                    "$dir_pw_rpc/nanopb:method_union",
+                    "$dir_pw_third_party/nanopb",
+                  ] + invoker.deps
+    public = get_target_outputs(":$target_name._gen")
   }
 }
 
 # Generates nanopb code for proto files, creating a source_set of the generated
 # files. This is internal and should not be used outside of this file. Use
 # pw_proto_library instead.
-#
-# Args:
-#  protos: List of input .proto files.
 template("_pw_nanopb_proto_library") {
-  assert(defined(dir_pw_third_party_nanopb) && dir_pw_third_party_nanopb != "",
-         "\$dir_pw_third_party_nanopb must be set to compile nanopb protobufs")
-
-  _proto_gen_dir = "$root_gen_dir/protos"
-  _module_path = get_path_info(".", "abspath")
-  _relative_proto_paths = rebase_path(invoker.protos, _module_path)
-
-  _outputs = []
-  foreach(_proto, _relative_proto_paths) {
-    _output_h = string_replace(_proto, ".proto", ".pb.h")
-    _output_c = string_replace(_proto, ".proto", ".pb.c")
-    _outputs += [
-      "$_proto_gen_dir/$_output_h",
-      "$_proto_gen_dir/$_output_c",
-    ]
-  }
-
-  _nanopb_plugin = "$dir_pw_third_party_nanopb/generator/protoc-gen-nanopb"
-  if (host_os == "win") {
-    _nanopb_plugin += ".bat"
-  }
-
   # Create a target which runs protoc configured with the nanopb plugin to
   # generate the C proto sources.
-  _gen_target = "${target_name}_gen"
-  pw_python_script(_gen_target) {
-    forward_variables_from(invoker, _forwarded_vars)
-    script = _gen_script_path
-    args = [
-             "--language",
-             "nanopb",
-             "--module-path",
-             rebase_path(_module_path),
-             "--include-paths",
-             rebase_path("$dir_pw_third_party_nanopb/generator/proto"),
-             "--include-file",
-             rebase_path(invoker.include_file),
-             "--out-dir",
-             rebase_path(_proto_gen_dir),
-             "--custom-plugin",
-             rebase_path(_nanopb_plugin),
-           ] + rebase_path(invoker.protos)
-
-    inputs = invoker.protos
-    outputs = _outputs
-
-    deps = invoker.deps
-    if (defined(invoker.protoc_deps)) {
-      deps += invoker.protoc_deps
-    }
-  }
-
-  # For C++ proto files, the generated proto directory is added as an include
-  # path for the code. This requires using "all_dependent_configs" to force the
-  # include on any code that transitively depends on the generated protos.
-  _include_root = rebase_path(get_path_info(".", "abspath"), "//")
-  _include_config_target = "${target_name}_includes"
-  config(_include_config_target) {
-    include_dirs = [
-      "$_proto_gen_dir",
-      "$_proto_gen_dir/$_include_root",
+  _pw_invoke_protoc(target_name) {
+    forward_variables_from(invoker, "*", _forwarded_vars)
+    language = "nanopb"
+    plugin = "$dir_pw_third_party_nanopb/generator/protoc-gen-nanopb"
+    include_paths = [ "$dir_pw_third_party_nanopb/generator/proto" ]
+    output_extensions = [
+      ".pb.h",
+      ".pb.c",
     ]
   }
 
   # Create a library with the generated source files.
   pw_source_set(target_name) {
-    all_dependent_configs = [ ":$_include_config_target" ]
-    deps = [ ":$_gen_target" ]
-    public_deps = [ dir_pw_third_party_nanopb ] + invoker.gen_deps
-    sources = get_target_outputs(":$_gen_target")
+    forward_variables_from(invoker, _forwarded_vars)
+    public_configs = [ ":${invoker.base_target}._include_path" ]
+    deps = [ ":$target_name._gen" ]
+    public_deps = [ "$dir_pw_third_party/nanopb" ] + invoker.deps
+    sources = get_target_outputs(":$target_name._gen")
     public = filter_include(sources, [ "*.pb.h" ])
   }
 }
 
+# Generates raw RPC code for proto files, creating a source_set of the generated
+# files. This is internal and should not be used outside of this file. Use
+# pw_proto_library instead.
+template("_pw_raw_rpc_proto_library") {
+  # Create a target which runs protoc configured with the raw_rpc plugin to
+  # generate the C++ proto RPC headers.
+  _pw_invoke_protoc(target_name) {
+    forward_variables_from(invoker, "*", _forwarded_vars)
+    language = "raw_rpc"
+    plugin = "$dir_pw_rpc/py/pw_rpc/plugin_raw.py"
+    deps += [ "$dir_pw_rpc/py" ]
+    output_extensions = [ ".raw_rpc.pb.h" ]
+  }
+
+  # Create a library with the generated source files.
+  pw_source_set(target_name) {
+    forward_variables_from(invoker, _forwarded_vars)
+    public_configs = [ ":${invoker.base_target}._include_path" ]
+    deps = [ ":$target_name._gen" ]
+    public_deps = [
+                    "$dir_pw_rpc:server",
+                    "$dir_pw_rpc/raw:method_union",
+                  ] + invoker.deps
+    public = get_target_outputs(":$target_name._gen")
+  }
+}
+
 # Generates Go code for proto files, listing the proto output directory in the
 # metadata variable GOPATH. Internal use only.
-#
-# Args:
-#  protos: List of input .proto files.
 template("_pw_go_proto_library") {
   _proto_gopath = "$root_gen_dir/go"
-  _proto_gen_dir = "$_proto_gopath/src"
-  _rebased_gopath = rebase_path(_proto_gopath)
 
-  pw_python_script(target_name) {
-    forward_variables_from(invoker, _forwarded_vars)
+  _pw_invoke_protoc(target_name) {
+    forward_variables_from(invoker, "*")
+    language = "go"
     metadata = {
-      gopath = [ "GOPATH+=$_rebased_gopath" ]
+      gopath = [ "GOPATH+=" + rebase_path(_proto_gopath) ]
       external_deps = [
         "github.com/golang/protobuf/proto",
         "google.golang.org/grpc",
       ]
     }
-    script = _gen_script_path
-    args = [
-             "--language",
-             "go",
-             "--module-path",
-             rebase_path("//"),
-             "--include-file",
-             rebase_path(invoker.include_file),
-             "--out-dir",
-             rebase_path(_proto_gen_dir),
-           ] + rebase_path(invoker.protos)
-    inputs = invoker.protos
-    deps = invoker.deps + invoker.gen_deps
-    stamp = true
+    output_extensions = []  # Don't enumerate the generated .go files.
+    gen_dir = "$_proto_gopath/src"
+  }
+
+  group(target_name) {
+    deps = [ ":$target_name._gen" ]
   }
 }
 
 # Generates protobuf code from .proto definitions for various languages.
+# For each supported generator, creates a sub-target named:
 #
-# The generators to use are defined in the pw_protobuf_GENERATORS build
-# variable. Each listed generator creates a generated code target called
-#
-#   <target_name>_<generator>
-#
-# For example, with the following definitions:
-#
-#   pw_protobuf_GENERATORS = [ "pwpb", "py" ]
-#
-#   pw_proto_library("my_protos") {
-#     sources = [ "foo.proto" ]
-#   }
-#
-# Two build targets will be created for the declared "my_protos" target.
-#
-#   "my_protos_pwpb"  <-- C++ source_set containing generated proto code
-#   "my_protos_py"    <-- Python module containing generated proto code
+#   <target_name>.<generator>
 #
 # Args:
 #  sources: List of input .proto files.
 #  deps: List of other pw_proto_library dependencies.
 #  inputs: Other files on which the protos depend (e.g. nanopb .options files).
+#
 template("pw_proto_library") {
   assert(defined(invoker.sources) && invoker.sources != [],
          "pw_proto_library requires .proto source files")
 
+  _common = {
+    base_target = target_name
+    gen_dir = "$target_gen_dir/protos"
+    sources = invoker.sources
+  }
+
+  if (defined(invoker.deps)) {
+    _deps = invoker.deps
+  } else {
+    _deps = []
+  }
+
   # For each proto target, create a file which collects the base directories of
   # all of its dependencies to list as include paths to protoc.
-  _include_metadata_target = "${target_name}_include_paths"
-  _include_metadata_file = "${target_gen_dir}/${target_name}_includes.txt"
-  generated_file(_include_metadata_target) {
-    if (defined(invoker.deps)) {
-      # Collect metadata from the include path files of each dependency.
-      deps = process_file_template(invoker.deps, "{{source}}_include_paths")
-    } else {
-      deps = []
-    }
+  generated_file("$target_name._metadata") {
+    # Collect metadata from the include path files of each dependency.
+    deps = process_file_template(_deps, "{{source}}._metadata")
+
     data_keys = [ "protoc_includes" ]
-    outputs = [ _include_metadata_file ]
+    outputs = [ "$target_gen_dir/${target_name}_includes.txt" ]
 
     # Indicate this library's base directory for its dependents.
     metadata = {
@@ -334,130 +260,80 @@
     }
   }
 
-  _deps = [ ":$_include_metadata_target" ]
-
+  # Toss any additional inputs into an input group dependency.
   if (defined(invoker.inputs)) {
-    # Toss any additional inputs into an input group dependency.
-    _input_target_name = "${target_name}_inputs"
-    pw_input_group(_input_target_name) {
+    pw_input_group("$target_name._inputs") {
       inputs = invoker.inputs
+      visibility = [ ":*" ]
     }
-    _deps += [ ":$_input_target_name" ]
-  }
-
-  # If the nanopb_rpc generator is selected, make sure that nanopb is also
-  # selected.
-  has_nanopb_rpc = pw_protobuf_GENERATORS + [ "nanopb_rpc" ] -
-                   [ "nanopb_rpc" ] != pw_protobuf_GENERATORS
-  if (has_nanopb_rpc) {
-    _generators =
-        pw_protobuf_GENERATORS + [ "nanopb" ] - [ "nanopb" ] + [ "nanopb" ]
   } else {
-    _generators = pw_protobuf_GENERATORS
+    group("$target_name._inputs") {
+      visibility = [ ":*" ]
+    }
   }
 
-  foreach(_gen, _generators) {
-    _lang_target = "${target_name}_${_gen}"
-    _gen_deps = []
+  # Create a config with the generated proto directory, which is used for C++.
+  config("$target_name._include_path") {
+    include_dirs = [ _common.gen_dir ]
+    visibility = [ ":*" ]
+  }
 
-    if (_gen == "nanopb_rpc") {
-      # Generated RPC code depends on the library's core protos.
-      _gen_deps += [ ":${target_name}_nanopb" ]
+  # Enumerate all of the protobuf generator targets.
+
+  _pw_pwpb_proto_library("$target_name.pwpb") {
+    forward_variables_from(invoker, _forwarded_vars)
+    forward_variables_from(_common, "*")
+    deps = process_file_template(_deps, "{{source}}.pwpb")
+  }
+
+  if (dir_pw_third_party_nanopb != "") {
+    _pw_nanopb_rpc_proto_library("$target_name.nanopb_rpc") {
+      forward_variables_from(invoker, _forwarded_vars)
+      forward_variables_from(_common, "*")
+      deps = process_file_template(_deps, "{{source}}.nanopb_rpc")
     }
 
-    if (defined(invoker.deps)) {
-      _gen_deps += process_file_template(invoker.deps, "{{source}}_${_gen}")
-
-      if (_gen == "nanopb_rpc") {
-        # RPC dependencies also depend on their core generated protos.
-        _gen_deps += process_file_template(invoker.deps, "{{source}}_nanopb")
-      }
+    _pw_nanopb_proto_library("$target_name.nanopb") {
+      forward_variables_from(invoker, _forwarded_vars)
+      forward_variables_from(_common, "*")
+      deps = process_file_template(_deps, "{{source}}.nanopb")
+    }
+  } else {
+    pw_error("$target_name.nanopb_rpc") {
+      message =
+          "\$dir_pw_third_party_nanopb must be set to generate nanopb RPC code."
     }
 
-    if (_gen == "pwpb") {
-      _pw_pwpb_proto_library(_lang_target) {
-        forward_variables_from(invoker, _forwarded_vars)
-        protos = invoker.sources
-        deps = _deps
-        include_file = _include_metadata_file
-        gen_deps = _gen_deps
-
-        # List the pw_protobuf plugin's files as a dependency to recompile
-        # generated code if they are modified.
-        protoc_deps = [ "$dir_pw_protobuf:codegen_protoc_plugin" ]
-      }
-    } else if (_gen == "nanopb_rpc") {
-      _pw_nanopb_rpc_proto_library(_lang_target) {
-        forward_variables_from(invoker, _forwarded_vars)
-        protos = invoker.sources
-        deps = _deps
-        include_file = _include_metadata_file
-        gen_deps = _gen_deps
-
-        # List the pw_protobuf plugin's files as a dependency to recompile
-        # generated code if they are modified.
-        protoc_deps = [ "$dir_pw_rpc:nanopb_protoc_plugin" ]
-      }
-    } else if (_gen == "nanopb") {
-      _pw_nanopb_proto_library(_lang_target) {
-        forward_variables_from(invoker, _forwarded_vars)
-        protos = invoker.sources
-        deps = _deps
-        include_file = _include_metadata_file
-        gen_deps = _gen_deps
-      }
-    } else if (_gen == "go") {
-      _pw_go_proto_library(_lang_target) {
-        forward_variables_from(invoker, _forwarded_vars)
-        protos = invoker.sources
-        deps = _deps
-        include_file = _include_metadata_file
-        gen_deps = _gen_deps
-      }
-    } else {
-      assert(false,
-             string_join(
-                 " ",
-                 [
-                   "pw_proto_library doesn't know how to generate code for",
-                   "generator '$_gen'. Please add support if you require it.",
-                 ]))
+    pw_error("$target_name.nanopb") {
+      message =
+          "\$dir_pw_third_party_nanopb must be set to compile nanopb protobufs."
     }
   }
 
+  _pw_raw_rpc_proto_library("$target_name.raw_rpc") {
+    forward_variables_from(invoker, _forwarded_vars)
+    forward_variables_from(_common, "*", [ "deps" ])
+    deps = process_file_template(_deps, "{{source}}.raw_rpc")
+  }
+
+  _pw_go_proto_library("$target_name.go") {
+    sources = invoker.sources
+    deps = process_file_template(_deps, "{{source}}.go")
+    base_target = _common.base_target
+  }
+
   # All supported pw_protobuf generators.
   _protobuf_generators = [
     "pwpb",
     "nanopb",
     "nanopb_rpc",
+    "raw_rpc",
     "go",
   ]
 
-  # Create stub versions of the proto library for other protobuf generators.
-  foreach(_gen, _protobuf_generators - _generators) {
-    pw_python_script("${target_name}_${_gen}") {
-      forward_variables_from(invoker, _forwarded_vars)
-      script = string_join("/",
-                           [
-                             dir_pw_protobuf_compiler,
-                             "py",
-                             "pw_protobuf_compiler",
-                             "generator_not_selected.py",
-                           ])
-      args = [
-        "--library",
-        "${target_name}_${_gen}",
-        "--generator",
-        _gen,
-      ]
-      inputs = invoker.sources
-      stamp = true
-    }
-  }
-
   # If the user attempts to use the target directly instead of one of the
   # generator targets, run a script which prints a nice error message.
-  pw_python_script(target_name) {
+  pw_python_action(target_name) {
     script = string_join("/",
                          [
                            dir_pw_protobuf_compiler,
@@ -472,7 +348,7 @@
              get_path_info(".", "abspath"),
              "--root",
              "//",
-           ] + pw_protobuf_GENERATORS
+           ] + _protobuf_generators
     stamp = true
   }
 }
diff --git a/pw_rpc/test_impl/BUILD.gn b/pw_protobuf_compiler/py/BUILD.gn
similarity index 62%
copy from pw_rpc/test_impl/BUILD.gn
copy to pw_protobuf_compiler/py/BUILD.gn
index 68f88c1..3c5f663 100644
--- a/pw_rpc/test_impl/BUILD.gn
+++ b/pw_protobuf_compiler/py/BUILD.gn
@@ -12,19 +12,18 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
-import("$dir_pw_build/target_types.gni")
-import("$dir_pw_unit_test/test.gni")
-config("config") {
-  include_dirs = [ "public_overrides" ]
-  visibility = [ ":*" ]
-}
+import("$dir_pw_build/python.gni")
 
-pw_source_set("test_impl") {
-  public_configs = [ ":config" ]
-  public = [ "public_overrides/pw_rpc/internal/method.h" ]
-  public_deps = [ "../:server_library_deps" ]
-  visibility = [ "..:*" ]
+pw_python_package("py") {
+  setup = [ "setup.py" ]
+  sources = [
+    "pw_protobuf_compiler/__init__.py",
+    "pw_protobuf_compiler/generate_protos.py",
+    "pw_protobuf_compiler/proto_target_invalid.py",
+    "pw_protobuf_compiler/python_protos.py",
+  ]
+  tests = [ "python_protos_test.py" ]
+  python_deps = [ "$dir_pw_cli/py" ]
 }
diff --git a/pw_protobuf_compiler/py/pw_protobuf_compiler/__init__.py b/pw_protobuf_compiler/py/pw_protobuf_compiler/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pw_protobuf_compiler/py/pw_protobuf_compiler/__init__.py
diff --git a/pw_protobuf_compiler/py/pw_protobuf_compiler/generate_protos.py b/pw_protobuf_compiler/py/pw_protobuf_compiler/generate_protos.py
index 79c369a..7ee524f 100644
--- a/pw_protobuf_compiler/py/pw_protobuf_compiler/generate_protos.py
+++ b/pw_protobuf_compiler/py/pw_protobuf_compiler/generate_protos.py
@@ -16,8 +16,9 @@
 import argparse
 import logging
 import os
-import shutil
+from pathlib import Path
 import sys
+import tempfile
 
 from typing import Callable, Dict, List, Optional
 
@@ -35,8 +36,13 @@
     if parser is None:
         parser = argparse.ArgumentParser(description=__doc__)
 
-    parser.add_argument('--language', default='cc', help='Output language')
-    parser.add_argument('--custom-plugin', help='Custom protoc plugin')
+    parser.add_argument('--language',
+                        required=True,
+                        choices=DEFAULT_PROTOC_ARGS,
+                        help='Output language')
+    parser.add_argument('--plugin-path',
+                        type=Path,
+                        help='Path to the protoc plugin')
     parser.add_argument('--module-path',
                         required=True,
                         help='Path to the module containing the .proto files')
@@ -60,8 +66,10 @@
 
 def protoc_cc_args(args: argparse.Namespace) -> List[str]:
     return [
-        '--plugin', f'protoc-gen-custom={shutil.which("pw_protobuf_codegen")}',
-        '--custom_out', args.out_dir
+        '--plugin',
+        f'protoc-gen-custom={args.plugin_path}',
+        '--custom_out',
+        args.out_dir,
     ]
 
 
@@ -72,53 +80,86 @@
 def protoc_nanopb_args(args: argparse.Namespace) -> List[str]:
     # nanopb needs to know of the include path to parse *.options files
     return [
-        '--plugin', f'protoc-gen-nanopb={args.custom_plugin}',
-        f'--nanopb_out=-I{args.module_path}:{args.out_dir}'
+        '--plugin',
+        f'protoc-gen-nanopb={args.plugin_path}',
+        # nanopb_opt provides the flags to use for nanopb_out. Windows doesn't
+        # like when you merge the two using the `flag,...:out` syntax.
+        f'--nanopb_opt=-I{args.module_path}',
+        f'--nanopb_out={args.out_dir}',
     ]
 
 
 def protoc_nanopb_rpc_args(args: argparse.Namespace) -> List[str]:
     return [
-        '--plugin', f'protoc-gen-custom={shutil.which("pw_rpc_codegen")}',
-        '--custom_out', args.out_dir
+        '--plugin',
+        f'protoc-gen-custom={args.plugin_path}',
+        '--custom_out',
+        args.out_dir,
+    ]
+
+
+def protoc_raw_rpc_args(args: argparse.Namespace) -> List[str]:
+    return [
+        '--plugin',
+        f'protoc-gen-custom={args.plugin_path}',
+        '--custom_out',
+        args.out_dir,
     ]
 
 
 # Default additional protoc arguments for each supported language.
 # TODO(frolv): Make these overridable with a command-line argument.
 DEFAULT_PROTOC_ARGS: Dict[str, Callable[[argparse.Namespace], List[str]]] = {
-    'cc': protoc_cc_args,
+    'pwpb': protoc_cc_args,
     'go': protoc_go_args,
     'nanopb': protoc_nanopb_args,
     'nanopb_rpc': protoc_nanopb_rpc_args,
+    'raw_rpc': protoc_raw_rpc_args,
 }
 
 
 def main() -> int:
     """Runs protoc as configured by command-line arguments."""
 
-    args = argument_parser().parse_args()
-    os.makedirs(args.out_dir, exist_ok=True)
+    parser = argument_parser()
+    args = parser.parse_args()
 
-    try:
-        lang_args = DEFAULT_PROTOC_ARGS[args.language](args)
-    except KeyError:
-        _LOG.error('Unsupported language: %s', args.language)
-        return 1
+    if args.plugin_path is None and args.language != 'go':
+        parser.error(
+            f'--plugin-path is required for --language {args.language}')
+
+    os.makedirs(args.out_dir, exist_ok=True)
 
     include_paths = [f'-I{path}' for path in args.include_paths]
     include_paths += [f'-I{line.strip()}' for line in args.include_file]
 
-    process = pw_cli.process.run(
-        'protoc',
-        '-I',
-        args.module_path,
-        '-I',
-        args.out_dir,
-        *include_paths,
-        *lang_args,
-        *args.protos,
-    )
+    wrapper_script: Optional[Path] = None
+
+    # On Windows, use a .bat version of the plugin if it exists or create a .bat
+    # wrapper to use if none exists.
+    if os.name == 'nt' and args.plugin_path:
+        if args.plugin_path.with_suffix('.bat').exists():
+            args.plugin_path = args.plugin_path.with_suffix('.bat')
+            _LOG.debug('Using Batch plugin %s', args.plugin_path)
+        else:
+            with tempfile.NamedTemporaryFile('w', suffix='.bat',
+                                             delete=False) as file:
+                file.write(f'@echo off\npython {args.plugin_path.resolve()}\n')
+
+            args.plugin_path = wrapper_script = Path(file.name)
+            _LOG.debug('Using generated plugin wrapper %s', args.plugin_path)
+
+    try:
+        process = pw_cli.process.run(
+            'protoc',
+            f'-I{args.module_path}',
+            *include_paths,
+            *DEFAULT_PROTOC_ARGS[args.language](args),
+            *args.protos,
+        )
+    finally:
+        if wrapper_script:
+            wrapper_script.unlink()
 
     if process.returncode != 0:
         print(process.output.decode(), file=sys.stderr)
diff --git a/pw_protobuf_compiler/py/pw_protobuf_compiler/generator_not_selected.py b/pw_protobuf_compiler/py/pw_protobuf_compiler/generator_not_selected.py
deleted file mode 100644
index e089cdb..0000000
--- a/pw_protobuf_compiler/py/pw_protobuf_compiler/generator_not_selected.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# Copyright 2020 The Pigweed Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-#     https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-"""Emits an error when using a protobuf library that is not generated"""
-
-import argparse
-import sys
-
-
-def parse_args():
-    parser = argparse.ArgumentParser(description=__doc__)
-    parser.add_argument('--library',
-                        required=True,
-                        help='The protobuf library being built')
-    parser.add_argument('--generator',
-                        required=True,
-                        help='The protobuf generator requested')
-    return parser.parse_args()
-
-
-def main(library: str, generator: str):
-    print(f'ERROR: Attempting to build protobuf library {library}, but the '
-          f'{generator} protobuf generator is not in use.')
-    print(f'To use {generator} protobufs, list "{generator}" in '
-          'pw_protobuf_GENERATORS.')
-    sys.exit(1)
-
-
-if __name__ == '__main__':
-    main(**vars(parse_args()))
diff --git a/pw_protobuf_compiler/py/pw_protobuf_compiler/proto_target_invalid.py b/pw_protobuf_compiler/py/pw_protobuf_compiler/proto_target_invalid.py
index 7878dfa..72894b4 100644
--- a/pw_protobuf_compiler/py/pw_protobuf_compiler/proto_target_invalid.py
+++ b/pw_protobuf_compiler/py/pw_protobuf_compiler/proto_target_invalid.py
@@ -58,7 +58,7 @@
     _LOG.error('Depend on one of the following targets instead:')
     _LOG.error('')
     for gen in args.generators:
-        _LOG.error('  //%s:%s_%s', relative_dir, args.target, gen)
+        _LOG.error('  //%s:%s.%s', relative_dir, args.target, gen)
     _LOG.error('')
 
     return 1
diff --git a/pw_protobuf_compiler/py/pw_protobuf_compiler/py.typed b/pw_protobuf_compiler/py/pw_protobuf_compiler/py.typed
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pw_protobuf_compiler/py/pw_protobuf_compiler/py.typed
diff --git a/pw_protobuf_compiler/py/pw_protobuf_compiler/python_protos.py b/pw_protobuf_compiler/py/pw_protobuf_compiler/python_protos.py
index 5f22802..4b7f75a 100644
--- a/pw_protobuf_compiler/py/pw_protobuf_compiler/python_protos.py
+++ b/pw_protobuf_compiler/py/pw_protobuf_compiler/python_protos.py
@@ -148,14 +148,21 @@
         self._items: List[T] = []
         self._package = package
 
-    def __getattr__(self, attr: str):
-        """Descends into subpackages or access proto entities in a package."""
-        if attr in self._packages:
-            return self._packages[attr]
+    def _add_package(self, subpackage: str, package: '_NestedPackage') -> None:
+        self._packages[subpackage] = package
+        setattr(self, subpackage, package)
 
-        for module in self._items:
-            if hasattr(module, attr):
-                return getattr(module, attr)
+    def _add_item(self, item) -> None:
+        self._items.append(item)
+        for attr, value in vars(item).items():
+            if not attr.startswith('_'):
+                setattr(self, attr, value)
+
+    def __getattr__(self, attr: str):
+        # Fall back to item attributes, which includes private attributes.
+        for item in self._items:
+            if hasattr(item, attr):
+                return getattr(item, attr)
 
         raise AttributeError(
             f'Proto package "{self._package}" does not contain "{attr}"')
@@ -164,7 +171,19 @@
         return iter(self._packages.values())
 
     def __repr__(self) -> str:
-        return f'_NestedPackage({self._package!r})'
+        msg = [f'ProtoPackage({self._package!r}']
+
+        public_members = [
+            i for i in vars(self)
+            if i not in self._packages and not i.startswith('_')
+        ]
+        if public_members:
+            msg.append(f'members={str(public_members)}')
+
+        if self._packages:
+            msg.append(f'subpackages={str(list(self._packages))}')
+
+        return ', '.join(msg) + ')'
 
     def __str__(self) -> str:
         return self._package
@@ -196,12 +215,12 @@
         # pylint: disable=protected-access
         for i, subpackage in enumerate(subpackages, 1):
             if subpackage not in entry._packages:
-                entry._packages[subpackage] = _NestedPackage('.'.join(
-                    subpackages[:i]))
+                entry._add_package(subpackage,
+                                   _NestedPackage('.'.join(subpackages[:i])))
 
             entry = entry._packages[subpackage]
 
-        entry._items.append(item)
+        entry._add_item(item)
         # pylint: enable=protected-access
 
     return packages
diff --git a/pw_protobuf_compiler/py/setup.py b/pw_protobuf_compiler/py/setup.py
index ebee3fd..189992f 100644
--- a/pw_protobuf_compiler/py/setup.py
+++ b/pw_protobuf_compiler/py/setup.py
@@ -13,7 +13,7 @@
 # the License.
 """pw_protobuf_compiler"""
 
-import setuptools
+import setuptools  # type: ignore
 
 setuptools.setup(
     name='pw_protobuf_compiler',
@@ -22,6 +22,8 @@
     author_email='pigweed-developers@googlegroups.com',
     description='Pigweed protoc wrapper',
     packages=setuptools.find_packages(),
+    package_data={'pw_protobuf_compiler': ['py.typed']},
+    zip_safe=False,
     entry_points={
         'console_scripts':
         ['generate_protos = pw_protobuf_compiler.generate_protos:main']
diff --git a/pw_random/BUILD.gn b/pw_random/BUILD.gn
index d9dc72a..1328a05 100644
--- a/pw_random/BUILD.gn
+++ b/pw_random/BUILD.gn
@@ -12,12 +12,12 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/target_types.gni")
 import("$dir_pw_docgen/docs.gni")
 import("$dir_pw_unit_test/test.gni")
+
 config("default_config") {
   include_dirs = [ "public" ]
 }
diff --git a/pw_protobuf_compiler/nanopb.gni b/pw_random/CMakeLists.txt
similarity index 78%
copy from pw_protobuf_compiler/nanopb.gni
copy to pw_random/CMakeLists.txt
index 3c3be32..aabdd23 100644
--- a/pw_protobuf_compiler/nanopb.gni
+++ b/pw_random/CMakeLists.txt
@@ -12,8 +12,11 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-declare_args() {
-  # If compiling protos for nanopb, this variable is set to the path to the
-  # nanopb installation.
-  dir_pw_third_party_nanopb = ""
-}
+include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+
+pw_auto_add_simple_module(pw_random
+  PUBLIC_DEPS
+    pw_bytes
+    pw_span
+    pw_status
+)
diff --git a/pw_random/docs.rst b/pw_random/docs.rst
index f1455ea..fd4ba16 100644
--- a/pw_random/docs.rst
+++ b/pw_random/docs.rst
@@ -1,8 +1,4 @@
-.. _chapter-pw-random:
-
-.. default-domain:: cpp
-
-.. highlight:: sh
+.. _module-pw_random:
 
 ---------
 pw_random
diff --git a/pw_random/public/pw_random/random.h b/pw_random/public/pw_random/random.h
index c0af33c..388d88a 100644
--- a/pw_random/public/pw_random/random.h
+++ b/pw_random/public/pw_random/random.h
@@ -13,6 +13,7 @@
 // the License.
 #pragma once
 
+#include <cstddef>
 #include <cstdint>
 #include <span>
 
@@ -49,8 +50,12 @@
   // assumed to be stored in the least significant bits of `data`.
   virtual void InjectEntropyBits(uint32_t data, uint_fast8_t num_bits) = 0;
 
-  // Injects entropy into the pool.
-  virtual void InjectEntropy(ConstByteSpan data) = 0;
+  // Injects entropy into the pool byte-by-byte.
+  void InjectEntropy(ConstByteSpan data) {
+    for (std::byte b : data) {
+      InjectEntropyBits(std::to_integer<uint32_t>(b), /*num_bits=*/8);
+    }
+  }
 };
 
 }  // namespace pw::random
diff --git a/pw_random/public/pw_random/xor_shift.h b/pw_random/public/pw_random/xor_shift.h
index 0554165..4b30319 100644
--- a/pw_random/public/pw_random/xor_shift.h
+++ b/pw_random/public/pw_random/xor_shift.h
@@ -37,7 +37,7 @@
 
   // This generator uses entropy-seeded PRNG to never exhaust its random number
   // pool.
-  StatusWithSize Get(ByteSpan dest) override {
+  StatusWithSize Get(ByteSpan dest) final {
     const size_t bytes_written = dest.size_bytes();
     while (!dest.empty()) {
       uint64_t random = Regenerate();
@@ -53,7 +53,7 @@
   // before xoring the entropy with the current state. This ensures seeding
   // the random value with single bits will progressively fill the state with
   // more entropy.
-  void InjectEntropyBits(uint32_t data, uint_fast8_t num_bits) override {
+  void InjectEntropyBits(uint32_t data, uint_fast8_t num_bits) final {
     if (num_bits == 0) {
       return;
     } else if (num_bits > 32) {
@@ -68,20 +68,6 @@
     state_ ^= (data & mask);
   }
 
-  void InjectEntropy(ConstByteSpan data) override {
-    while (!data.empty()) {
-      size_t chunk_size = std::min(data.size_bytes(), sizeof(state_));
-      uint64_t entropy = 0;
-      std::memcpy(&entropy, data.data(), chunk_size);
-      // Rotate state. When chunk_size == sizeof(state_), this does nothing.
-      uint64_t old_state = state_ >> (kNumStateBits - 8 * chunk_size);
-      state_ = old_state | (state_ << (8 * chunk_size));
-      // XOR entropy into state.
-      state_ ^= entropy;
-      data = data.subspan(chunk_size);
-    }
-  }
-
  private:
   // Calculate and return the next value based on the "xorshift*" algorithm
   uint64_t Regenerate() {
diff --git a/pw_random/xor_shift_test.cc b/pw_random/xor_shift_test.cc
index 660c981..6b7e379 100644
--- a/pw_random/xor_shift_test.cc
+++ b/pw_random/xor_shift_test.cc
@@ -44,7 +44,7 @@
   XorShiftStarRng64 rng(seed1);
   for (size_t i = 0; i < result1_count; ++i) {
     uint64_t val = 0;
-    EXPECT_EQ(rng.GetInt(val).status(), Status::OK);
+    EXPECT_EQ(rng.GetInt(val).status(), Status::Ok());
     EXPECT_EQ(val, result1[i]);
   }
 }
@@ -53,7 +53,7 @@
   XorShiftStarRng64 rng(seed2);
   for (size_t i = 0; i < result2_count; ++i) {
     uint64_t val = 0;
-    EXPECT_EQ(rng.GetInt(val).status(), Status::OK);
+    EXPECT_EQ(rng.GetInt(val).status(), Status::Ok());
     EXPECT_EQ(val, result2[i]);
   }
 }
@@ -62,7 +62,7 @@
   XorShiftStarRng64 rng(seed1);
   uint64_t val = 0;
   rng.InjectEntropyBits(0x1, 1);
-  EXPECT_EQ(rng.GetInt(val).status(), Status::OK);
+  EXPECT_EQ(rng.GetInt(val).status(), Status::Ok());
   EXPECT_NE(val, result1[0]);
 }
 
@@ -72,14 +72,14 @@
   XorShiftStarRng64 rng_1(seed1);
   uint64_t first_val = 0;
   rng_1.InjectEntropyBits(0x1, 1);
-  EXPECT_EQ(rng_1.GetInt(first_val).status(), Status::OK);
+  EXPECT_EQ(rng_1.GetInt(first_val).status(), Status::Ok());
 
   // Use the same starting seed.
   XorShiftStarRng64 rng_2(seed1);
   uint64_t second_val = 0;
   // Use a different number of entropy bits.
   rng_2.InjectEntropyBits(0x1, 2);
-  EXPECT_EQ(rng_2.GetInt(second_val).status(), Status::OK);
+  EXPECT_EQ(rng_2.GetInt(second_val).status(), Status::Ok());
 
   EXPECT_NE(first_val, second_val);
 }
@@ -91,7 +91,7 @@
   XorShiftStarRng64 rng_1(seed1);
   uint64_t first_val = 0;
   rng_1.InjectEntropyBits(0x6, 3);
-  EXPECT_EQ(rng_1.GetInt(first_val).status(), Status::OK);
+  EXPECT_EQ(rng_1.GetInt(first_val).status(), Status::Ok());
 
   // Use the same starting seed.
   XorShiftStarRng64 rng_2(seed1);
@@ -100,7 +100,7 @@
   rng_2.InjectEntropyBits(0x1, 1);
   rng_2.InjectEntropyBits(0x1, 1);
   rng_2.InjectEntropyBits(0x0, 1);
-  EXPECT_EQ(rng_2.GetInt(second_val).status(), Status::OK);
+  EXPECT_EQ(rng_2.GetInt(second_val).status(), Status::Ok());
 
   EXPECT_EQ(first_val, second_val);
 }
@@ -114,7 +114,7 @@
                                                    std::byte(0x17),
                                                    std::byte(0x02)};
   rng.InjectEntropy(entropy);
-  EXPECT_EQ(rng.GetInt(val).status(), Status::OK);
+  EXPECT_EQ(rng.GetInt(val).status(), Status::Ok());
   EXPECT_NE(val, result1[0]);
 }
 
diff --git a/pw_result/BUILD.gn b/pw_result/BUILD.gn
index b839de8..31cd575 100644
--- a/pw_result/BUILD.gn
+++ b/pw_result/BUILD.gn
@@ -12,12 +12,13 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
+import("$dir_pw_bloat/bloat.gni")
 import("$dir_pw_build/target_types.gni")
 import("$dir_pw_docgen/docs.gni")
 import("$dir_pw_unit_test/test.gni")
+
 config("default_config") {
   include_dirs = [ "public" ]
 }
@@ -42,4 +43,27 @@
 
 pw_doc_group("docs") {
   sources = [ "docs.rst" ]
+  report_deps = [ ":result_size" ]
+}
+
+pw_size_report("result_size") {
+  title = "pw::Result vs. pw::Status and out pointer"
+
+  binaries = [
+    {
+      target = "size_report:result_simple"
+      base = "size_report:pointer_simple"
+      label = "Simple function"
+    },
+    {
+      target = "size_report:result_noinline"
+      base = "size_report:pointer_noinline"
+      label = "Simple function without inlining"
+    },
+    {
+      target = "size_report:result_read"
+      base = "size_report:pointer_read"
+      label = "Returning a larger object (std::span)"
+    },
+  ]
 }
diff --git a/pw_protobuf_compiler/nanopb.gni b/pw_result/CMakeLists.txt
similarity index 78%
copy from pw_protobuf_compiler/nanopb.gni
copy to pw_result/CMakeLists.txt
index 3c3be32..20b5db0 100644
--- a/pw_protobuf_compiler/nanopb.gni
+++ b/pw_result/CMakeLists.txt
@@ -12,8 +12,10 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-declare_args() {
-  # If compiling protos for nanopb, this variable is set to the path to the
-  # nanopb installation.
-  dir_pw_third_party_nanopb = ""
-}
+include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+
+pw_auto_add_simple_module(pw_result
+  PUBLIC_DEPS
+    pw_assert
+    pw_status
+)
diff --git a/pw_result/docs.rst b/pw_result/docs.rst
index 5671d8a..bfc37cd 100644
--- a/pw_result/docs.rst
+++ b/pw_result/docs.rst
@@ -1,8 +1,4 @@
-.. default-domain:: cpp
-
-.. highlight:: sh
-
-.. _chapter-pw-result:
+.. _module-pw_result:
 
 ---------
 pw_result
@@ -24,3 +20,15 @@
 Compatibility
 =============
 Works with C++11, but some features require C++17.
+
+Size report
+===========
+The table below showcases the difference in size between functions returning a
+Status with an output pointer, and functions returning a Result, in various
+situations.
+
+Note that these are simplified examples which do not necessarily reflect the
+usage of Result in real code. Make sure to always run your own size reports to
+check if Result is suitable for you.
+
+.. include:: result_size
diff --git a/pw_result/public/pw_result/result.h b/pw_result/public/pw_result/result.h
index 2cf8d48..7093551 100644
--- a/pw_result/public/pw_result/result.h
+++ b/pw_result/public/pw_result/result.h
@@ -26,18 +26,19 @@
 template <typename T>
 class Result {
  public:
-  constexpr Result(T&& value) : value_(std::move(value)), status_(Status::OK) {}
-  constexpr Result(const T& value) : value_(value), status_(Status::OK) {}
+  constexpr Result(T&& value)
+      : value_(std::move(value)), status_(Status::Ok()) {}
+  constexpr Result(const T& value) : value_(value), status_(Status::Ok()) {}
 
   template <typename... Args>
   constexpr Result(std::in_place_t, Args&&... args)
-      : value_(std::forward<Args>(args)...), status_(Status::OK) {}
+      : value_(std::forward<Args>(args)...), status_(Status::Ok()) {}
 
-  constexpr Result(Status status) : status_(status) {
-    PW_CHECK(status_ != Status::OK);
-  }
-  constexpr Result(Status::Code code) : status_(code) {
-    PW_CHECK(status_ != Status::OK);
+  // TODO(pwbug/246): This can be constexpr when tokenized asserts are fixed.
+  Result(Status status) : status_(status) { PW_CHECK(status_ != Status::Ok()); }
+  // TODO(pwbug/246): This can be constexpr when tokenized asserts are fixed.
+  Result(Status::Code code) : status_(code) {
+    PW_CHECK(status_ != Status::Ok());
   }
 
   constexpr Result(const Result&) = default;
@@ -49,17 +50,20 @@
   constexpr Status status() const { return status_; }
   constexpr bool ok() const { return status_.ok(); }
 
-  constexpr T& value() & {
+  // TODO(pwbug/246): This can be constexpr when tokenized asserts are fixed.
+  T& value() & {
     PW_CHECK_OK(status_);
     return value_;
   }
 
-  constexpr const T& value() const& {
+  // TODO(pwbug/246): This can be constexpr when tokenized asserts are fixed.
+  const T& value() const& {
     PW_CHECK_OK(status_);
     return value_;
   }
 
-  constexpr T&& value() && {
+  // TODO(pwbug/246): This can be constexpr when tokenized asserts are fixed.
+  T&& value() && {
     PW_CHECK_OK(status_);
     return std::move(value_);
   }
diff --git a/pw_result/result_test.cc b/pw_result/result_test.cc
index b29c240..ef913fd 100644
--- a/pw_result/result_test.cc
+++ b/pw_result/result_test.cc
@@ -22,19 +22,19 @@
 TEST(Result, CreateOk) {
   Result<const char*> res("hello");
   EXPECT_TRUE(res.ok());
-  EXPECT_EQ(res.status(), Status::OK);
+  EXPECT_EQ(res.status(), Status::Ok());
   EXPECT_EQ(res.value(), "hello");
 }
 
 TEST(Result, CreateNotOk) {
-  Result<int> res(Status::DATA_LOSS);
+  Result<int> res(Status::DataLoss());
   EXPECT_FALSE(res.ok());
-  EXPECT_EQ(res.status(), Status::DATA_LOSS);
+  EXPECT_EQ(res.status(), Status::DataLoss());
 }
 
 TEST(Result, ValueOr) {
   Result<int> good(3);
-  Result<int> bad(Status::DATA_LOSS);
+  Result<int> bad(Status::DataLoss());
   EXPECT_EQ(good.value_or(42), 3);
   EXPECT_EQ(bad.value_or(42), 42);
 }
@@ -55,7 +55,7 @@
 
 Result<float> Divide(float a, float b) {
   if (b == 0) {
-    return Status::INVALID_ARGUMENT;
+    return Status::InvalidArgument();
   }
   return a / b;
 }
@@ -69,7 +69,7 @@
 TEST(Divide, ReturnNotOk) {
   Result<float> res = Divide(10, 0);
   EXPECT_FALSE(res.ok());
-  EXPECT_EQ(res.status(), Status::INVALID_ARGUMENT);
+  EXPECT_EQ(res.status(), Status::InvalidArgument());
 }
 
 }  // namespace
diff --git a/pw_result/size_report/BUILD b/pw_result/size_report/BUILD
new file mode 100644
index 0000000..1a67937
--- /dev/null
+++ b/pw_result/size_report/BUILD
@@ -0,0 +1,78 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+load(
+    "//pw_build:pigweed.bzl",
+    "pw_cc_binary",
+)
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])  # Apache License 2.0
+
+pw_cc_binary(
+    name = "pointer_simple",
+    srcs = ["pointer_simple.cc"],
+    deps = [
+        "//pw_result",
+        "//pw_log",
+    ],
+)
+
+pw_cc_binary(
+    name = "result_simple",
+    srcs = ["result_simple.cc"],
+    deps = [
+        "//pw_result",
+        "//pw_log",
+    ],
+)
+
+pw_cc_binary(
+    name = "pointer_noinline",
+    srcs = ["pointer_noinline.cc"],
+    deps = [
+        "//pw_result",
+        "//pw_log",
+    ],
+)
+
+pw_cc_binary(
+    name = "result_noinline",
+    srcs = ["result_noinline.cc"],
+    deps = [
+        "//pw_result",
+        "//pw_log",
+    ],
+)
+
+pw_cc_binary(
+    name = "pointer_read",
+    srcs = ["pointer_read.cc"],
+    deps = [
+        "//pw_result",
+        "//pw_log",
+        "//pw_span",
+    ],
+)
+
+pw_cc_binary(
+    name = "result_read",
+    srcs = ["result_read.cc"],
+    deps = [
+        "//pw_result",
+        "//pw_log",
+        "//pw_span",
+    ],
+)
diff --git a/pw_result/size_report/BUILD.gn b/pw_result/size_report/BUILD.gn
new file mode 100644
index 0000000..6e1d6d5
--- /dev/null
+++ b/pw_result/size_report/BUILD.gn
@@ -0,0 +1,73 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+pw_executable("pointer_simple") {
+  sources = [ "pointer_simple.cc" ]
+  deps = [
+    "..",
+    dir_pw_log,
+  ]
+}
+
+pw_executable("result_simple") {
+  sources = [ "result_simple.cc" ]
+  deps = [
+    "..",
+    dir_pw_log,
+  ]
+}
+
+pw_executable("pointer_noinline") {
+  sources = [ "pointer_noinline.cc" ]
+  deps = [
+    "..",
+    dir_pw_log,
+    dir_pw_preprocessor,
+  ]
+}
+
+pw_executable("result_noinline") {
+  sources = [ "result_noinline.cc" ]
+  deps = [
+    "..",
+    dir_pw_log,
+    dir_pw_preprocessor,
+  ]
+}
+
+pw_executable("pointer_read") {
+  sources = [ "pointer_read.cc" ]
+  deps = [
+    "..",
+    dir_pw_bytes,
+    dir_pw_log,
+    dir_pw_preprocessor,
+    dir_pw_span,
+  ]
+}
+
+pw_executable("result_read") {
+  sources = [ "result_read.cc" ]
+  deps = [
+    "..",
+    dir_pw_bytes,
+    dir_pw_log,
+    dir_pw_preprocessor,
+    dir_pw_span,
+  ]
+}
diff --git a/pw_result/size_report/pointer_noinline.cc b/pw_result/size_report/pointer_noinline.cc
new file mode 100644
index 0000000..522c9fe
--- /dev/null
+++ b/pw_result/size_report/pointer_noinline.cc
@@ -0,0 +1,37 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_log/log.h"
+#include "pw_preprocessor/compiler.h"
+#include "pw_status/status.h"
+
+PW_NO_INLINE pw::Status Divide(float a, float b, float* out) {
+  if (b == 0) {
+    return pw::Status::InvalidArgument();
+  }
+  *out = a / b;
+  return pw::Status::Ok();
+}
+
+int volatile* unoptimizable;
+
+int main() {
+  float f;
+  if (Divide(*unoptimizable, *unoptimizable, &f).ok()) {
+    PW_LOG_INFO("result is %f", f);
+    return 0;
+  }
+
+  return 1;
+}
diff --git a/pw_result/size_report/pointer_read.cc b/pw_result/size_report/pointer_read.cc
new file mode 100644
index 0000000..fb4f948
--- /dev/null
+++ b/pw_result/size_report/pointer_read.cc
@@ -0,0 +1,57 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include <cstring>
+#include <span>
+
+#include "pw_bytes/array.h"
+#include "pw_log/log.h"
+#include "pw_preprocessor/compiler.h"
+#include "pw_status/status.h"
+
+namespace {
+
+// clang-format off
+constexpr auto kArray = pw::bytes::Array<
+    0x0a, 0x14, 0x00, 0x00, 0x00, 0x00, 0x32, 0x00,
+    0x00, 0x00, 0x64, 0x00, 0x00, 0x00, 0x96, 0x00,
+    0x00, 0x00, 0xc8, 0x00, 0x00, 0x00, 0x12, 0x08,
+    0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01>();
+// clang-format on
+
+PW_NO_INLINE pw::Status Read(size_t offset,
+                             size_t size,
+                             std::span<const std::byte>* out) {
+  if (offset + size >= std::size(kArray)) {
+    return pw::Status::OutOfRange();
+  }
+
+  *out = std::span<const std::byte>(std::data(kArray) + offset, size);
+  return pw::Status::Ok();
+}
+
+}  // namespace
+
+size_t volatile* unoptimizable;
+
+int main() {
+  std::span<const std::byte> data;
+  pw::Status status = Read(*unoptimizable, *unoptimizable, &data);
+  if (!status.ok()) {
+    return 1;
+  }
+
+  PW_LOG_INFO("Read %u bytes", static_cast<unsigned>(data.size()));
+  return 0;
+}
diff --git a/pw_checksum/ccitt_crc16_test_c.c b/pw_result/size_report/pointer_simple.cc
similarity index 60%
copy from pw_checksum/ccitt_crc16_test_c.c
copy to pw_result/size_report/pointer_simple.cc
index 72f3cec..7c1440e 100644
--- a/pw_checksum/ccitt_crc16_test_c.c
+++ b/pw_result/size_report/pointer_simple.cc
@@ -12,8 +12,25 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
-#include "pw_checksum/ccitt_crc16.h"
+#include "pw_log/log.h"
+#include "pw_status/status.h"
 
-uint16_t CallChecksumCcittCrc16(const void* data, size_t size_bytes) {
-  return pw_ChecksumCcittCrc16(data, size_bytes, 0xFFFF);
+pw::Status Divide(float a, float b, float* out) {
+  if (b == 0) {
+    return pw::Status::InvalidArgument();
+  }
+  *out = a / b;
+  return pw::Status::Ok();
+}
+
+int volatile* unoptimizable;
+
+int main() {
+  float f;
+  if (Divide(*unoptimizable, *unoptimizable, &f).ok()) {
+    PW_LOG_INFO("result is %f", f);
+    return 0;
+  }
+
+  return 1;
 }
diff --git a/pw_result/size_report/result_noinline.cc b/pw_result/size_report/result_noinline.cc
new file mode 100644
index 0000000..b008e52
--- /dev/null
+++ b/pw_result/size_report/result_noinline.cc
@@ -0,0 +1,35 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_log/log.h"
+#include "pw_preprocessor/compiler.h"
+#include "pw_result/result.h"
+
+PW_NO_INLINE pw::Result<float> Divide(float a, float b) {
+  if (b == 0) {
+    return pw::Status::InvalidArgument();
+  }
+  return a / b;
+}
+
+float volatile* unoptimizable;
+
+int main() {
+  if (pw::Result result = Divide(*unoptimizable, *unoptimizable); result.ok()) {
+    PW_LOG_INFO("result is %f", result.value());
+    return 0;
+  }
+
+  return 1;
+}
diff --git a/pw_result/size_report/result_read.cc b/pw_result/size_report/result_read.cc
new file mode 100644
index 0000000..351be04
--- /dev/null
+++ b/pw_result/size_report/result_read.cc
@@ -0,0 +1,54 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include <cstring>
+#include <span>
+
+#include "pw_bytes/array.h"
+#include "pw_log/log.h"
+#include "pw_preprocessor/compiler.h"
+#include "pw_result/result.h"
+
+namespace {
+
+// clang-format off
+constexpr auto kArray = pw::bytes::Array<
+    0x0a, 0x14, 0x00, 0x00, 0x00, 0x00, 0x32, 0x00,
+    0x00, 0x00, 0x64, 0x00, 0x00, 0x00, 0x96, 0x00,
+    0x00, 0x00, 0xc8, 0x00, 0x00, 0x00, 0x12, 0x08,
+    0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01>();
+// clang-format on
+
+PW_NO_INLINE pw::Result<std::span<const std::byte>> Read(size_t offset,
+                                                         size_t size) {
+  if (offset + size >= std::size(kArray)) {
+    return pw::Status::OutOfRange();
+  }
+
+  return std::span<const std::byte>(std::data(kArray) + offset, size);
+}
+
+}  // namespace
+
+size_t volatile* unoptimizable;
+
+int main() {
+  pw::Result result = Read(*unoptimizable, *unoptimizable);
+  if (!result.ok()) {
+    return 1;
+  }
+
+  PW_LOG_INFO("Read %u bytes", static_cast<unsigned>(result.value().size()));
+  return 0;
+}
diff --git a/pw_sys_io_baremetal_lm3s6965evb/early_boot.c b/pw_result/size_report/result_simple.cc
similarity index 60%
copy from pw_sys_io_baremetal_lm3s6965evb/early_boot.c
copy to pw_result/size_report/result_simple.cc
index 1670b7d..333eca8 100644
--- a/pw_sys_io_baremetal_lm3s6965evb/early_boot.c
+++ b/pw_result/size_report/result_simple.cc
@@ -12,6 +12,23 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
-#include "pw_boot_armv7m/boot.h"
+#include "pw_log/log.h"
+#include "pw_result/result.h"
 
-void pw_PreStaticConstructorInit() {}
\ No newline at end of file
+pw::Result<float> Divide(float a, float b) {
+  if (b == 0) {
+    return pw::Status::InvalidArgument();
+  }
+  return a / b;
+}
+
+float volatile* unoptimizable;
+
+int main() {
+  if (pw::Result result = Divide(*unoptimizable, *unoptimizable); result.ok()) {
+    PW_LOG_INFO("result is %f", result.value());
+    return 0;
+  }
+
+  return 1;
+}
diff --git a/pw_ring_buffer/BUILD.gn b/pw_ring_buffer/BUILD.gn
index 27a3fa8..6c5a17b 100644
--- a/pw_ring_buffer/BUILD.gn
+++ b/pw_ring_buffer/BUILD.gn
@@ -12,12 +12,12 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/target_types.gni")
 import("$dir_pw_docgen/docs.gni")
 import("$dir_pw_unit_test/test.gni")
+
 config("default_config") {
   include_dirs = [ "public" ]
 }
diff --git a/pw_ring_buffer/docs.rst b/pw_ring_buffer/docs.rst
index c3ab200..2209468 100644
--- a/pw_ring_buffer/docs.rst
+++ b/pw_ring_buffer/docs.rst
@@ -1,6 +1,4 @@
-.. default-domain:: cpp
-
-.. highlight:: sh
+.. _module-pw_ring_buffer:
 
 --------------
 pw_ring_buffer
diff --git a/pw_ring_buffer/prefixed_entry_ring_buffer.cc b/pw_ring_buffer/prefixed_entry_ring_buffer.cc
index 31eb222..d122162 100644
--- a/pw_ring_buffer/prefixed_entry_ring_buffer.cc
+++ b/pw_ring_buffer/prefixed_entry_ring_buffer.cc
@@ -33,24 +33,24 @@
   if ((buffer.data() == nullptr) ||  //
       (buffer.size_bytes() == 0) ||  //
       (buffer.size_bytes() > kMaxBufferBytes)) {
-    return Status::INVALID_ARGUMENT;
+    return Status::InvalidArgument();
   }
 
   buffer_ = buffer.data();
   buffer_bytes_ = buffer.size_bytes();
 
   Clear();
-  return Status::OK;
+  return Status::Ok();
 }
 
 Status PrefixedEntryRingBuffer::InternalPushBack(std::span<const byte> data,
                                                  byte user_preamble_data,
                                                  bool drop_elements_if_needed) {
   if (buffer_ == nullptr) {
-    return Status::FAILED_PRECONDITION;
+    return Status::FailedPrecondition();
   }
   if (data.size_bytes() == 0) {
-    return Status::INVALID_ARGUMENT;
+    return Status::InvalidArgument();
   }
 
   // Prepare the preamble, and ensure we can fit the preamble and entry.
@@ -59,7 +59,7 @@
   size_t total_write_bytes =
       (user_preamble_ ? 1 : 0) + varint_bytes + data.size_bytes();
   if (buffer_bytes_ < total_write_bytes) {
-    return Status::OUT_OF_RANGE;
+    return Status::OutOfRange();
   }
 
   if (drop_elements_if_needed) {
@@ -70,7 +70,7 @@
     }
   } else if (RawAvailableBytes() < total_write_bytes) {
     // TryPushBack() case: don't evict items.
-    return Status::RESOURCE_EXHAUSTED;
+    return Status::ResourceExhausted();
   }
 
   // Write the new entry into the ring buffer.
@@ -80,7 +80,7 @@
   RawWrite(std::span(varint_buf, varint_bytes));
   RawWrite(data);
   entry_count_++;
-  return Status::OK;
+  return Status::Ok();
 }
 
 auto GetOutput(std::span<byte> data_out, size_t* write_index) {
@@ -90,8 +90,8 @@
     memcpy(data_out.data() + *write_index, src.data(), copy_size);
     *write_index += copy_size;
 
-    return (copy_size == src.size_bytes()) ? Status::OK
-                                           : Status::RESOURCE_EXHAUSTED;
+    return (copy_size == src.size_bytes()) ? Status::Ok()
+                                           : Status::ResourceExhausted();
   };
 }
 
@@ -119,10 +119,10 @@
 template <typename T>
 Status PrefixedEntryRingBuffer::InternalRead(T read_output, bool get_preamble) {
   if (buffer_ == nullptr) {
-    return Status::FAILED_PRECONDITION;
+    return Status::FailedPrecondition();
   }
   if (EntryCount() == 0) {
-    return Status::OUT_OF_RANGE;
+    return Status::OutOfRange();
   }
 
   // Figure out where to start reading (wrapped); accounting for preamble.
@@ -150,10 +150,10 @@
 
 Status PrefixedEntryRingBuffer::PopFront() {
   if (buffer_ == nullptr) {
-    return Status::FAILED_PRECONDITION;
+    return Status::FailedPrecondition();
   }
   if (EntryCount() == 0) {
-    return Status::OUT_OF_RANGE;
+    return Status::OutOfRange();
   }
 
   // Advance the read pointer past the front entry to the next one.
@@ -161,16 +161,16 @@
   size_t entry_bytes = info.preamble_bytes + info.data_bytes;
   read_idx_ = IncrementIndex(read_idx_, entry_bytes);
   entry_count_--;
-  return Status::OK;
+  return Status::Ok();
 }
 
 Status PrefixedEntryRingBuffer::Dering() {
   if (buffer_ == nullptr) {
-    return Status::FAILED_PRECONDITION;
+    return Status::FailedPrecondition();
   }
   // Check if by luck we're already deringed.
   if (read_idx_ == 0) {
-    return Status::OK;
+    return Status::Ok();
   }
 
   auto buffer_span = std::span(buffer_, buffer_bytes_);
@@ -184,7 +184,7 @@
   }
   write_idx_ -= read_idx_;
   read_idx_ = 0;
-  return Status::OK;
+  return Status::Ok();
 }
 
 size_t PrefixedEntryRingBuffer::FrontEntryDataSizeBytes() {
diff --git a/pw_ring_buffer/prefixed_entry_ring_buffer_test.cc b/pw_ring_buffer/prefixed_entry_ring_buffer_test.cc
index 271ce74..6469f42 100644
--- a/pw_ring_buffer/prefixed_entry_ring_buffer_test.cc
+++ b/pw_ring_buffer/prefixed_entry_ring_buffer_test.cc
@@ -35,20 +35,20 @@
 
   EXPECT_EQ(ring.EntryCount(), 0u);
   EXPECT_EQ(ring.SetBuffer(std::span<byte>(nullptr, 10u)),
-            Status::INVALID_ARGUMENT);
-  EXPECT_EQ(ring.SetBuffer(std::span(buf, 0u)), Status::INVALID_ARGUMENT);
+            Status::InvalidArgument());
+  EXPECT_EQ(ring.SetBuffer(std::span(buf, 0u)), Status::InvalidArgument());
   EXPECT_EQ(ring.FrontEntryDataSizeBytes(), 0u);
 
-  EXPECT_EQ(ring.PushBack(buf), Status::FAILED_PRECONDITION);
+  EXPECT_EQ(ring.PushBack(buf), Status::FailedPrecondition());
   EXPECT_EQ(ring.EntryCount(), 0u);
-  EXPECT_EQ(ring.PeekFront(buf, &count), Status::FAILED_PRECONDITION);
+  EXPECT_EQ(ring.PeekFront(buf, &count), Status::FailedPrecondition());
   EXPECT_EQ(count, 0u);
   EXPECT_EQ(ring.EntryCount(), 0u);
   EXPECT_EQ(ring.PeekFrontWithPreamble(buf, &count),
-            Status::FAILED_PRECONDITION);
+            Status::FailedPrecondition());
   EXPECT_EQ(count, 0u);
   EXPECT_EQ(ring.EntryCount(), 0u);
-  EXPECT_EQ(ring.PopFront(), Status::FAILED_PRECONDITION);
+  EXPECT_EQ(ring.PopFront(), Status::FailedPrecondition());
   EXPECT_EQ(ring.EntryCount(), 0u);
 }
 
@@ -83,23 +83,23 @@
   // out and happen to see a previous value.
   size_t read_size = 500U;
 
-  EXPECT_EQ(ring.SetBuffer(test_buffer), Status::OK);
+  EXPECT_EQ(ring.SetBuffer(test_buffer), Status::Ok());
 
   EXPECT_EQ(ring.EntryCount(), 0u);
-  EXPECT_EQ(ring.PopFront(), Status::OUT_OF_RANGE);
+  EXPECT_EQ(ring.PopFront(), Status::OutOfRange());
   EXPECT_EQ(ring.EntryCount(), 0u);
   EXPECT_EQ(ring.PushBack(std::span(single_entry_data, 0u)),
-            Status::INVALID_ARGUMENT);
+            Status::InvalidArgument());
   EXPECT_EQ(ring.EntryCount(), 0u);
   EXPECT_EQ(
       ring.PushBack(std::span(single_entry_data, sizeof(test_buffer) + 5)),
-      Status::OUT_OF_RANGE);
+      Status::OutOfRange());
   EXPECT_EQ(ring.EntryCount(), 0u);
-  EXPECT_EQ(ring.PeekFront(read_buffer, &read_size), Status::OUT_OF_RANGE);
+  EXPECT_EQ(ring.PeekFront(read_buffer, &read_size), Status::OutOfRange());
   EXPECT_EQ(read_size, 0u);
   read_size = 500U;
   EXPECT_EQ(ring.PeekFrontWithPreamble(read_buffer, &read_size),
-            Status::OUT_OF_RANGE);
+            Status::OutOfRange());
   EXPECT_EQ(read_size, 0u);
 
   size_t user_preamble_bytes = (user_data ? 1 : 0);
@@ -115,12 +115,12 @@
     ASSERT_EQ(ring.FrontEntryTotalSizeBytes(), 0u);
 
     ASSERT_EQ(ring.PushBack(std::span(single_entry_data, data_size), byte(i)),
-              Status::OK);
+              Status::Ok());
     ASSERT_EQ(ring.FrontEntryDataSizeBytes(), data_size);
     ASSERT_EQ(ring.FrontEntryTotalSizeBytes(), single_entry_total_size);
 
     read_size = 500U;
-    ASSERT_EQ(ring.PeekFront(read_buffer, &read_size), Status::OK);
+    ASSERT_EQ(ring.PeekFront(read_buffer, &read_size), Status::Ok());
     ASSERT_EQ(read_size, data_size);
 
     // ASSERT_THAT(std::span(expect_buffer).last(data_size),
@@ -131,9 +131,10 @@
               0);
 
     read_size = 500U;
-    ASSERT_EQ(ring.PeekFrontWithPreamble(read_buffer, &read_size), Status::OK);
+    ASSERT_EQ(ring.PeekFrontWithPreamble(read_buffer, &read_size),
+              Status::Ok());
     ASSERT_EQ(read_size, single_entry_total_size);
-    ASSERT_EQ(ring.PopFront(), Status::OK);
+    ASSERT_EQ(ring.PopFront(), Status::Ok());
 
     if (user_data) {
       expect_buffer[0] = byte(i);
@@ -167,7 +168,7 @@
   PrefixedEntryRingBuffer ring(user_data);
   byte test_buffer[single_entry_test_buffer_size];
 
-  EXPECT_EQ(ring.SetBuffer(test_buffer), Status::OK);
+  EXPECT_EQ(ring.SetBuffer(test_buffer), Status::Ok());
   EXPECT_EQ(ring.EntryCount(), 0u);
 
   constexpr size_t data_size = sizeof(single_entry_data) - (user_data ? 1 : 0);
@@ -181,7 +182,7 @@
     for (j = 0; j < kSingleEntryCycles; j++) {
       memset(write_buffer, j + seed, sizeof(write_buffer));
 
-      ASSERT_EQ(ring.PushBack(write_buffer), Status::OK);
+      ASSERT_EQ(ring.PushBack(write_buffer), Status::Ok());
 
       size_t expected_count = (j < kCountingUpMaxExpectedEntries)
                                   ? j + 1
@@ -195,11 +196,11 @@
       byte read_buffer[sizeof(write_buffer)];
       size_t read_size;
       memset(write_buffer, fill_val + j, sizeof(write_buffer));
-      ASSERT_EQ(ring.PeekFront(read_buffer, &read_size), Status::OK);
+      ASSERT_EQ(ring.PeekFront(read_buffer, &read_size), Status::Ok());
 
       ASSERT_EQ(memcmp(write_buffer, read_buffer, data_size), 0);
 
-      ASSERT_EQ(ring.PopFront(), Status::OK);
+      ASSERT_EQ(ring.PopFront(), Status::Ok());
     }
   }
 }
@@ -221,13 +222,13 @@
   PrefixedEntryRingBuffer ring(user_data);
   byte test_buffer[single_entry_test_buffer_size];
 
-  EXPECT_EQ(ring.SetBuffer(test_buffer), Status::OK);
+  EXPECT_EQ(ring.SetBuffer(test_buffer), Status::Ok());
 
   auto output = [](std::span<const byte> src) -> Status {
     for (byte b : src) {
       read_buffer.push_back(b);
     }
-    return Status::OK;
+    return Status::Ok();
   };
 
   size_t user_preamble_bytes = (user_data ? 1 : 0);
@@ -243,12 +244,12 @@
     ASSERT_EQ(ring.FrontEntryTotalSizeBytes(), 0u);
 
     ASSERT_EQ(ring.PushBack(std::span(single_entry_data, data_size), byte(i)),
-              Status::OK);
+              Status::Ok());
     ASSERT_EQ(ring.FrontEntryDataSizeBytes(), data_size);
     ASSERT_EQ(ring.FrontEntryTotalSizeBytes(), single_entry_total_size);
 
     read_buffer.clear();
-    ASSERT_EQ(ring.PeekFront(output), Status::OK);
+    ASSERT_EQ(ring.PeekFront(output), Status::Ok());
     ASSERT_EQ(read_buffer.size(), data_size);
 
     ASSERT_EQ(memcmp(std::span(expect_buffer).last(data_size).data(),
@@ -257,9 +258,9 @@
               0);
 
     read_buffer.clear();
-    ASSERT_EQ(ring.PeekFrontWithPreamble(output), Status::OK);
+    ASSERT_EQ(ring.PeekFrontWithPreamble(output), Status::Ok());
     ASSERT_EQ(read_buffer.size(), single_entry_total_size);
-    ASSERT_EQ(ring.PopFront(), Status::OK);
+    ASSERT_EQ(ring.PopFront(), Status::Ok());
 
     if (user_data) {
       expect_buffer[0] = byte(i);
@@ -292,7 +293,7 @@
   PrefixedEntryRingBuffer ring;
 
   byte test_buffer[kTestBufferSize];
-  EXPECT_EQ(ring.SetBuffer(test_buffer), Status::OK);
+  EXPECT_EQ(ring.SetBuffer(test_buffer), Status::Ok());
 
   // Entry data is entry size - preamble (single byte in this case).
   byte single_entry_buffer[kEntrySizeBytes - 1u];
@@ -335,7 +336,7 @@
     EXPECT_EQ(ring.EntryCount(), kTotalEntryCount);
     EXPECT_EQ(expected_result.size(), ring.TotalUsedBytes());
 
-    ASSERT_EQ(ring.Dering(), Status::OK);
+    ASSERT_EQ(ring.Dering(), Status::Ok());
 
     // Check values after doing the dering.
     EXPECT_EQ(ring.EntryCount(), kTotalEntryCount);
@@ -347,11 +348,11 @@
       for (byte b : src) {
         actual_result.push_back(b);
       }
-      return Status::OK;
+      return Status::Ok();
     };
     while (ring.EntryCount()) {
-      ASSERT_EQ(ring.PeekFrontWithPreamble(output), Status::OK);
-      ASSERT_EQ(ring.PopFront(), Status::OK);
+      ASSERT_EQ(ring.PeekFrontWithPreamble(output), Status::Ok());
+      ASSERT_EQ(ring.PopFront(), Status::Ok());
     }
 
     // Ensure the actual result out of the ring buffer matches our manually
@@ -396,7 +397,7 @@
     T item;
   } aliased;
   size_t bytes_read = 0;
-  PW_CHECK_INT_EQ(ring.PeekFront(aliased.buffer, &bytes_read), Status::OK);
+  PW_CHECK_OK(ring.PeekFront(aliased.buffer, &bytes_read));
   PW_CHECK_INT_EQ(bytes_read, sizeof(T));
   return aliased.item;
 }
@@ -404,7 +405,7 @@
 TEST(PrefixedEntryRingBuffer, TryPushBack) {
   PrefixedEntryRingBuffer ring;
   byte test_buffer[kTestBufferSize];
-  EXPECT_EQ(ring.SetBuffer(test_buffer), Status::OK);
+  EXPECT_EQ(ring.SetBuffer(test_buffer), Status::Ok());
 
   // Fill up the ring buffer with a constant.
   int total_items = 0;
@@ -413,7 +414,7 @@
     if (status.ok()) {
       total_items++;
     } else {
-      EXPECT_EQ(status, Status::RESOURCE_EXHAUSTED);
+      EXPECT_EQ(status, Status::ResourceExhausted());
       break;
     }
   }
@@ -421,13 +422,13 @@
 
   // Should be unable to push more items.
   for (int i = 0; i < total_items; ++i) {
-    EXPECT_EQ(TryPushBack<int>(ring, 100), Status::RESOURCE_EXHAUSTED);
+    EXPECT_EQ(TryPushBack<int>(ring, 100), Status::ResourceExhausted());
     EXPECT_EQ(PeekFront<int>(ring), 5);
   }
 
   // Fill up the ring buffer with a constant.
   for (int i = 0; i < total_items; ++i) {
-    EXPECT_EQ(PushBack<int>(ring, 100), Status::OK);
+    EXPECT_EQ(PushBack<int>(ring, 100), Status::Ok());
   }
   EXPECT_EQ(PeekFront<int>(ring), 100);
 }
diff --git a/pw_rpc/BUILD b/pw_rpc/BUILD
index 1d5a7db..a7c4a83 100644
--- a/pw_rpc/BUILD
+++ b/pw_rpc/BUILD
@@ -23,10 +23,30 @@
 licenses(["notice"])  # Apache License 2.0
 
 pw_cc_library(
-    name = "pw_rpc",
+    name = "client",
+    srcs = [
+        "client.cc",
+        "base_client_call.cc",
+    ],
+    hdrs = [
+        "public/pw_rpc/client.h",
+        "public/pw_rpc/internal/base_client_call.h",
+    ],
+    deps = [
+        ":common",
+    ]
+)
+
+pw_cc_library(
+    name = "server",
     srcs = [
         "base_server_writer.cc",
         "public/pw_rpc/internal/base_server_writer.h",
+        "public/pw_rpc/internal/call.h",
+        "public/pw_rpc/internal/hash.h",
+        "public/pw_rpc/internal/method.h",
+        "public/pw_rpc/internal/method_union.h",
+        "public/pw_rpc/internal/server.h",
         "server.cc",
         "service.cc",
     ],
@@ -34,22 +54,10 @@
         "public/pw_rpc/server.h",
         "public/pw_rpc/server_context.h",
         "public/pw_rpc/service.h",
-        "public/pw_rpc/internal/channel.h",
-        "public/pw_rpc/internal/server.h",
-        "public/pw_rpc/internal/hash.h",
-        # TODO(hepler): Only building the test version of the server for now.
-        "test_impl/public_overrides/pw_rpc/internal/method.h",
     ],
-    includes = [
-        "public",
-        "test_impl/public_overrides/",
-    ],
+    includes = ["public"],
     deps = [
         ":common",
-        "//pw_assert",
-        "//pw_log",
-        "//pw_span",
-        "//pw_status",
     ],
 )
 
@@ -58,16 +66,14 @@
     srcs = [
         "channel.cc",
         "packet.cc",
+        "public/pw_rpc/internal/channel.h",
+        "public/pw_rpc/internal/method_type.h",
+        "public/pw_rpc/internal/packet.h",
     ],
     hdrs = [
         "public/pw_rpc/channel.h",
-        "public/pw_rpc/internal/base_method.h",
-        "public/pw_rpc/internal/call.h",
-        "public/pw_rpc/internal/packet.h",
-        "public/pw_rpc/server.h",
     ],
     includes = ["public"],
-    visibility = ["//visibility:private"],
     deps = [
         "//pw_assert",
         "//pw_log",
@@ -77,11 +83,23 @@
 )
 
 pw_cc_library(
+    name = "service_method_traits",
+    hdrs = [
+        "public/pw_rpc/internal/service_method_traits.h",
+    ],
+    includes = ["public"],
+    deps = [ ":server"  ],
+)
+
+pw_cc_library(
     name = "internal_test_utils",
-    hdrs = ["pw_rpc_private/internal_test_utils.h"],
+    hdrs = [
+        "public/pw_rpc/internal/test_method.h",
+        "pw_rpc_private/internal_test_utils.h",
+    ],
     visibility = ["//visibility:private"],
     deps = [
-        ":common",
+        ":server",
         "//pw_span",
     ],
 )
@@ -93,13 +111,21 @@
     srcs = [
         "nanopb/codegen_test.cc",
         "nanopb/echo_service_test.cc",
-        "nanopb/method.cc",
-        "nanopb/method_test.cc",
+        "nanopb/nanopb_client_call.cc",
+        "nanopb/nanopb_client_call_test.cc",
+        "nanopb/nanopb_common.cc",
+        "nanopb/nanopb_method.cc",
+        "nanopb/nanopb_method_test.cc",
+        "nanopb/nanopb_method_union_test.cc",
         "nanopb/public/pw_rpc/echo_service_nanopb.h",
-        "nanopb/public/pw_rpc/internal/service_method_traits.h",
-        "nanopb/public/pw_rpc/test_method_context.h",
-        "nanopb/public_overrides/pw_rpc/internal/method.h",
-        "nanopb/service_method_traits_test.cc",
+        "nanopb/public/pw_rpc/internal/nanopb_common.h",
+        "nanopb/public/pw_rpc/internal/nanopb_method.h",
+        "nanopb/public/pw_rpc/internal/nanopb_method_union.h",
+        "nanopb/public/pw_rpc/internal/nanopb_service_method_traits.h",
+        "nanopb/public/pw_rpc/nanopb_client_call.h",
+        "nanopb/public/pw_rpc/nanopb_test_method_context.h",
+        "nanopb/pw_rpc_nanopb_private/internal_test_utils.h",
+        "nanopb/nanopb_service_method_traits_test.cc",
         "nanopb/test.pb.c",
         "nanopb/test.pb.h",
         "nanopb/test_rpc.pb.h",
@@ -113,7 +139,29 @@
     ],
     deps = [
         ":internal_test_utils",
-        ":pw_rpc",
+        ":server",
+    ],
+)
+
+pw_cc_test(
+    name = "base_client_call_test",
+    srcs = [
+        "base_client_call_test.cc",
+    ],
+    deps = [
+        ":client",
+        ":internal_test_utils",
+    ],
+)
+
+pw_cc_test(
+    name = "client_test",
+    srcs = [
+        "client_test.cc",
+    ],
+    deps = [
+        ":client",
+        ":internal_test_utils",
     ],
 )
 
@@ -121,7 +169,7 @@
     name = "channel_test",
     srcs = ["channel_test.cc"],
     deps = [
-        ":common",
+        ":server",
         ":test_utils_test_server",
     ],
 )
@@ -132,7 +180,7 @@
         "packet_test.cc",
     ],
     deps = [
-        ":common",
+        ":server",
     ],
 )
 
@@ -150,7 +198,19 @@
     ],
     deps = [
         ":internal_test_utils",
-        ":pw_rpc",
+        ":server",
+        "//pw_assert",
+    ],
+)
+
+pw_cc_test(
+    name = "service_test",
+    srcs = [
+        "service_test.cc",
+    ],
+    deps = [
+        ":internal_test_utils",
+        ":server",
         "//pw_assert",
     ],
 )
diff --git a/pw_rpc/BUILD.gn b/pw_rpc/BUILD.gn
index 2334642..01228ad 100644
--- a/pw_rpc/BUILD.gn
+++ b/pw_rpc/BUILD.gn
@@ -12,120 +12,98 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
+import("$dir_pw_bloat/bloat.gni")
 import("$dir_pw_build/target_types.gni")
 import("$dir_pw_docgen/docs.gni")
 import("$dir_pw_protobuf_compiler/proto.gni")
+import("$dir_pw_third_party/nanopb/nanopb.gni")
 import("$dir_pw_unit_test/test.gni")
+
 config("default_config") {
   include_dirs = [ "public" ]
   visibility = [ ":*" ]
 }
 
-# pw_rpc servers depend on the protobuf library used to encode and decode
-# requests and responses when invoking methods. This template is used to create
-# instances of the pw_rpc server library with different implementations.
-#
-# The implementation parameter must refer to a library that provides the
-# definition of the Method class in pw_rpc/internal/method.h. The Method class
-# provides the Invoke function, which handles the server use to call into the
-# RPC functions themselves.
-template("_pw_rpc_server_library") {
-  assert(defined(invoker.implementation),
-         "_pw_rpc_server_library requires an implementation to be set")
-  _target_name = target_name
-
-  pw_source_set(_target_name) {
-    forward_variables_from(invoker, "*")
-
-    public_deps = [
-      ":server_library_deps",
-      implementation,
-    ]
-    deps = [
-      dir_pw_assert,
-      dir_pw_log,
-    ]
-    public = [
-      "public/pw_rpc/server.h",
-      "public/pw_rpc/server_context.h",
-    ]
-    sources = [
-      "base_server_writer.cc",
-      "public/pw_rpc/internal/base_server_writer.h",
-      "public/pw_rpc/internal/server.h",
-      "server.cc",
-      "service.cc",
-    ]
-    allow_circular_includes_from = [ implementation ]
-    friend = [ "./*" ]
-  }
-
-  pw_source_set("internal_test_utils_$_target_name") {
-    public = [ "pw_rpc_private/internal_test_utils.h" ]
-    public_configs = [ ":private_includes" ]
-    public_deps = [
-      ":$_target_name",
-      ":common",
-      dir_pw_span,
-      dir_pw_unit_test,
-    ]
-    visibility = [ "./*" ]
-  }
-}
-
-# Provides the public RPC service definition (but not implementation). Can be
-# used to expose global service registration without depending on the complete
-# RPC library.
-pw_source_set("service") {
+pw_source_set("server") {
   public_configs = [ ":default_config" ]
-  public_deps = [ "$dir_pw_containers:intrusive_list" ]
-  public = [ "public/pw_rpc/service.h" ]
-}
-
-# Put these dependencies into a group since they need to be shared by the server
-# library and its implementation library.
-group("server_library_deps") {
-  public_configs = [ ":default_config" ]
-  public_deps = [
-    ":common",
-    ":service",
-    dir_pw_span,
-    dir_pw_status,
+  public_deps = [ ":common" ]
+  deps = [ dir_pw_log ]
+  public = [
+    "public/pw_rpc/server.h",
+    "public/pw_rpc/server_context.h",
+    "public/pw_rpc/service.h",
   ]
-  visibility = [ "./*" ]
+  sources = [
+    "base_server_writer.cc",
+    "public/pw_rpc/internal/base_server_writer.h",
+    "public/pw_rpc/internal/call.h",
+    "public/pw_rpc/internal/hash.h",
+    "public/pw_rpc/internal/method.h",
+    "public/pw_rpc/internal/method_union.h",
+    "public/pw_rpc/internal/server.h",
+    "server.cc",
+    "service.cc",
+  ]
+  friend = [ "./*" ]
 }
 
-# Classes with no dependencies on the protobuf library for method invocations.
+pw_source_set("client") {
+  public_configs = [ ":default_config" ]
+  public_deps = [ ":common" ]
+  deps = [ dir_pw_log ]
+  public = [
+    "public/pw_rpc/client.h",
+    "public/pw_rpc/internal/base_client_call.h",
+  ]
+  sources = [
+    "base_client_call.cc",
+    "client.cc",
+  ]
+}
+
+# Classes shared by the server and client.
 pw_source_set("common") {
   public_configs = [ ":default_config" ]
   public_deps = [
-    ":protos_pwpb",
+    ":protos.pwpb",
+    "$dir_pw_containers:intrusive_list",
     dir_pw_assert,
-    dir_pw_log,
+    dir_pw_bytes,
     dir_pw_span,
     dir_pw_status,
   ]
+  deps = [ dir_pw_log ]
   public = [ "public/pw_rpc/channel.h" ]
   sources = [
     "channel.cc",
     "packet.cc",
-    "public/pw_rpc/internal/base_method.h",
-    "public/pw_rpc/internal/call.h",
     "public/pw_rpc/internal/channel.h",
-    "public/pw_rpc/internal/hash.h",
+    "public/pw_rpc/internal/method_type.h",
     "public/pw_rpc/internal/packet.h",
   ]
   friend = [ "./*" ]
+}
+
+pw_source_set("service_method_traits") {
+  public = [ "public/pw_rpc/internal/service_method_traits.h" ]
+  public_deps = [ ":server" ]
   visibility = [ "./*" ]
 }
 
-# RPC server that uses Nanopb to encode and decode protobufs. RPCs use Nanopb
-# structs as their requests and responses.
-_pw_rpc_server_library("nanopb_server") {
-  implementation = "nanopb"
+pw_source_set("test_utils") {
+  public = [
+    "public/pw_rpc/internal/test_method.h",
+    "pw_rpc_private/internal_test_utils.h",
+  ]
+  public_configs = [ ":private_includes" ]
+  public_deps = [
+    ":client",
+    ":server",
+    dir_pw_span,
+  ]
+  visibility = [ "./*" ]
 }
 
 config("private_includes") {
@@ -142,16 +120,6 @@
   inputs = [ "pw_rpc_protos/echo.options" ]
 }
 
-# Source files for pw_protobuf's protoc plugin.
-pw_input_group("nanopb_protoc_plugin") {
-  inputs = [
-    "py/pw_rpc/codegen_nanopb.py",
-    "py/pw_rpc/plugin.py",
-    "py/pw_rpc/ids.py",
-  ]
-  deps = [ "$dir_pw_protobuf:codegen_protoc_lib" ]
-}
-
 pw_doc_group("docs") {
   sources = [ "docs.rst" ]
   inputs = [
@@ -159,58 +127,124 @@
     "pw_rpc_protos/packet.proto",
   ]
   group_deps = [ "nanopb:docs" ]
+  report_deps = [ ":server_size" ]
+}
+
+pw_size_report("server_size") {
+  title = "Pigweed RPC server size report"
+
+  binaries = [
+    {
+      target = "size_report:server_only"
+      base = "size_report:base"
+      label = "Server by itself"
+    },
+  ]
+
+  if (dir_pw_third_party_nanopb != "") {
+    binaries += [
+      {
+        target = "size_report:server_with_echo_service"
+        base = "size_report:base_with_nanopb"
+        label = "Server with a registered nanopb EchoService"
+      },
+    ]
+  }
 }
 
 pw_test_group("tests") {
   tests = [
+    ":base_client_call_test",
     ":base_server_writer_test",
     ":channel_test",
+    ":client_test",
+    ":ids_test",
     ":packet_test",
     ":server_test",
+    ":service_test",
   ]
-  group_deps = [ "nanopb:tests" ]
-}
-
-# RPC server for tests only. A mock method implementation is used.
-_pw_rpc_server_library("test_server") {
-  implementation = "test_impl"
-  visibility = [ ":*" ]
+  group_deps = [
+    "nanopb:tests",
+    "raw:tests",
+  ]
 }
 
 pw_proto_library("test_protos") {
   sources = [ "pw_rpc_test_protos/test.proto" ]
+  inputs = [ "pw_rpc_test_protos/test.options" ]
   visibility = [ "./*" ]
 }
 
 pw_test("base_server_writer_test") {
   deps = [
-    ":internal_test_utils_test_server",
-    ":test_server",
+    ":server",
+    ":test_utils",
   ]
   sources = [ "base_server_writer_test.cc" ]
 }
 
 pw_test("channel_test") {
   deps = [
-    ":common",
-    ":internal_test_utils_test_server",
+    ":server",
+    ":test_utils",
   ]
   sources = [ "channel_test.cc" ]
 }
 
+action("generate_ids_test") {
+  outputs = [ "$target_gen_dir/generated_ids_test.cc" ]
+  script = "py/ids_test.py"
+  args = [ "--generate-cc-test" ] + rebase_path(outputs)
+  deps = [ "$dir_pw_build/py" ]
+}
+
+pw_test("ids_test") {
+  deps = [
+    ":generate_ids_test",
+    ":server",
+  ]
+  sources = get_target_outputs(":generate_ids_test")
+}
+
 pw_test("packet_test") {
   deps = [
-    ":common",
+    ":server",
+    dir_pw_bytes,
     dir_pw_protobuf,
   ]
   sources = [ "packet_test.cc" ]
 }
 
+pw_test("service_test") {
+  deps = [
+    ":protos.pwpb",
+    ":server",
+    dir_pw_assert,
+  ]
+  sources = [ "service_test.cc" ]
+}
+
+pw_test("client_test") {
+  deps = [
+    ":client",
+    ":test_utils",
+  ]
+  sources = [ "client_test.cc" ]
+}
+
+pw_test("base_client_call_test") {
+  deps = [
+    ":client",
+    ":test_utils",
+  ]
+  sources = [ "base_client_call_test.cc" ]
+}
+
 pw_test("server_test") {
   deps = [
-    ":internal_test_utils_test_server",
-    ":protos_pwpb",
-    ":test_server",
+    ":protos.pwpb",
+    ":server",
+    ":test_utils",
     dir_pw_assert,
   ]
   sources = [ "server_test.cc" ]
diff --git a/pw_rpc/CMakeLists.txt b/pw_rpc/CMakeLists.txt
index 2d62392..bbaf74b 100644
--- a/pw_rpc/CMakeLists.txt
+++ b/pw_rpc/CMakeLists.txt
@@ -12,8 +12,71 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-pw_auto_add_simple_module(pw_rpc
+include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+include($ENV{PW_ROOT}/pw_protobuf_compiler/proto.cmake)
+
+if(NOT "${dir_pw_third_party_nanopb}" STREQUAL "")
+  add_subdirectory(nanopb)
+endif()
+
+add_subdirectory(raw)
+
+pw_add_module_library(pw_rpc.server
+  SOURCES
+    base_server_writer.cc
+    server.cc
+    service.cc
+  PUBLIC_DEPS
+    pw_rpc.common
+  PRIVATE_DEPS
+    pw_log
+)
+
+pw_add_module_library(pw_rpc.client
+  SOURCES
+    base_client_call.cc
+    client.cc
+  PUBLIC_DEPS
+    pw_rpc.common
+  PRIVATE_DEPS
+    pw_log
+)
+
+pw_add_module_library(pw_rpc.common
+  SOURCES
+    channel.cc
+    packet.cc
   PUBLIC_DEPS
     pw_assert
+    pw_bytes
+    pw_containers
     pw_span
+    pw_status
+    pw_rpc.protos.pwpb
+  PRIVATE_DEPS
+    pw_log
+)
+
+add_library(pw_rpc.test_utils INTERFACE)
+target_include_directories(pw_rpc.test_utils INTERFACE .)
+
+pw_proto_library(pw_rpc.protos
+  SOURCES
+    pw_rpc_protos/packet.proto
+)
+
+pw_proto_library(pw_rpc.echo_proto
+  SOURCES
+    pw_rpc_protos/echo.proto
+)
+
+pw_proto_library(pw_rpc.test_protos
+  SOURCES
+    pw_rpc_test_protos/test.proto
+)
+
+pw_auto_add_module_tests(pw_rpc
+  PRIVATE_DEPS
+    pw_rpc.client
+    pw_rpc.server
 )
diff --git a/pw_rpc/base_client_call.cc b/pw_rpc/base_client_call.cc
new file mode 100644
index 0000000..ec2a3d3
--- /dev/null
+++ b/pw_rpc/base_client_call.cc
@@ -0,0 +1,59 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_rpc/internal/base_client_call.h"
+
+#include "pw_rpc/client.h"
+
+namespace pw::rpc::internal {
+
+void BaseClientCall::Cancel() {
+  if (active()) {
+    channel_->Send(NewPacket(PacketType::CANCEL_SERVER_STREAM));
+  }
+}
+
+std::span<std::byte> BaseClientCall::AcquirePayloadBuffer() {
+  if (!active()) {
+    return {};
+  }
+
+  request_ = channel_->AcquireBuffer();
+  return request_.payload(NewPacket(PacketType::REQUEST));
+}
+
+Status BaseClientCall::ReleasePayloadBuffer(
+    std::span<const std::byte> payload) {
+  if (!active()) {
+    return Status::FailedPrecondition();
+  }
+
+  return channel_->Send(request_, NewPacket(PacketType::REQUEST, payload));
+}
+
+Packet BaseClientCall::NewPacket(PacketType type,
+                                 std::span<const std::byte> payload) const {
+  return Packet(type, channel_->id(), service_id_, method_id_, payload);
+}
+
+void BaseClientCall::Register() { channel_->client()->RegisterCall(*this); }
+
+void BaseClientCall::Unregister() {
+  if (active()) {
+    channel_->client()->RemoveCall(*this);
+    active_ = false;
+  }
+}
+
+}  // namespace pw::rpc::internal
diff --git a/pw_rpc/base_client_call_test.cc b/pw_rpc/base_client_call_test.cc
new file mode 100644
index 0000000..44a3243
--- /dev/null
+++ b/pw_rpc/base_client_call_test.cc
@@ -0,0 +1,72 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_rpc/internal/base_client_call.h"
+
+#include "gtest/gtest.h"
+#include "pw_rpc_private/internal_test_utils.h"
+
+namespace pw::rpc::internal {
+namespace {
+
+TEST(BaseClientCall, RegistersAndRemovesItselfFromClient) {
+  ClientContextForTest context;
+  EXPECT_EQ(context.client().active_calls(), 0u);
+
+  {
+    BaseClientCall call(&context.channel(),
+                        context.kServiceId,
+                        context.kMethodId,
+                        [](BaseClientCall&, const Packet&) {});
+    EXPECT_EQ(context.client().active_calls(), 1u);
+  }
+
+  EXPECT_EQ(context.client().active_calls(), 0u);
+}
+
+class FakeClientCall : public BaseClientCall {
+ public:
+  constexpr FakeClientCall(rpc::Channel* channel,
+                           uint32_t service_id,
+                           uint32_t method_id,
+                           ResponseHandler handler)
+      : BaseClientCall(channel, service_id, method_id, handler) {}
+
+  Status SendPacket(std::span<const std::byte> payload) {
+    std::span buffer = AcquirePayloadBuffer();
+    std::memcpy(buffer.data(), payload.data(), payload.size());
+    return ReleasePayloadBuffer(buffer.first(payload.size()));
+  }
+};
+
+TEST(BaseClientCall, SendsPacketWithPayload) {
+  ClientContextForTest context;
+  FakeClientCall call(&context.channel(),
+                      context.kServiceId,
+                      context.kMethodId,
+                      [](BaseClientCall&, const Packet&) {});
+
+  constexpr std::byte payload[]{std::byte{0x08}, std::byte{0x39}};
+  call.SendPacket(payload);
+
+  EXPECT_EQ(context.output().packet_count(), 1u);
+  Packet packet = context.output().sent_packet();
+  EXPECT_EQ(packet.channel_id(), context.channel().id());
+  EXPECT_EQ(packet.service_id(), context.kServiceId);
+  EXPECT_EQ(packet.method_id(), context.kMethodId);
+  EXPECT_EQ(std::memcmp(packet.payload().data(), payload, sizeof(payload)), 0);
+}
+
+}  // namespace
+}  // namespace pw::rpc::internal
diff --git a/pw_rpc/base_server_writer.cc b/pw_rpc/base_server_writer.cc
index 6ddd136..1d9ea4d 100644
--- a/pw_rpc/base_server_writer.cc
+++ b/pw_rpc/base_server_writer.cc
@@ -50,11 +50,16 @@
     return;
   }
 
-  call_.server().RemoveWriter(*this);
-  state_ = kClosed;
+  // If the ServerWriter implementer or user forgets to release an acquired
+  // buffer before finishing, release it here.
+  if (!response_.empty()) {
+    ReleasePayloadBuffer();
+  }
+
+  Close();
 
   // Send a control packet indicating that the stream (and RPC) has terminated.
-  call_.channel().Send(Packet(PacketType::STREAM_END,
+  call_.channel().Send(Packet(PacketType::SERVER_STREAM_END,
                               call_.channel().id(),
                               call_.service().id(),
                               method().id(),
@@ -67,20 +72,43 @@
     return {};
   }
 
-  response_ = call_.channel().AcquireBuffer();
-  return response_.payload(RpcPacket());
+  // Only allow having one active buffer at a time.
+  if (response_.empty()) {
+    response_ = call_.channel().AcquireBuffer();
+  }
+
+  return response_.payload(ResponsePacket());
 }
 
 Status BaseServerWriter::ReleasePayloadBuffer(
     std::span<const std::byte> payload) {
   if (!open()) {
-    return Status::FAILED_PRECONDITION;
+    return Status::FailedPrecondition();
   }
-  return call_.channel().Send(response_, RpcPacket(payload));
+  return call_.channel().Send(response_, ResponsePacket(payload));
 }
 
-Packet BaseServerWriter::RpcPacket(std::span<const std::byte> payload) const {
-  return Packet(PacketType::RPC,
+Status BaseServerWriter::ReleasePayloadBuffer() {
+  if (!open()) {
+    return Status::FailedPrecondition();
+  }
+
+  call_.channel().Release(response_);
+  return Status::Ok();
+}
+
+void BaseServerWriter::Close() {
+  if (!open()) {
+    return;
+  }
+
+  call_.server().RemoveWriter(*this);
+  state_ = kClosed;
+}
+
+Packet BaseServerWriter::ResponsePacket(
+    std::span<const std::byte> payload) const {
+  return Packet(PacketType::RESPONSE,
                 call_.channel().id(),
                 call_.service().id(),
                 method().id(),
diff --git a/pw_rpc/base_server_writer_test.cc b/pw_rpc/base_server_writer_test.cc
index e8999dc..d5767df 100644
--- a/pw_rpc/base_server_writer_test.cc
+++ b/pw_rpc/base_server_writer_test.cc
@@ -15,10 +15,12 @@
 #include "pw_rpc/internal/base_server_writer.h"
 
 #include <algorithm>
+#include <array>
 #include <cstdint>
 #include <cstring>
 
 #include "gtest/gtest.h"
+#include "pw_rpc/internal/test_method.h"
 #include "pw_rpc/server_context.h"
 #include "pw_rpc/service.h"
 #include "pw_rpc_private/internal_test_utils.h"
@@ -27,10 +29,9 @@
 
 class TestService : public Service {
  public:
-  constexpr TestService(uint32_t id)
-      : Service(id, std::span(&method, 1)), method(8) {}
+  constexpr TestService(uint32_t id) : Service(id, method) {}
 
-  internal::Method method;
+  static constexpr internal::TestMethodUnion method = internal::TestMethod(8);
 };
 
 namespace internal {
@@ -39,7 +40,7 @@
 using std::byte;
 
 TEST(BaseServerWriter, ConstructWithContext_StartsOpen) {
-  ServerContextForTest<TestService> context;
+  ServerContextForTest<TestService> context(TestService::method.method());
 
   BaseServerWriter writer(context.get());
 
@@ -47,7 +48,7 @@
 }
 
 TEST(BaseServerWriter, Move_ClosesOriginal) {
-  ServerContextForTest<TestService> context;
+  ServerContextForTest<TestService> context(TestService::method.method());
 
   BaseServerWriter moved(context.get());
   BaseServerWriter writer(std::move(moved));
@@ -69,6 +70,9 @@
                 std::min(buffer.size(), response.size()));
     return ReleasePayloadBuffer(buffer.first(response.size()));
   }
+
+  ByteSpan PayloadBuffer() { return AcquirePayloadBuffer(); }
+  const Channel::OutputBuffer& output_buffer() { return buffer(); }
 };
 
 TEST(ServerWriter, DefaultConstruct_Closed) {
@@ -78,7 +82,7 @@
 }
 
 TEST(ServerWriter, Construct_RegistersWithServer) {
-  ServerContextForTest<TestService> context;
+  ServerContextForTest<TestService> context(TestService::method.method());
   FakeServerWriter writer(context.get());
 
   auto& writers = context.server().writers();
@@ -89,7 +93,7 @@
 }
 
 TEST(ServerWriter, Destruct_RemovesFromServer) {
-  ServerContextForTest<TestService> context;
+  ServerContextForTest<TestService> context(TestService::method.method());
   { FakeServerWriter writer(context.get()); }
 
   auto& writers = context.server().writers();
@@ -97,7 +101,7 @@
 }
 
 TEST(ServerWriter, Finish_RemovesFromServer) {
-  ServerContextForTest<TestService> context;
+  ServerContextForTest<TestService> context(TestService::method.method());
   FakeServerWriter writer(context.get());
 
   writer.Finish();
@@ -107,22 +111,22 @@
 }
 
 TEST(ServerWriter, Finish_SendsCancellationPacket) {
-  ServerContextForTest<TestService> context;
+  ServerContextForTest<TestService> context(TestService::method.method());
   FakeServerWriter writer(context.get());
 
   writer.Finish();
 
   const Packet& packet = context.output().sent_packet();
-  EXPECT_EQ(packet.type(), PacketType::STREAM_END);
+  EXPECT_EQ(packet.type(), PacketType::SERVER_STREAM_END);
   EXPECT_EQ(packet.channel_id(), context.kChannelId);
   EXPECT_EQ(packet.service_id(), context.kServiceId);
   EXPECT_EQ(packet.method_id(), context.get().method().id());
   EXPECT_TRUE(packet.payload().empty());
-  EXPECT_EQ(packet.status(), Status::OK);
+  EXPECT_EQ(packet.status(), Status::Ok());
 }
 
 TEST(ServerWriter, Close) {
-  ServerContextForTest<TestService> context;
+  ServerContextForTest<TestService> context(TestService::method.method());
   FakeServerWriter writer(context.get());
 
   ASSERT_TRUE(writer.open());
@@ -130,30 +134,45 @@
   EXPECT_FALSE(writer.open());
 }
 
+TEST(ServerWriter, Close_ReleasesBuffer) {
+  ServerContextForTest<TestService> context(TestService::method.method());
+  FakeServerWriter writer(context.get());
+
+  ASSERT_TRUE(writer.open());
+  auto buffer = writer.PayloadBuffer();
+  buffer[0] = std::byte{0};
+  EXPECT_FALSE(writer.output_buffer().empty());
+  writer.Finish();
+  EXPECT_FALSE(writer.open());
+  EXPECT_TRUE(writer.output_buffer().empty());
+}
+
 TEST(ServerWriter, Open_SendsPacketWithPayload) {
-  ServerContextForTest<TestService> context;
+  ServerContextForTest<TestService> context(TestService::method.method());
   FakeServerWriter writer(context.get());
 
   constexpr byte data[] = {byte{0xf0}, byte{0x0d}};
-  ASSERT_EQ(Status::OK, writer.Write(data));
+  ASSERT_EQ(Status::Ok(), writer.Write(data));
 
   byte encoded[64];
-  auto sws = context.packet(data).Encode(encoded);
-  ASSERT_EQ(Status::OK, sws.status());
+  auto result = context.packet(data).Encode(encoded);
+  ASSERT_EQ(Status::Ok(), result.status());
 
-  EXPECT_EQ(sws.size(), context.output().sent_data().size());
+  EXPECT_EQ(result.value().size(), context.output().sent_data().size());
   EXPECT_EQ(
-      0, std::memcmp(encoded, context.output().sent_data().data(), sws.size()));
+      0,
+      std::memcmp(
+          encoded, context.output().sent_data().data(), result.value().size()));
 }
 
 TEST(ServerWriter, Closed_IgnoresPacket) {
-  ServerContextForTest<TestService> context;
+  ServerContextForTest<TestService> context(TestService::method.method());
   FakeServerWriter writer(context.get());
 
   writer.Finish();
 
   constexpr byte data[] = {byte{0xf0}, byte{0x0d}};
-  EXPECT_EQ(Status::FAILED_PRECONDITION, writer.Write(data));
+  EXPECT_EQ(Status::FailedPrecondition(), writer.Write(data));
 }
 
 }  // namespace
diff --git a/pw_rpc/channel.cc b/pw_rpc/channel.cc
index b34ebf2..fb5eab2 100644
--- a/pw_rpc/channel.cc
+++ b/pw_rpc/channel.cc
@@ -28,17 +28,16 @@
 }
 
 Status Channel::Send(OutputBuffer& buffer, const internal::Packet& packet) {
-  StatusWithSize encoded = packet.Encode(buffer.buffer_);
+  Result encoded = packet.Encode(buffer.buffer_);
   buffer.buffer_ = {};
 
   if (!encoded.ok()) {
     PW_LOG_ERROR("Failed to encode response packet to channel buffer");
     output().SendAndReleaseBuffer(0);
-    return Status::INTERNAL;
+    return Status::Internal();
   }
 
-  output().SendAndReleaseBuffer(encoded.size());
-  return Status::OK;
+  return output().SendAndReleaseBuffer(encoded.value().size());
 }
 
 }  // namespace pw::rpc::internal
diff --git a/pw_rpc/channel_test.cc b/pw_rpc/channel_test.cc
index 74e691a..cdddc14 100644
--- a/pw_rpc/channel_test.cc
+++ b/pw_rpc/channel_test.cc
@@ -28,14 +28,14 @@
    public:
     NameTester(const char* name) : ChannelOutput(name) {}
     std::span<std::byte> AcquireBuffer() override { return {}; }
-    void SendAndReleaseBuffer(size_t) override {}
+    Status SendAndReleaseBuffer(size_t) override { return Status::Ok(); }
   };
 
   EXPECT_STREQ("hello_world", NameTester("hello_world").name());
   EXPECT_EQ(nullptr, NameTester(nullptr).name());
 }
 
-constexpr Packet kTestPacket(PacketType::RPC, 1, 42, 100);
+constexpr Packet kTestPacket(PacketType::RESPONSE, 1, 42, 100);
 const size_t kReservedSize = 2 /* type */ + 2 /* channel */ + 5 /* service */ +
                              5 /* method */ + 2 /* payload key */ +
                              2 /* status */;
@@ -59,7 +59,7 @@
   Channel::OutputBuffer output_buffer = channel.AcquireBuffer();
   EXPECT_TRUE(output_buffer.payload(kTestPacket).empty());
 
-  EXPECT_EQ(Status::INTERNAL, channel.Send(output_buffer, kTestPacket));
+  EXPECT_EQ(Status::Internal(), channel.Send(output_buffer, kTestPacket));
 }
 
 TEST(Channel, OutputBuffer_ExactFit) {
@@ -72,7 +72,7 @@
   EXPECT_EQ(payload.size(), output.buffer().size() - kReservedSize);
   EXPECT_EQ(output.buffer().data() + kReservedSize, payload.data());
 
-  EXPECT_EQ(Status::OK, channel.Send(output_buffer, kTestPacket));
+  EXPECT_EQ(Status::Ok(), channel.Send(output_buffer, kTestPacket));
 }
 
 TEST(Channel, OutputBuffer_PayloadDoesNotFit_ReportsError) {
@@ -83,7 +83,7 @@
   byte data[1] = {};
   packet.set_payload(data);
 
-  EXPECT_EQ(Status::INTERNAL, channel.Send(packet));
+  EXPECT_EQ(Status::Internal(), channel.Send(packet));
 }
 
 TEST(Channel, OutputBuffer_ExtraRoom) {
@@ -96,7 +96,17 @@
   EXPECT_EQ(payload.size(), output.buffer().size() - kReservedSize);
   EXPECT_EQ(output.buffer().data() + kReservedSize, payload.data());
 
-  EXPECT_EQ(Status::OK, channel.Send(output_buffer, kTestPacket));
+  EXPECT_EQ(Status::Ok(), channel.Send(output_buffer, kTestPacket));
+}
+
+TEST(Channel, OutputBuffer_ReturnsStatusFromChannelOutputSend) {
+  TestOutput<kReservedSize * 3> output;
+  internal::Channel channel(100, &output);
+
+  Channel::OutputBuffer output_buffer = channel.AcquireBuffer();
+  output.set_send_status(Status::Aborted());
+
+  EXPECT_EQ(Status::Aborted(), channel.Send(output_buffer, kTestPacket));
 }
 
 }  // namespace
diff --git a/pw_rpc/client.cc b/pw_rpc/client.cc
new file mode 100644
index 0000000..07c26ee
--- /dev/null
+++ b/pw_rpc/client.cc
@@ -0,0 +1,101 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_rpc/client.h"
+
+#include "pw_log/log.h"
+#include "pw_rpc/internal/packet.h"
+
+namespace pw::rpc {
+namespace {
+
+using internal::BaseClientCall;
+using internal::Packet;
+using internal::PacketType;
+
+}  // namespace
+
+Status Client::ProcessPacket(ConstByteSpan data) {
+  Result<Packet> result = Packet::FromBuffer(data);
+  if (!result.ok()) {
+    PW_LOG_WARN("RPC client failed to decode incoming packet");
+    return Status::DataLoss();
+  }
+
+  Packet& packet = result.value();
+
+  if (packet.destination() != Packet::kClient) {
+    return Status::InvalidArgument();
+  }
+
+  if (packet.channel_id() == Channel::kUnassignedChannelId ||
+      packet.service_id() == 0 || packet.method_id() == 0) {
+    PW_LOG_WARN("RPC client received a malformed packet");
+    return Status::DataLoss();
+  }
+
+  auto call = std::find_if(calls_.begin(), calls_.end(), [&](auto& c) {
+    return c.channel().id() == packet.channel_id() &&
+           c.service_id() == packet.service_id() &&
+           c.method_id() == packet.method_id();
+  });
+
+  auto channel = std::find_if(channels_.begin(), channels_.end(), [&](auto& c) {
+    return c.id() == packet.channel_id();
+  });
+
+  if (channel == channels_.end()) {
+    PW_LOG_WARN("RPC client received a packet for an unregistered channel");
+    return Status::NotFound();
+  }
+
+  if (call == calls_.end()) {
+    PW_LOG_WARN("RPC client received a packet for a request it did not make");
+    channel->Send(Packet::ClientError(packet, Status::FailedPrecondition()));
+    return Status::NotFound();
+  }
+
+  switch (packet.type()) {
+    case PacketType::RESPONSE:
+    case PacketType::SERVER_ERROR:
+      call->HandleResponse(packet);
+      break;
+    case PacketType::SERVER_STREAM_END:
+      call->HandleResponse(packet);
+      RemoveCall(*call);
+      break;
+    default:
+      return Status::Unimplemented();
+  }
+
+  return Status::Ok();
+}
+
+Status Client::RegisterCall(BaseClientCall& call) {
+  auto existing_call = std::find_if(calls_.begin(), calls_.end(), [&](auto& c) {
+    return c.channel().id() == call.channel().id() &&
+           c.service_id() == call.service_id() &&
+           c.method_id() == call.method_id();
+  });
+  if (existing_call != calls_.end()) {
+    PW_LOG_WARN(
+        "RPC client tried to call same method multiple times; aborting.");
+    return Status::FailedPrecondition();
+  }
+
+  calls_.push_front(call);
+  return Status::Ok();
+}
+
+}  // namespace pw::rpc
diff --git a/pw_rpc/client_test.cc b/pw_rpc/client_test.cc
new file mode 100644
index 0000000..3174cfc
--- /dev/null
+++ b/pw_rpc/client_test.cc
@@ -0,0 +1,88 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_rpc/client.h"
+
+#include "gtest/gtest.h"
+#include "pw_rpc/internal/packet.h"
+#include "pw_rpc_private/internal_test_utils.h"
+
+namespace pw::rpc {
+namespace {
+
+using internal::BaseClientCall;
+using internal::Packet;
+using internal::PacketType;
+
+class TestClientCall : public BaseClientCall {
+ public:
+  constexpr TestClientCall(Channel* channel,
+                           uint32_t service_id,
+                           uint32_t method_id)
+      : BaseClientCall(channel, service_id, method_id, ProcessPacket) {}
+
+  static void ProcessPacket(BaseClientCall& call, const Packet& packet) {
+    static_cast<TestClientCall&>(call).HandlePacket(packet);
+  }
+
+  void HandlePacket(const Packet&) { invoked_ = true; }
+
+  constexpr bool invoked() const { return invoked_; }
+
+ private:
+  bool invoked_ = false;
+};
+
+TEST(Client, ProcessPacket_InvokesARegisteredClientCall) {
+  ClientContextForTest context;
+
+  TestClientCall call(
+      &context.channel(), context.kServiceId, context.kMethodId);
+  EXPECT_EQ(context.SendResponse(Status::Ok(), {}), Status::Ok());
+
+  EXPECT_TRUE(call.invoked());
+}
+
+TEST(Client, ProcessPacket_SendsClientErrorOnUnregisteredCall) {
+  ClientContextForTest context;
+
+  EXPECT_EQ(context.SendResponse(Status::Ok(), {}), Status::NotFound());
+
+  ASSERT_EQ(context.output().packet_count(), 1u);
+  const Packet& packet = context.output().sent_packet();
+  EXPECT_EQ(packet.type(), PacketType::CLIENT_ERROR);
+  EXPECT_EQ(packet.channel_id(), context.kChannelId);
+  EXPECT_EQ(packet.service_id(), context.kServiceId);
+  EXPECT_EQ(packet.method_id(), context.kMethodId);
+  EXPECT_TRUE(packet.payload().empty());
+  EXPECT_EQ(packet.status(), Status::FailedPrecondition());
+}
+
+TEST(Client, ProcessPacket_ReturnsDataLossOnBadPacket) {
+  ClientContextForTest context;
+
+  constexpr std::byte bad_packet[]{
+      std::byte{0xab}, std::byte{0xcd}, std::byte{0xef}};
+  EXPECT_EQ(context.client().ProcessPacket(bad_packet), Status::DataLoss());
+}
+
+TEST(Client, ProcessPacket_ReturnsInvalidArgumentOnServerPacket) {
+  ClientContextForTest context;
+  EXPECT_EQ(context.SendPacket(PacketType::REQUEST), Status::InvalidArgument());
+  EXPECT_EQ(context.SendPacket(PacketType::CANCEL_SERVER_STREAM),
+            Status::InvalidArgument());
+}
+
+}  // namespace
+}  // namespace pw::rpc
diff --git a/pw_rpc/docs.rst b/pw_rpc/docs.rst
index 09351b7..2e62c31 100644
--- a/pw_rpc/docs.rst
+++ b/pw_rpc/docs.rst
@@ -1,8 +1,4 @@
-.. default-domain:: cpp
-
-.. highlight:: sh
-
-.. _chapter-pw-rpc:
+.. _module-pw_rpc:
 
 ------
 pw_rpc
@@ -10,9 +6,151 @@
 The ``pw_rpc`` module provides a system for defining and invoking remote
 procedure calls (RPCs) on a device.
 
+.. admonition:: Try it out!
+
+  For a quick intro to ``pw_rpc``, see the
+  :ref:`module-pw_hdlc_lite-rpc-example` in the :ref:`module-pw_hdlc_lite`
+  module.
+
 .. attention::
 
-  Under construction.
+  This documentation is under construction.
+
+Creating an RPC
+===============
+
+1. RPC service declaration
+--------------------------
+Pigweed RPCs are declared in a protocol buffer service definition.
+
+* `Protocol Buffer service documentation
+  <https://developers.google.com/protocol-buffers/docs/proto3#services>`_
+* `gRPC service definition documentation
+  <https://grpc.io/docs/what-is-grpc/core-concepts/#service-definition>`_
+
+.. code-block:: protobuf
+
+  syntax = "proto3";
+
+  package foo.bar;
+
+  message Request {}
+
+  message Response {
+    int32 number = 1;
+  }
+
+  service TheService {
+    rpc MethodOne(Request) returns (Response) {}
+    rpc MethodTwo(Request) returns (stream Response) {}
+  }
+
+This protocol buffer is declared in a ``BUILD.gn`` file as follows:
+
+.. code-block:: python
+
+  import("//build_overrides/pigweed.gni")
+  import("$dir_pw_protobuf_compiler/proto.gni")
+
+  pw_proto_library("the_service_proto") {
+    sources = [ "foo_bar/the_service.proto" ]
+  }
+
+2. RPC service definition
+-------------------------
+``pw_rpc`` generates a C++ base class for each RPC service declared in a .proto
+file. The service class is implemented by inheriting from this generated base
+and defining a method for each RPC.
+
+A service named ``TheService`` in package ``foo.bar`` will generate the
+following class:
+
+.. cpp:class:: template <typename Implementation> foo::bar::generated::TheService
+
+A Nanopb implementation of this service would be as follows:
+
+.. code-block:: cpp
+
+  namespace foo::bar {
+
+  class TheService : public generated::TheService<TheService> {
+   public:
+    pw::Status MethodOne(ServerContext& ctx,
+                         const foo_bar_Request& request,
+                         foo_bar_Response& response) {
+      // implementation
+      return pw::Status::OK;
+    }
+
+    void MethodTwo(ServerContext& ctx,
+                   const foo_bar_Request& request,
+                   ServerWriter<foo_bar_Response>& response) {
+      // implementation
+      response.Write(foo_bar_Response{.number = 123});
+    }
+  };
+
+  }  // namespace foo::bar
+
+The Nanopb implementation would be declared in a ``BUILD.gn``:
+
+.. code-block:: python
+
+  import("//build_overrides/pigweed.gni")
+
+  import("$dir_pw_build/target_types.gni")
+
+  pw_source_set("the_service") {
+    public_configs = [ ":public" ]
+    public = [ "public/foo_bar/service.h" ]
+    public_deps = [ ":the_service_proto_nanopb_rpc" ]
+  }
+
+.. attention::
+
+  pw_rpc's generated classes will support using ``pw_protobuf`` or raw buffers
+  (no protobuf library) in the future.
+
+3. Register the service with a server
+-------------------------------------
+This example code sets up an RPC server with an :ref:`HDLC<module-pw_hdlc_lite>`
+channel output and the example service.
+
+.. code-block:: cpp
+
+  // Set up the output channel for the pw_rpc server to use. This configures the
+  // pw_rpc server to use HDLC over UART; projects not using UART and HDLC must
+  // adapt this as necessary.
+  pw::stream::SysIoWriter writer;
+  pw::rpc::RpcChannelOutput<kMaxTransmissionUnit> hdlc_channel_output(
+      writer, pw::hdlc_lite::kDefaultRpcAddress, "HDLC output");
+
+  pw::rpc::Channel channels[] = {
+      pw::rpc::Channel::Create<1>(&hdlc_channel_output)};
+
+  // Declare the pw_rpc server with the HDLC channel.
+  pw::rpc::Server server(channels);
+
+  pw::rpc::TheService the_service;
+
+  void RegisterServices() {
+    // Register the foo.bar.TheService example service.
+    server.Register(the_service);
+
+    // Register other services
+  }
+
+  int main() {
+    // Set up the server.
+    RegisterServices();
+
+    // Declare a buffer for decoding incoming HDLC frames.
+    std::array<std::byte, kMaxTransmissionUnit> input_buffer;
+
+    PW_LOG_INFO("Starting pw_rpc server");
+    pw::hdlc_lite::ReadAndProcessPackets(
+        server, hdlc_channel_output, input_buffer);
+  }
 
 Services
 ========
@@ -23,6 +161,11 @@
 ``pw_rpc`` supports multiple protobuf libraries, and the generated code API
 depends on which is used.
 
+.. _module-pw_rpc-protobuf-library-apis:
+
+Protobuf library APIs
+=====================
+
 .. toctree::
   :maxdepth: 1
 
@@ -73,63 +216,111 @@
   :lines: 14-
 
 The packet type and RPC type determine which fields are present in a Pigweed RPC
-packet. This table describes the meaning of and fields included with each packet
-type when sent from client to server and server to client.
+packet. Each packet type is only sent by either the client or the server.
+These tables describe the meaning of and fields included with each packet type.
 
-+-------------+----------------------------------+--------------------------------+
-| packet type |         client-to-server         |         server-to-client       |
-+=============+==================================+================================+
-| RPC         | RPC request                      | RPC response                   |
-|             |                                  |                                |
-|             | .. code-block:: text             | .. code-block:: text           |
-|             |                                  |                                |
-|             |   - channel_id                   |   - channel_id                 |
-|             |   - service_id                   |   - service_id                 |
-|             |   - method_id                    |   - method_id                  |
-|             |   - payload                      |   - payload                    |
-|             |     (unless first client stream) |   - status                     |
-|             |                                  |     (unless in server stream)  |
-|             |                                  |                                |
-+-------------+----------------------------------+--------------------------------+
-| STREAM_END  | Client stream finished           | Server stream and RPC finished |
-|             |                                  |                                |
-|             | .. code-block:: text             | .. code-block:: text           |
-|             |                                  |                                |
-|             |   - channel_id                   |   - channel_id                 |
-|             |   - service_id                   |   - service_id                 |
-|             |   - method_id                    |   - method_id                  |
-|             |                                  |   - status                     |
-|             |                                  |                                |
-+-------------+----------------------------------+--------------------------------+
-| CANCEL      | Cancel server stream             | (not used)                     |
-|             |                                  |                                |
-|             | .. code-block:: text             |                                |
-|             |                                  |                                |
-|             |   - channel_id                   |                                |
-|             |   - service_id                   |                                |
-|             |   - method_id                    |                                |
-|             |                                  |                                |
-+-------------+----------------------------------+--------------------------------+
-| ERROR       | (not used)                       | Unexpected or malformed packet |
-|             |                                  |                                |
-|             |                                  | .. code-block:: text           |
-|             |                                  |                                |
-|             |                                  |   - channel_id                 |
-|             |                                  |   - service_id (if relevant)   |
-|             |                                  |   - method_id (if relevant)    |
-|             |                                  |   - status                     |
-|             |                                  |                                |
-+-------------+----------------------------------+--------------------------------+
+Client-to-server packets
+^^^^^^^^^^^^^^^^^^^^^^^^
++---------------------------+----------------------------------+
+| packet type               | description                      |
++===========================+==================================+
+| REQUEST                   | RPC request                      |
+|                           |                                  |
+|                           | .. code-block:: text             |
+|                           |                                  |
+|                           |   - channel_id                   |
+|                           |   - service_id                   |
+|                           |   - method_id                    |
+|                           |   - payload                      |
+|                           |     (unless first client stream) |
+|                           |                                  |
++---------------------------+----------------------------------+
+| CLIENT_STREAM_END         | Client stream finished           |
+|                           |                                  |
+|                           | .. code-block:: text             |
+|                           |                                  |
+|                           |   - channel_id                   |
+|                           |   - service_id                   |
+|                           |   - method_id                    |
+|                           |                                  |
+|                           |                                  |
++---------------------------+----------------------------------+
+| CLIENT_ERROR              | Received unexpected packet       |
+|                           |                                  |
+|                           | .. code-block:: text             |
+|                           |                                  |
+|                           |   - channel_id                   |
+|                           |   - service_id                   |
+|                           |   - method_id                    |
+|                           |   - status                       |
++---------------------------+----------------------------------+
+| CANCEL_SERVER_STREAM      | Cancel a server stream           |
+|                           |                                  |
+|                           | .. code-block:: text             |
+|                           |                                  |
+|                           |   - channel_id                   |
+|                           |   - service_id                   |
+|                           |   - method_id                    |
+|                           |                                  |
++---------------------------+----------------------------------+
 
-Error packets
--------------
-The server sends ``ERROR`` packets when it receives a packet it cannot process.
-The status field indicates the type of error.
+**Errors**
 
-* ``DATA_LOSS`` -- Failed to decode a packet.
-* ``NOT_FOUND`` -- The requested service or method does not exist. In the
-  ``ERROR`` packet, the service ID is always set, but the method ID is only set
-  if the requested service exists.
+The client sends ``CLIENT_ERROR`` packets to a server when it receives a packet
+it did not request. If the RPC is a streaming RPC, the server should abort it.
+
+The status code indicates the type of error. If the client does not distinguish
+between the error types, it can send whichever status is most relevant. The
+status code is logged, but all status codes result in the same action by the
+server: aborting the RPC.
+
+* ``NOT_FOUND`` -- Received a packet for a service method the client does not
+  recognize.
+* ``FAILED_PRECONDITION`` -- Received a packet for a service method that the
+  client did not invoke.
+
+Server-to-client packets
+^^^^^^^^^^^^^^^^^^^^^^^^
++-------------------+--------------------------------+
+| packet type       | description                    |
++===================+================================+
+| RESPONSE          | RPC response                   |
+|                   |                                |
+|                   | .. code-block:: text           |
+|                   |                                |
+|                   |   - channel_id                 |
+|                   |   - service_id                 |
+|                   |   - method_id                  |
+|                   |   - payload                    |
+|                   |   - status                     |
+|                   |     (unless in server stream)  |
++-------------------+--------------------------------+
+| SERVER_STREAM_END | Server stream and RPC finished |
+|                   |                                |
+|                   | .. code-block:: text           |
+|                   |                                |
+|                   |   - channel_id                 |
+|                   |   - service_id                 |
+|                   |   - method_id                  |
+|                   |   - status                     |
++-------------------+--------------------------------+
+| SERVER_ERROR      | Received unexpected packet     |
+|                   |                                |
+|                   | .. code-block:: text           |
+|                   |                                |
+|                   |   - channel_id                 |
+|                   |   - service_id (if relevant)   |
+|                   |   - method_id (if relevant)    |
+|                   |   - status                     |
++-------------------+--------------------------------+
+
+**Errors**
+
+The server sends ``SERVER_ERROR`` packets when it receives a packet it cannot
+process. The client should abort any RPC for which it receives an error. The
+status field indicates the type of error.
+
+* ``NOT_FOUND`` -- The requested service or method does not exist.
 * ``FAILED_PRECONDITION`` -- Attempted to cancel an RPC that is not pending.
 * ``RESOURCE_EXHAUSTED`` -- The request came on a new channel, but a channel
   could not be allocated for it.
@@ -155,19 +346,19 @@
 
     client -> server [
         label = "request",
-        leftnote = "PacketType.RPC\nchannel ID\nservice ID\nmethod ID\npayload"
+        leftnote = "PacketType.REQUEST\nchannel ID\nservice ID\nmethod ID\npayload"
     ];
 
     client <- server [
         label = "response",
-        rightnote = "PacketType.RPC\nchannel ID\nservice ID\nmethod ID\npayload\nstatus"
+        rightnote = "PacketType.RESPONSE\nchannel ID\nservice ID\nmethod ID\npayload\nstatus"
     ];
   }
 
 Server streaming RPC
 ^^^^^^^^^^^^^^^^^^^^
 In a server streaming RPC, the client sends a single request and the server
-sends any number of responses followed by a ``STREAM_END`` packet.
+sends any number of responses followed by a ``SERVER_STREAM_END`` packet.
 
 .. seqdiag::
   :scale: 110
@@ -177,23 +368,23 @@
 
     client -> server [
         label = "request",
-        leftnote = "PacketType.RPC\nchannel ID\nservice ID\nmethod ID\npayload"
+        leftnote = "PacketType.REQUEST\nchannel ID\nservice ID\nmethod ID\npayload"
     ];
 
     client <-- server [
         noactivate,
         label = "responses (zero or more)",
-        rightnote = "PacketType.RPC\nchannel ID\nservice ID\nmethod ID\npayload"
+        rightnote = "PacketType.RESPONSE\nchannel ID\nservice ID\nmethod ID\npayload"
     ];
 
     client <- server [
         label = "done",
-        rightnote = "PacketType.STREAM_END\nchannel ID\nservice ID\nmethod ID\nstatus"
+        rightnote = "PacketType.SERVER_STREAM_END\nchannel ID\nservice ID\nmethod ID\nstatus"
     ];
   }
 
 Server streaming RPCs may be cancelled by the client. The client sends a
-``CANCEL`` packet to terminate the RPC.
+``CANCEL_SERVER_STREAM`` packet to terminate the RPC.
 
 .. seqdiag::
   :scale: 110
@@ -203,31 +394,31 @@
 
     client -> server [
         label = "request",
-        leftnote = "PacketType.RPC\nchannel ID\nservice ID\nmethod ID\npayload"
+        leftnote = "PacketType.REQUEST\nchannel ID\nservice ID\nmethod ID\npayload"
     ];
 
     client <-- server [
         noactivate,
         label = "responses (zero or more)",
-        rightnote = "PacketType.RPC\nchannel ID\nservice ID\nmethod ID\npayload"
+        rightnote = "PacketType.RESPONSE\nchannel ID\nservice ID\nmethod ID\npayload"
     ];
 
     client -> server [
         noactivate,
         label = "cancel",
-        leftnote  = "PacketType.CANCEL\nchannel ID\nservice ID\nmethod ID"
+        leftnote  = "PacketType.CANCEL_SERVER_STREAM\nchannel ID\nservice ID\nmethod ID"
     ];
 
     client <- server [
         label = "done",
-        rightnote = "PacketType.STREAM_END\nchannel ID\nservice ID\nmethod ID\nstatus"
+        rightnote = "PacketType.SERVER_STREAM_END\nchannel ID\nservice ID\nmethod ID\nstatus"
     ];
   }
 
 Client streaming RPC
 ^^^^^^^^^^^^^^^^^^^^
 In a client streaming RPC, the client sends any number of RPC requests followed
-by a ``STREAM_END`` packet. The server then sends a single response.
+by a ``CLIENT_STREAM_END`` packet. The server then sends a single response.
 
 The first client-to-server RPC packet does not include a payload.
 
@@ -243,24 +434,24 @@
 
     client -> server [
         label = "start",
-        leftnote = "PacketType.RPC\nchannel ID\nservice ID\nmethod ID"
+        leftnote = "PacketType.REQUEST\nchannel ID\nservice ID\nmethod ID"
     ];
 
     client --> server [
         noactivate,
         label = "requests (zero or more)",
-        leftnote = "PacketType.RPC\nchannel ID\nservice ID\nmethod ID\npayload"
+        leftnote = "PacketType.REQUEST\nchannel ID\nservice ID\nmethod ID\npayload"
     ];
 
     client -> server [
         noactivate,
         label = "done",
-        leftnote = "PacketType.STREAM_END\nchannel ID\nservice ID\nmethod ID"
+        leftnote = "PacketType.CLIENT_STREAM_END\nchannel ID\nservice ID\nmethod ID"
     ];
 
     client <- server [
         label = "response",
-        rightnote = "PacketType.RPC\nchannel ID\nservice ID\nmethod ID\npayload\nstatus"
+        rightnote = "PacketType.RESPONSE\nchannel ID\nservice ID\nmethod ID\npayload\nstatus"
     ];
   }
 
@@ -275,28 +466,28 @@
 
     client -> server [
         label = "start",
-        leftnote = "PacketType.RPC\nchannel ID\nservice ID\nmethod ID"
+        leftnote = "PacketType.REQUEST\nchannel ID\nservice ID\nmethod ID"
     ];
 
     client --> server [
         noactivate,
         label = "requests (zero or more)",
-        leftnote = "PacketType.RPC\nchannel ID\nservice ID\nmethod ID\npayload"
+        leftnote = "PacketType.REQUEST\nchannel ID\nservice ID\nmethod ID\npayload"
     ];
 
     client <- server [
         label = "response",
-        rightnote = "PacketType.RPC\nchannel ID\nservice ID\nmethod ID\npayload\nstatus"
+        rightnote = "PacketType.RESPONSE\nchannel ID\nservice ID\nmethod ID\npayload\nstatus"
     ];
   }
 
 Bidirectional streaming RPC
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^
 In a bidirectional streaming RPC, the client sends any number of requests and
-the server sends any number of responses. The client sends a ``STREAM_END``
-packet when it has finished sending requests. The server sends a ``STREAM_END``
-packet after it receives the client's ``STREAM_END`` and finished sending its
-responses.
+the server sends any number of responses. The client sends a
+``CLIENT_STREAM_END`` packet when it has finished sending requests. The server
+sends a ``SERVER_STREAM_END`` packet after it receives the client's
+``CLIENT_STREAM_END`` and has finished sending its responses.
 
 The first client-to-server RPC packet does not include a payload.
 
@@ -312,13 +503,13 @@
 
     client -> server [
         label = "start",
-        leftnote = "PacketType.RPC\nchannel ID\nservice ID\nmethod ID"
+        leftnote = "PacketType.REQUEST\nchannel ID\nservice ID\nmethod ID"
     ];
 
     client --> server [
         noactivate,
         label = "requests (zero or more)",
-        leftnote = "PacketType.RPC\nchannel ID\nservice ID\nmethod ID\npayload"
+        leftnote = "PacketType.REQUEST\nchannel ID\nservice ID\nmethod ID\npayload"
     ];
 
     ... (messages in any order) ...
@@ -326,13 +517,13 @@
     client <-- server [
         noactivate,
         label = "responses (zero or more)",
-        rightnote = "PacketType.RPC\nchannel ID\nservice ID\nmethod ID\npayload"
+        rightnote = "PacketType.RESPONSE\nchannel ID\nservice ID\nmethod ID\npayload"
     ];
 
     client -> server [
         noactivate,
         label = "done",
-        leftnote = "PacketType.STREAM_END\nchannel ID\nservice ID\nmethod ID"
+        leftnote = "PacketType.CLIENT_STREAM_END\nchannel ID\nservice ID\nmethod ID"
     ];
 
     client <-- server [
@@ -343,13 +534,14 @@
 
     client <- server [
         label = "done",
-        rightnote = "PacketType.STREAM_END\nchannel ID\nservice ID\nmethod ID\nstatus"
+        rightnote = "PacketType.SERVER_STREAM_END\nchannel ID\nservice ID\nmethod ID\nstatus"
     ];
   }
 
-The server may terminate the RPC at any time by sending a ``STREAM_END`` packet
-with the status, even if the client has not sent its ``STREAM_END``. The client
-may cancel the RPC at any time by sending a ``CANCEL`` packet.
+The server may terminate the RPC at any time by sending a ``SERVER_STREAM_END``
+packet with the status, even if the client has not sent its
+``CLIENT_STREAM_END``. The client may cancel the RPC at any time by sending a
+``CANCEL_SERVER_STREAM`` packet.
 
 .. seqdiag::
   :scale: 110
@@ -377,7 +569,7 @@
     client -> server [
         noactivate,
         label = "cancel",
-        leftnote = "PacketType.CANCEL\nchannel ID\nservice ID\nmethod ID"
+        leftnote = "PacketType.CANCEL_SERVER_STREAM\nchannel ID\nservice ID\nmethod ID"
     ];
 
     client <- server [
@@ -394,6 +586,17 @@
 
   Document the public interface
 
+Size report
+-----------
+The following size report showcases the memory usage of the core RPC server. It
+is configured with a single channel using a basic transport interface that
+directly reads from and writes to ``pw_sys_io``. The transport has a 128-byte
+packet buffer, which comprises the plurality of the example's RAM usage. This is
+not a suitable transport for an actual product; a real implementation would have
+additional overhead proportional to the complexity of the transport.
+
+.. include:: server_size
+
 RPC server implementation
 -------------------------
 
@@ -473,3 +676,70 @@
     method -> server -> channel;
     channel -> packets [folded];
   }
+
+RPC client
+==========
+The RPC client is used to send requests to a server and manages the contexts of
+ongoing RPCs.
+
+Setting up a client
+-------------------
+The ``pw::rpc::Client`` class is instantiated with a list of channels that it
+uses to communicate. These channels can be shared with a server, but multiple
+clients cannot use the same channels.
+
+To send incoming RPC packets from the transport layer to be processed by a
+client, the client's ``ProcessPacket`` function is called with the packet data.
+
+.. code:: c++
+
+  #include "pw_rpc/client.h"
+
+  namespace {
+
+  pw::rpc::Channel my_channels[] = {
+      pw::rpc::Channel::Create<1>(&my_channel_output)};
+  pw::rpc::Client my_client(my_channels);
+
+  }  // namespace
+
+  // Called when the transport layer receives an RPC packet.
+  void ProcessRpcPacket(ConstByteSpan packet) {
+    my_client.ProcessPacket(packet);
+  }
+
+.. _module-pw_rpc-making-calls:
+
+Making RPC calls
+----------------
+RPC calls are not made directly through the client, but using one of its
+registered channels instead. A service client class is generated from a .proto
+file for each selected protobuf library, which is then used to send RPC requests
+through a given channel. The API for this depends on the protobuf library;
+please refer to the
+:ref:`appropriate documentation<module-pw_rpc-protobuf-library-apis>`. Multiple
+service client implementations can exist simultaneously and share the same
+``Client`` class.
+
+When a call is made, a ``pw::rpc::ClientCall`` object is returned to the caller.
+This object tracks the ongoing RPC call, and can be used to manage it. An RPC
+call is only active as long as its ``ClientCall`` object is alive.
+
+.. tip::
+  Use ``std::move`` when passing around ``ClientCall`` objects to keep RPCs
+  alive.
+
+Client implementation details
+-----------------------------
+
+The ClientCall class
+^^^^^^^^^^^^^^^^^^^^
+``ClientCall`` stores the context of an active RPC, and serves as the user's
+interface to the RPC client. The core RPC library provides a base ``ClientCall``
+class with common functionality, which is then extended for RPC client
+implementations tied to different protobuf libraries to provide convenient
+interfaces for working with RPCs.
+
+The RPC client stores a list of all active ``ClientCall`` objects. When an
+incoming packet is received, it dispatches to one of its active calls, which
+then decodes the payload and presents it to the user.
diff --git a/pw_rpc/nanopb/BUILD.gn b/pw_rpc/nanopb/BUILD.gn
index 3775437..39edd13 100644
--- a/pw_rpc/nanopb/BUILD.gn
+++ b/pw_rpc/nanopb/BUILD.gn
@@ -12,56 +12,90 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/target_types.gni")
 import("$dir_pw_docgen/docs.gni")
-import("$dir_pw_protobuf_compiler/nanopb.gni")
+import("$dir_pw_third_party/nanopb/nanopb.gni")
 import("$dir_pw_unit_test/test.gni")
-config("overrides") {
-  include_dirs = [ "public_overrides" ]
-  visibility = [ ":*" ]
-}
 
 config("public") {
   include_dirs = [ "public" ]
   visibility = [ ":*" ]
 }
 
-pw_source_set("nanopb") {
-  public_configs = [ ":overrides" ]
-  public = [ "public_overrides/pw_rpc/internal/method.h" ]
-  sources = [ "method.cc" ]
-  public_deps = [ "..:server_library_deps" ]
+pw_source_set("method") {
+  public_configs = [ ":public" ]
+  public = [ "public/pw_rpc/internal/nanopb_method.h" ]
+  sources = [ "nanopb_method.cc" ]
+  public_deps = [
+    ":common",
+    "..:server",
+  ]
+  deps = [ dir_pw_log ]
+}
+
+pw_source_set("method_union") {
+  public_configs = [ ":public" ]
+  public = [ "public/pw_rpc/internal/nanopb_method_union.h" ]
+  public_deps = [
+    ":method",
+    "$dir_pw_rpc/raw:method_union",
+  ]
+}
+
+pw_source_set("client") {
+  public_configs = [ ":public" ]
+  public_deps = [
+    ":common",
+    "..:client",
+  ]
+  public = [ "public/pw_rpc/nanopb_client_call.h" ]
+  sources = [ "nanopb_client_call.cc" ]
+}
+
+pw_source_set("common") {
+  public_deps = [ dir_pw_bytes ]
+  public_configs = [ ":public" ]
+  public = [ "public/pw_rpc/internal/nanopb_common.h" ]
+  sources = [ "nanopb_common.cc" ]
 
   if (dir_pw_third_party_nanopb != "") {
-    public_deps += [ dir_pw_third_party_nanopb ]
+    public_deps += [ "$dir_pw_third_party/nanopb" ]
   }
-
-  visibility = [ "../*" ]
 }
 
 pw_source_set("service_method_traits") {
   public_configs = [ ":public" ]
-  public = [ "public/pw_rpc/internal/service_method_traits.h" ]
-  public_deps = [ "..:nanopb_server" ]
+  public = [ "public/pw_rpc/internal/nanopb_service_method_traits.h" ]
+  public_deps = [
+    ":method_union",
+    "..:service_method_traits",
+  ]
 }
 
 pw_source_set("test_method_context") {
   public_configs = [ ":public" ]
-  public = [ "public/pw_rpc/test_method_context.h" ]
+  public = [ "public/pw_rpc/nanopb_test_method_context.h" ]
   public_deps = [
     ":service_method_traits",
-    "..:nanopb_server",
+    "..:server",
+    dir_pw_assert,
     dir_pw_containers,
-    dir_pw_unit_test,
   ]
 }
 
+pw_source_set("internal_test_utils") {
+  public = [ "pw_rpc_nanopb_private/internal_test_utils.h" ]
+  public_deps = []
+  if (dir_pw_third_party_nanopb != "") {
+    public_deps += [ "$dir_pw_third_party/nanopb" ]
+  }
+}
+
 pw_source_set("echo_service") {
   public_configs = [ ":public" ]
-  public_deps = [ "..:echo_service_proto_nanopb_rpc" ]
+  public_deps = [ "..:echo_service_proto.nanopb_rpc" ]
   sources = [ "public/pw_rpc/echo_service_nanopb.h" ]
 }
 
@@ -71,49 +105,80 @@
 
 pw_test_group("tests") {
   tests = [
+    ":client_call_test",
     ":codegen_test",
     ":echo_service_test",
-    ":method_test",
-    ":service_method_traits_test",
+    ":nanopb_method_test",
+    ":nanopb_method_union_test",
+    ":nanopb_service_method_traits_test",
   ]
 }
 
+pw_test("client_call_test") {
+  deps = [
+    ":client",
+    ":internal_test_utils",
+    "..:test_protos.nanopb",
+    "..:test_utils",
+  ]
+  sources = [ "nanopb_client_call_test.cc" ]
+  enable_if = dir_pw_third_party_nanopb != ""
+}
+
 pw_test("codegen_test") {
   deps = [
+    ":client",
+    ":internal_test_utils",
     ":test_method_context",
-    "..:nanopb_server",
-    "..:test_protos_nanopb_rpc",
+    "..:server",
+    "..:test_protos.nanopb_rpc",
+    "..:test_utils",
   ]
   sources = [ "codegen_test.cc" ]
   enable_if = dir_pw_third_party_nanopb != ""
 }
 
-pw_test("method_test") {
+pw_test("nanopb_method_test") {
   deps = [
-    "..:internal_test_utils_nanopb_server",
-    "..:nanopb_server",
-    "..:test_protos_nanopb",
+    ":internal_test_utils",
+    ":method_union",
+    "..:server",
+    "..:test_protos.nanopb",
+    "..:test_utils",
   ]
-  sources = [ "method_test.cc" ]
+  sources = [ "nanopb_method_test.cc" ]
+  enable_if = dir_pw_third_party_nanopb != ""
+}
+
+pw_test("nanopb_method_union_test") {
+  deps = [
+    ":internal_test_utils",
+    ":method_union",
+    "..:test_protos.nanopb",
+    "..:test_utils",
+  ]
+  sources = [ "nanopb_method_union_test.cc" ]
   enable_if = dir_pw_third_party_nanopb != ""
 }
 
 pw_test("echo_service_test") {
   deps = [
     ":echo_service",
+    ":method",
     ":test_method_context",
   ]
   sources = [ "echo_service_test.cc" ]
   enable_if = dir_pw_third_party_nanopb != ""
 }
 
-pw_test("service_method_traits_test") {
+pw_test("nanopb_service_method_traits_test") {
   deps = [
     ":echo_service",
+    ":method",
     ":service_method_traits",
     ":test_method_context",
-    "..:test_protos_nanopb_rpc",
+    "..:test_protos.nanopb_rpc",
   ]
-  sources = [ "service_method_traits_test.cc" ]
+  sources = [ "nanopb_service_method_traits_test.cc" ]
   enable_if = dir_pw_third_party_nanopb != ""
 }
diff --git a/pw_rpc/nanopb/CMakeLists.txt b/pw_rpc/nanopb/CMakeLists.txt
new file mode 100644
index 0000000..896fefd
--- /dev/null
+++ b/pw_rpc/nanopb/CMakeLists.txt
@@ -0,0 +1,67 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+
+pw_add_module_library(pw_rpc.nanopb.method
+  SOURCES
+    nanopb_method.cc
+  PUBLIC_DEPS
+    pw_rpc.nanopb.common
+    pw_rpc.server
+  PRIVATE_DEPS
+    pw_log
+)
+
+pw_add_module_library(pw_rpc.nanopb.method_union
+  PUBLIC_DEPS
+    pw_rpc.nanopb.method
+    pw_rpc.raw
+    pw_rpc.server
+  PRIVATE_DEPS
+    pw_log
+)
+
+pw_add_module_library(pw_rpc.nanopb.client
+  SOURCES
+    nanopb_client_call.cc
+  PUBLIC_DEPS
+    pw_rpc.nanopb.common
+    pw_rpc.common
+)
+
+pw_add_module_library(pw_rpc.nanopb.common
+  SOURCES
+    nanopb_common.cc
+  PUBLIC_DEPS
+    pw_bytes
+    pw_rpc.common
+    pw_third_party.nanopb
+)
+
+pw_add_module_library(pw_rpc.nanopb.echo_service
+  PUBLIC_DEPS
+    pw_rpc.echo_proto.nanopb_rpc
+)
+
+pw_auto_add_module_tests(pw_rpc.nanopb
+  PRIVATE_DEPS
+    pw_rpc.client
+    pw_rpc.raw
+    pw_rpc.server
+    pw_rpc.nanopb.common
+    pw_rpc.echo_proto.nanopb_rpc
+    pw_rpc.test_protos.nanopb_rpc
+    pw_rpc.test_utils
+)
diff --git a/pw_rpc/nanopb/codegen_test.cc b/pw_rpc/nanopb/codegen_test.cc
index 4a3bd80..7c7667e 100644
--- a/pw_rpc/nanopb/codegen_test.cc
+++ b/pw_rpc/nanopb/codegen_test.cc
@@ -14,7 +14,9 @@
 
 #include "gtest/gtest.h"
 #include "pw_rpc/internal/hash.h"
-#include "pw_rpc/test_method_context.h"
+#include "pw_rpc/nanopb_test_method_context.h"
+#include "pw_rpc_nanopb_private/internal_test_utils.h"
+#include "pw_rpc_private/internal_test_utils.h"
 #include "pw_rpc_test_protos/test.rpc.pb.h"
 
 namespace pw::rpc {
@@ -33,7 +35,7 @@
                      const pw_rpc_test_TestRequest& request,
                      ServerWriter<pw_rpc_test_TestStreamResponse>& writer) {
     for (int i = 0; i < request.integer; ++i) {
-      writer.Write({.number = static_cast<uint32_t>(i)});
+      writer.Write({.chunk = {}, .number = static_cast<uint32_t>(i)});
     }
 
     writer.Finish(static_cast<Status::Code>(request.status_code));
@@ -50,31 +52,31 @@
   EXPECT_STREQ(service.name(), "TestService");
 }
 
-TEST(NanopbCodegen, InvokeUnaryRpc) {
-  TestMethodContext<&test::TestService::TestRpc> context;
+TEST(NanopbCodegen, Server_InvokeUnaryRpc) {
+  PW_NANOPB_TEST_METHOD_CONTEXT(test::TestService, TestRpc) context;
 
-  EXPECT_EQ(Status::OK,
-            context.call({.integer = 123, .status_code = Status::OK}));
+  EXPECT_EQ(Status::Ok(),
+            context.call({.integer = 123, .status_code = Status::Ok().code()}));
 
   EXPECT_EQ(124, context.response().value);
 
-  EXPECT_EQ(
-      Status::INVALID_ARGUMENT,
-      context.call({.integer = 999, .status_code = Status::INVALID_ARGUMENT}));
+  EXPECT_EQ(Status::InvalidArgument(),
+            context.call({.integer = 999,
+                          .status_code = Status::InvalidArgument().code()}));
   EXPECT_EQ(1000, context.response().value);
 }
 
-TEST(NanopbCodegen, InvokeStreamingRpc) {
-  TestMethodContext<&test::TestService::TestStreamRpc> context;
+TEST(NanopbCodegen, Server_InvokeStreamingRpc) {
+  PW_NANOPB_TEST_METHOD_CONTEXT(test::TestService, TestStreamRpc) context;
 
-  context.call({.integer = 0, .status_code = Status::ABORTED});
+  context.call({.integer = 0, .status_code = Status::Aborted().code()});
 
-  EXPECT_EQ(Status::ABORTED, context.status());
+  EXPECT_EQ(Status::Aborted(), context.status());
   EXPECT_TRUE(context.done());
   EXPECT_TRUE(context.responses().empty());
   EXPECT_EQ(0u, context.total_responses());
 
-  context.call({.integer = 4, .status_code = Status::OK});
+  context.call({.integer = 4, .status_code = Status::Ok().code()});
 
   ASSERT_EQ(4u, context.responses().size());
   ASSERT_EQ(4u, context.total_responses());
@@ -83,15 +85,16 @@
     EXPECT_EQ(context.responses()[i].number, i);
   }
 
-  EXPECT_EQ(Status::OK, context.status());
+  EXPECT_EQ(Status::Ok().code(), context.status());
 }
 
-TEST(NanopbCodegen, InvokeStreamingRpc_ContextKeepsFixedNumberOfResponses) {
-  TestMethodContext<&test::TestService::TestStreamRpc, 3> context;
+TEST(NanopbCodegen,
+     Server_InvokeStreamingRpc_ContextKeepsFixedNumberOfResponses) {
+  PW_NANOPB_TEST_METHOD_CONTEXT(test::TestService, TestStreamRpc, 3) context;
 
   ASSERT_EQ(3u, context.responses().max_size());
 
-  context.call({.integer = 5, .status_code = Status::NOT_FOUND});
+  context.call({.integer = 5, .status_code = Status::NotFound().code()});
 
   ASSERT_EQ(3u, context.responses().size());
   ASSERT_EQ(5u, context.total_responses());
@@ -101,22 +104,22 @@
   EXPECT_EQ(context.responses()[2].number, 4u);
 }
 
-TEST(NanopbCodegen, InvokeStreamingRpc_ManualWriting) {
-  TestMethodContext<&test::TestService::TestStreamRpc, 3> context;
+TEST(NanopbCodegen, Server_InvokeStreamingRpc_ManualWriting) {
+  PW_NANOPB_TEST_METHOD_CONTEXT(test::TestService, TestStreamRpc, 3) context;
 
   ASSERT_EQ(3u, context.responses().max_size());
 
   auto writer = context.writer();
 
-  writer.Write({.number = 3});
-  writer.Write({.number = 6});
-  writer.Write({.number = 9});
+  writer.Write({.chunk = {}, .number = 3});
+  writer.Write({.chunk = {}, .number = 6});
+  writer.Write({.chunk = {}, .number = 9});
 
   EXPECT_FALSE(context.done());
 
-  writer.Finish(Status::CANCELLED);
+  writer.Finish(Status::Cancelled());
   ASSERT_TRUE(context.done());
-  EXPECT_EQ(Status::CANCELLED, context.status());
+  EXPECT_EQ(Status::Cancelled(), context.status());
 
   ASSERT_EQ(3u, context.responses().size());
   ASSERT_EQ(3u, context.total_responses());
@@ -126,5 +129,62 @@
   EXPECT_EQ(context.responses()[2].number, 9u);
 }
 
+using TestServiceClient = test::nanopb::TestServiceClient;
+using internal::TestServerStreamingResponseHandler;
+using internal::TestUnaryResponseHandler;
+
+TEST(NanopbCodegen, Client_InvokesUnaryRpcWithCallback) {
+  constexpr uint32_t service_id = internal::Hash("pw.rpc.test.TestService");
+  constexpr uint32_t method_id = internal::Hash("TestRpc");
+
+  ClientContextForTest<128, 128, 99, service_id, method_id> context;
+  TestUnaryResponseHandler<pw_rpc_test_TestResponse> handler;
+
+  auto call = TestServiceClient::TestRpc(
+      context.channel(), {.integer = 123, .status_code = 0}, handler);
+  EXPECT_EQ(context.output().packet_count(), 1u);
+  auto packet = context.output().sent_packet();
+  EXPECT_EQ(packet.channel_id(), context.channel().id());
+  EXPECT_EQ(packet.service_id(), service_id);
+  EXPECT_EQ(packet.method_id(), method_id);
+  PW_DECODE_PB(pw_rpc_test_TestRequest, sent_proto, packet.payload());
+  EXPECT_EQ(sent_proto.integer, 123);
+
+  PW_ENCODE_PB(pw_rpc_test_TestResponse, response, .value = 42);
+  context.SendResponse(Status::Ok(), response);
+  ASSERT_EQ(handler.responses_received(), 1u);
+  EXPECT_EQ(handler.last_status(), Status::Ok());
+  EXPECT_EQ(handler.last_response().value, 42);
+}
+
+TEST(NanopbCodegen, Client_InvokesServerStreamingRpcWithCallback) {
+  constexpr uint32_t service_id = internal::Hash("pw.rpc.test.TestService");
+  constexpr uint32_t method_id = internal::Hash("TestStreamRpc");
+
+  ClientContextForTest<128, 128, 99, service_id, method_id> context;
+  TestServerStreamingResponseHandler<pw_rpc_test_TestStreamResponse> handler;
+
+  auto call = TestServiceClient::TestStreamRpc(
+      context.channel(), {.integer = 123, .status_code = 0}, handler);
+  EXPECT_EQ(context.output().packet_count(), 1u);
+  auto packet = context.output().sent_packet();
+  EXPECT_EQ(packet.channel_id(), context.channel().id());
+  EXPECT_EQ(packet.service_id(), service_id);
+  EXPECT_EQ(packet.method_id(), method_id);
+  PW_DECODE_PB(pw_rpc_test_TestRequest, sent_proto, packet.payload());
+  EXPECT_EQ(sent_proto.integer, 123);
+
+  PW_ENCODE_PB(
+      pw_rpc_test_TestStreamResponse, response, .chunk = {}, .number = 11u);
+  context.SendResponse(Status::Ok(), response);
+  ASSERT_EQ(handler.responses_received(), 1u);
+  EXPECT_EQ(handler.last_response().number, 11u);
+
+  context.SendPacket(internal::PacketType::SERVER_STREAM_END,
+                     Status::NotFound());
+  EXPECT_FALSE(handler.active());
+  EXPECT_EQ(handler.status(), Status::NotFound());
+}
+
 }  // namespace
 }  // namespace pw::rpc
diff --git a/pw_rpc/nanopb/docs.rst b/pw_rpc/nanopb/docs.rst
index 32f87fb..5e3b0d2 100644
--- a/pw_rpc/nanopb/docs.rst
+++ b/pw_rpc/nanopb/docs.rst
@@ -1,8 +1,4 @@
-.. default-domain:: cpp
-
-.. highlight:: sh
-
-.. _chapter-pw-rpc-nanopb:
+.. _module-pw_rpc_nanopb:
 
 ------
 nanopb
@@ -12,23 +8,12 @@
 
 Usage
 =====
-To enable nanopb code generation, add ``nanopb_rpc`` as a generator to your
-Pigweed target's ``pw_protobuf_GENERATORS`` list. Refer to
-:ref:`chapter-pw-protobuf-compiler` for additional information.
-
-.. code::
-
-  # my_target/target_toolchains.gni
-
-  defaults = {
-    pw_protobuf_GENERATORS = [
-      "pwpb",
-      "nanopb_rpc",  # Enable RPC codegen
-    ]
-  }
+To enable nanopb code generation, the build argument
+``dir_pw_third_party_nanopb`` must be set to point to a local nanopb
+installation.
 
 Define a ``pw_proto_library`` containing the .proto file defining your service
-(and optionally other related protos), then depend on the ``_nanopb_rpc``
+(and optionally other related protos), then depend on the ``nanopb_rpc``
 version of that library in the code implementing the service.
 
 .. code::
@@ -48,7 +33,7 @@
       "chat_service.cc",
       "chat_service.h",
     ]
-    public_deps = [ ":chat_protos_nanopb_rpc" ]
+    public_deps = [ ":chat_protos.nanopb_rpc" ]
   }
 
 A C++ header file is generated for each input .proto file, with the ``.proto``
@@ -151,3 +136,181 @@
 .. attention::
 
   ``pw_rpc`` does not yet support bidirectional streaming RPCs.
+
+Client-side
+-----------
+A corresponding client class is generated for every service defined in the proto
+file. Like the service class, it is placed under the ``generated`` namespace.
+The class is named after the service, with a ``Client`` suffix. For example, the
+``ChatService`` would create a ``generated::ChatServiceClient``.
+
+The client class contains static methods to call each of the service's methods.
+It is not meant to be instantiated. The signatures for the methods all follow
+the same format, taking a channel through which to communicate, the initial
+request struct, and a response handler.
+
+.. code-block:: c++
+
+  static NanopbClientCall<UnaryResponseHandler<RoomInfoResponse>>
+  GetRoomInformation(Channel& channel,
+                     const RoomInfoRequest& request,
+                     UnaryResponseHandler<RoomInfoResponse> handler);
+
+The ``NanopbClientCall`` object returned by the RPC invocation stores the active
+RPC's context. For more information on ``ClientCall`` objects, refer to the
+:ref:`core RPC documentation <module-pw_rpc-making-calls>`.
+
+Response handlers
+^^^^^^^^^^^^^^^^^
+RPC responses are sent back to the caller through a response handler object.
+These are classes with virtual callback functions implemented by the RPC caller
+to handle RPC events.
+
+There are two types of response handlers: unary and server-streaming, which are
+used depending whether the method's responses are a stream or not.
+
+Unary / client streaming RPC
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+A ``UnaryResponseHandler`` is used by methods where the server returns a single
+response. It contains a callback for the response, which is only called once.
+
+.. code-block:: c++
+
+  template <typename Response>
+  class UnaryResponseHandler {
+   public:
+    virtual ~UnaryResponseHandler() = default;
+
+    // Called when the response is received from the server with the method's
+    // status and the deserialized response struct.
+    virtual void ReceivedResponse(Status status, const Response& response) = 0;
+
+    // Called when an error occurs internally in the RPC client or server.
+    virtual void RpcError(Status) {}
+  };
+
+.. cpp:class:: template <typename Response> UnaryResponseHandler
+
+  A handler for RPC methods which return a single response (i.e. unary and
+  client streaming).
+
+.. cpp:function:: virtual void UnaryResponseHandler::ReceivedResponse(Status status, const Response& response)
+
+  Callback invoked when the response is recieved from the server. Guaranteed to
+  only be called once.
+
+.. cpp:function:: virtual void UnaryResponseHandler::RpcError(Status status)
+
+  Callback invoked if an internal error occurs in the RPC system. Optional;
+  defaults to a no-op.
+
+**Example implementation**
+
+.. code-block:: c++
+
+  class RoomInfoHandler : public UnaryResponseHandler<RoomInfoResponse> {
+   public:
+    void ReceivedResponse(Status status,
+                          const RoomInfoResponse& response) override {
+      if (status.ok()) {
+        response_ = response;
+      }
+    }
+
+    constexpr RoomInfoResponse& response() { return response_; }
+
+   private:
+    RoomInfoResponse response_;
+  };
+
+Server streaming / bidirectional streaming RPC
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+For methods which return a response stream, a ``ServerStreamingResponseHandler``
+is used.
+
+.. code:: c++
+
+  class ServerStreamingResponseHandler {
+   public:
+    virtual ~ServerStreamingResponseHandler() = default;
+
+    // Called on every response received from the server with the deserialized
+    // response struct.
+    virtual void ReceivedResponse(const Response& response) = 0;
+
+    // Called when the server ends the stream with the overall RPC status.
+    virtual void Complete(Status status) = 0;
+
+    // Called when an error occurs internally in the RPC client or server.
+    virtual void RpcError(Status) {}
+  };
+
+.. cpp:class:: template <typename Response> ServerStreamingResponseHandler
+
+  A handler for RPC methods which return zero or more responses (i.e. server
+  and bidirectional streaming).
+
+.. cpp:function:: virtual void ServerStreamingResponseHandler::ReceivedResponse(const Response& response)
+
+  Callback invoked whenever a response is received from the server.
+
+.. cpp:function:: virtual void ServerStreamingResponseHandler::Complete(Status status)
+
+  Callback invoked when the server ends the stream, with the overall status for
+  the RPC.
+
+.. cpp:function:: virtual void ServerStreamingResponseHandler::RpcError(Status status)
+
+  Callback invoked if an internal error occurs in the RPC system. Optional;
+  defaults to a no-op.
+
+**Example implementation**
+
+.. code-block:: c++
+
+  class ChatHandler : public UnaryResponseHandler<ChatMessage> {
+   public:
+    void ReceivedResponse(const ChatMessage& response) override {
+      gui_.RenderChatMessage(response);
+    }
+
+    void Complete(Status status) override {
+      client_.Exit(status);
+    }
+
+   private:
+    ChatGui& gui_;
+    ChatClient& client_;
+  };
+
+Example usage
+~~~~~~~~~~~~~
+The following example demonstrates how to call an RPC method using a nanopb
+service client and receive the response.
+
+.. code-block:: c++
+
+  #include "chat_protos/chat_service.rpc.pb.h"
+
+  namespace {
+    MyChannelOutput output;
+    pw::rpc::Channel channels[] = {pw::rpc::Channel::Create<0>(&output)};
+    pw::rpc::Client client(channels);
+  }
+
+  void InvokeSomeRpcs() {
+    RoomInfoHandler handler;
+
+    // The RPC will remain active as long as `call` is alive.
+    auto call = ChatServiceClient::GetRoomInformation(channels[0],
+                                                      {.room = "pigweed"},
+                                                      handler);
+
+    // For simplicity, block here. An actual implementation would likely
+    // std::move the call somewhere to keep it active while doing other work.
+    while (call.active()) {
+      Wait();
+    }
+
+    DoStuff(handler.response());
+  }
diff --git a/pw_rpc/nanopb/echo_service_test.cc b/pw_rpc/nanopb/echo_service_test.cc
index a769653..31fbb7f 100644
--- a/pw_rpc/nanopb/echo_service_test.cc
+++ b/pw_rpc/nanopb/echo_service_test.cc
@@ -14,20 +14,20 @@
 
 #include "gtest/gtest.h"
 #include "pw_rpc/echo_service_nanopb.h"
-#include "pw_rpc/test_method_context.h"
+#include "pw_rpc/nanopb_test_method_context.h"
 
 namespace pw::rpc {
 namespace {
 
 TEST(EchoService, Echo_EchoesRequestMessage) {
-  TestMethodContext<&EchoService::Echo> context;
-  ASSERT_EQ(context.call({.msg = "Hello, world"}), Status::OK);
+  PW_NANOPB_TEST_METHOD_CONTEXT(EchoService, Echo) context;
+  ASSERT_EQ(context.call(_pw_rpc_EchoMessage{"Hello, world"}), Status::Ok());
   EXPECT_STREQ(context.response().msg, "Hello, world");
 }
 
 TEST(EchoService, Echo_EmptyRequest) {
-  TestMethodContext<&EchoService::Echo> context;
-  ASSERT_EQ(context.call({.msg = ""}), Status::OK);
+  PW_NANOPB_TEST_METHOD_CONTEXT(EchoService, Echo) context;
+  ASSERT_EQ(context.call({.msg = {}}), Status::Ok());
   EXPECT_STREQ(context.response().msg, "");
 }
 
diff --git a/pw_rpc/nanopb/method.cc b/pw_rpc/nanopb/method.cc
deleted file mode 100644
index a096ca8..0000000
--- a/pw_rpc/nanopb/method.cc
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2020 The Pigweed Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-//     https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#include "pw_rpc/internal/method.h"
-
-#include "pb_decode.h"
-#include "pb_encode.h"
-#include "pw_log/log.h"
-#include "pw_rpc/internal/packet.h"
-
-namespace pw::rpc::internal {
-namespace {
-
-// Nanopb 3 uses pb_field_s and Nanopb 4 uses pb_msgdesc_s for fields. The
-// Nanopb version macro is difficult to use, so deduce the correct type from the
-// pb_decode function.
-template <typename DecodeFunction>
-struct NanopbTraits;
-
-template <typename FieldsType>
-struct NanopbTraits<bool(pb_istream_t*, FieldsType, void*)> {
-  using Fields = FieldsType;
-};
-
-using Fields = typename NanopbTraits<decltype(pb_decode)>::Fields;
-
-}  // namespace
-
-using std::byte;
-
-StatusWithSize Method::EncodeResponse(const void* proto_struct,
-                                      std::span<byte> buffer) const {
-  auto output = pb_ostream_from_buffer(
-      reinterpret_cast<pb_byte_t*>(buffer.data()), buffer.size());
-  if (pb_encode(&output, static_cast<Fields>(response_fields_), proto_struct)) {
-    return StatusWithSize(output.bytes_written);
-  }
-  return StatusWithSize::INTERNAL;
-}
-
-bool Method::DecodeResponse(std::span<const byte> response,
-                            void* proto_struct) const {
-  auto input = pb_istream_from_buffer(
-      reinterpret_cast<const pb_byte_t*>(response.data()), response.size());
-  return pb_decode(&input, static_cast<Fields>(response_fields_), proto_struct);
-}
-
-void Method::CallUnary(ServerCall& call,
-                       const Packet& request,
-                       void* request_struct,
-                       void* response_struct) const {
-  if (!DecodeRequest(call.channel(), request, request_struct)) {
-    return;
-  }
-
-  const Status status = function_.unary(call, request_struct, response_struct);
-  SendResponse(call.channel(), request, response_struct, status);
-}
-
-void Method::CallServerStreaming(ServerCall& call,
-                                 const Packet& request,
-                                 void* request_struct) const {
-  if (!DecodeRequest(call.channel(), request, request_struct)) {
-    return;
-  }
-
-  internal::BaseServerWriter server_writer(call);
-  function_.server_streaming(call, request_struct, server_writer);
-}
-
-bool Method::DecodeRequest(Channel& channel,
-                           const Packet& request,
-                           void* proto_struct) const {
-  auto input = pb_istream_from_buffer(
-      reinterpret_cast<const pb_byte_t*>(request.payload().data()),
-      request.payload().size());
-  if (pb_decode(&input, static_cast<Fields>(request_fields_), proto_struct)) {
-    return true;
-  }
-
-  PW_LOG_WARN("Failed to decode request payload from channel %u",
-              unsigned(channel.id()));
-  channel.Send(Packet::Error(request, Status::DATA_LOSS));
-  return false;
-}
-
-void Method::SendResponse(Channel& channel,
-                          const Packet& request,
-                          const void* response_struct,
-                          Status status) const {
-  Channel::OutputBuffer response_buffer = channel.AcquireBuffer();
-  std::span payload_buffer = response_buffer.payload(request);
-
-  StatusWithSize encoded = EncodeResponse(response_struct, payload_buffer);
-
-  if (encoded.ok()) {
-    Packet response = Packet::Response(request);
-
-    response.set_payload(payload_buffer.first(encoded.size()));
-    response.set_status(status);
-    if (channel.Send(response_buffer, response).ok()) {
-      return;
-    }
-  }
-
-  PW_LOG_WARN("Failed to encode response packet for channel %u",
-              unsigned(channel.id()));
-  channel.Send(response_buffer, Packet::Error(request, Status::INTERNAL));
-}
-
-}  // namespace pw::rpc::internal
diff --git a/pw_rpc/nanopb/method_test.cc b/pw_rpc/nanopb/method_test.cc
deleted file mode 100644
index a408b67..0000000
--- a/pw_rpc/nanopb/method_test.cc
+++ /dev/null
@@ -1,228 +0,0 @@
-// Copyright 2020 The Pigweed Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-//     https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#include "pw_rpc/internal/method.h"
-
-#include <array>
-
-#include "gtest/gtest.h"
-#include "pb_encode.h"
-#include "pw_rpc/server_context.h"
-#include "pw_rpc/service.h"
-#include "pw_rpc_private/internal_test_utils.h"
-#include "pw_rpc_test_protos/test.pb.h"
-
-namespace pw::rpc::internal {
-namespace {
-
-using std::byte;
-
-#define ENCODE_PB(proto, init, result) \
-  _ENCODE_PB_EXPAND(proto, init, result, __LINE__)
-
-#define _ENCODE_PB_EXPAND(proto, init, result, unique) \
-  _ENCODE_PB_IMPL(proto, init, result, unique)
-
-#define _ENCODE_PB_IMPL(proto, init, result, unique)              \
-  std::array<pb_byte_t, 2 * sizeof(proto)> _pb_buffer_##unique{}; \
-  const std::span result =                                        \
-      EncodeProtobuf<proto, proto##_fields>(proto init, _pb_buffer_##unique)
-
-template <typename T, auto fields>
-std::span<const byte> EncodeProtobuf(const T& protobuf,
-                                     std::span<pb_byte_t> buffer) {
-  auto output = pb_ostream_from_buffer(buffer.data(), buffer.size());
-  EXPECT_TRUE(pb_encode(&output, fields, &protobuf));
-  return std::as_bytes(buffer.first(output.bytes_written));
-}
-
-template <typename Implementation>
-class FakeGeneratedService : public Service {
- public:
-  constexpr FakeGeneratedService(uint32_t id) : Service(id, kMethods) {}
-
-  static Status DoNothing(ServerCall& call,
-                          const pw_rpc_test_Empty& request,
-                          pw_rpc_test_Empty& response) {
-    return static_cast<Implementation&>(call.service())
-        .DoNothing(call.context(), request, response);
-  }
-
-  static Status AddFive(ServerCall& call,
-                        const pw_rpc_test_TestRequest& request,
-                        pw_rpc_test_TestResponse& response) {
-    return static_cast<Implementation&>(call.service())
-        .AddFive(call.context(), request, response);
-  }
-
-  static void StartStream(ServerCall& call,
-                          const pw_rpc_test_TestRequest& request,
-                          ServerWriter<pw_rpc_test_TestResponse>& writer) {
-    static_cast<Implementation&>(call.service())
-        .StartStream(call.context(), request, writer);
-  }
-
-  static constexpr std::array<Method, 3> kMethods = {
-      Method::Unary<DoNothing>(
-          10u, pw_rpc_test_Empty_fields, pw_rpc_test_Empty_fields),
-      Method::Unary<AddFive>(
-          11u, pw_rpc_test_TestRequest_fields, pw_rpc_test_TestResponse_fields),
-      Method::ServerStreaming<StartStream>(
-          12u, pw_rpc_test_TestRequest_fields, pw_rpc_test_TestResponse_fields),
-  };
-};
-
-pw_rpc_test_TestRequest last_request;
-ServerWriter<pw_rpc_test_TestResponse> last_writer;
-
-class FakeGeneratedServiceImpl
-    : public FakeGeneratedService<FakeGeneratedServiceImpl> {
- public:
-  FakeGeneratedServiceImpl(uint32_t id) : FakeGeneratedService(id) {}
-
-  Status AddFive(ServerContext&,
-                 const pw_rpc_test_TestRequest& request,
-                 pw_rpc_test_TestResponse& response) {
-    last_request = request;
-    response.value = request.integer + 5;
-    return Status::UNAUTHENTICATED;
-  }
-
-  Status DoNothing(ServerContext&,
-                   const pw_rpc_test_Empty&,
-                   pw_rpc_test_Empty&) {
-    return Status::UNKNOWN;
-  }
-
-  void StartStream(ServerContext&,
-                   const pw_rpc_test_TestRequest& request,
-                   ServerWriter<pw_rpc_test_TestResponse>& writer) {
-    last_request = request;
-    last_writer = std::move(writer);
-  }
-};
-
-TEST(Method, UnaryRpc_SendsResponse) {
-  ENCODE_PB(pw_rpc_test_TestRequest, {.integer = 123}, request);
-
-  const Method& method = std::get<1>(FakeGeneratedServiceImpl::kMethods);
-  ServerContextForTest<FakeGeneratedServiceImpl> context(method);
-  method.Invoke(context.get(), context.packet(request));
-
-  const Packet& response = context.output().sent_packet();
-  EXPECT_EQ(response.status(), Status::UNAUTHENTICATED);
-
-  // Field 1 (encoded as 1 << 3) with 128 as the value.
-  constexpr std::byte expected[]{
-      std::byte{0x08}, std::byte{0x80}, std::byte{0x01}};
-
-  EXPECT_EQ(sizeof(expected), response.payload().size());
-  EXPECT_EQ(0,
-            std::memcmp(expected, response.payload().data(), sizeof(expected)));
-
-  EXPECT_EQ(123, last_request.integer);
-}
-
-TEST(Method, UnaryRpc_InvalidPayload_SendsError) {
-  std::array<byte, 8> bad_payload{byte{0xFF}, byte{0xAA}, byte{0xDD}};
-
-  const Method& method = std::get<0>(FakeGeneratedServiceImpl::kMethods);
-  ServerContextForTest<FakeGeneratedServiceImpl> context(method);
-  method.Invoke(context.get(), context.packet(bad_payload));
-
-  const Packet& packet = context.output().sent_packet();
-  EXPECT_EQ(PacketType::ERROR, packet.type());
-  EXPECT_EQ(Status::DATA_LOSS, packet.status());
-  EXPECT_EQ(context.kServiceId, packet.service_id());
-  EXPECT_EQ(method.id(), packet.method_id());
-}
-
-TEST(Method, UnaryRpc_BufferTooSmallForResponse_SendsInternalError) {
-  constexpr int64_t value = 0x7FFFFFFF'FFFFFF00ll;
-  ENCODE_PB(pw_rpc_test_TestRequest, {.integer = value}, request);
-
-  const Method& method = std::get<1>(FakeGeneratedServiceImpl::kMethods);
-  // Output buffer is too small for the response, but can fit an error packet.
-  ServerContextForTest<FakeGeneratedServiceImpl, 22> context(method);
-  ASSERT_LT(context.output().buffer_size(),
-            context.packet(request).MinEncodedSizeBytes() + request.size() + 1);
-
-  method.Invoke(context.get(), context.packet(request));
-
-  const Packet& packet = context.output().sent_packet();
-  EXPECT_EQ(PacketType::ERROR, packet.type());
-  EXPECT_EQ(Status::INTERNAL, packet.status());
-  EXPECT_EQ(context.kServiceId, packet.service_id());
-  EXPECT_EQ(method.id(), packet.method_id());
-
-  EXPECT_EQ(value, last_request.integer);
-}
-
-TEST(Method, ServerStreamingRpc_SendsNothingWhenInitiallyCalled) {
-  ENCODE_PB(pw_rpc_test_TestRequest, {.integer = 555}, request);
-
-  const Method& method = std::get<2>(FakeGeneratedServiceImpl::kMethods);
-  ServerContextForTest<FakeGeneratedServiceImpl> context(method);
-
-  method.Invoke(context.get(), context.packet(request));
-
-  EXPECT_EQ(0u, context.output().packet_count());
-  EXPECT_EQ(555, last_request.integer);
-}
-
-TEST(Method, ServerWriter_SendsResponse) {
-  const Method& method = std::get<2>(FakeGeneratedServiceImpl::kMethods);
-  ServerContextForTest<FakeGeneratedServiceImpl> context(method);
-
-  method.Invoke(context.get(), context.packet({}));
-
-  EXPECT_EQ(Status::OK, last_writer.Write({.value = 100}));
-
-  ENCODE_PB(pw_rpc_test_TestResponse, {.value = 100}, payload);
-  std::array<byte, 128> encoded_response = {};
-  auto encoded = context.packet(payload).Encode(encoded_response);
-  ASSERT_EQ(Status::OK, encoded.status());
-
-  ASSERT_EQ(encoded.size(), context.output().sent_data().size());
-  EXPECT_EQ(0,
-            std::memcmp(encoded_response.data(),
-                        context.output().sent_data().data(),
-                        encoded.size()));
-}
-
-TEST(Method, ServerStreamingRpc_ServerWriterBufferTooSmall_InternalError) {
-  const Method& method = std::get<2>(FakeGeneratedServiceImpl::kMethods);
-
-  constexpr size_t kNoPayloadPacketSize = 2 /* type */ + 2 /* channel */ +
-                                          5 /* service */ + 5 /* method */ +
-                                          2 /* payload */ + 2 /* status */;
-
-  // Make the buffer barely fit a packet with no payload.
-  ServerContextForTest<FakeGeneratedServiceImpl, kNoPayloadPacketSize> context(
-      method);
-
-  // Verify that the encoded size of a packet with an empty payload is correct.
-  std::array<byte, 128> encoded_response = {};
-  auto encoded = context.packet({}).Encode(encoded_response);
-  ASSERT_EQ(Status::OK, encoded.status());
-  ASSERT_EQ(kNoPayloadPacketSize, encoded.size());
-
-  method.Invoke(context.get(), context.packet({}));
-
-  EXPECT_EQ(Status::OK, last_writer.Write({}));                  // Barely fits
-  EXPECT_EQ(Status::INTERNAL, last_writer.Write({.value = 1}));  // Too big
-}
-
-}  // namespace
-}  // namespace pw::rpc::internal
diff --git a/pw_rpc/nanopb/nanopb_client_call.cc b/pw_rpc/nanopb/nanopb_client_call.cc
new file mode 100644
index 0000000..8b4cc2d
--- /dev/null
+++ b/pw_rpc/nanopb/nanopb_client_call.cc
@@ -0,0 +1,33 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_rpc/nanopb_client_call.h"
+
+namespace pw::rpc {
+namespace internal {
+
+Status BaseNanopbClientCall::SendRequest(const void* request_struct) {
+  std::span<std::byte> buffer = AcquirePayloadBuffer();
+
+  StatusWithSize sws = serde_.EncodeRequest(buffer, request_struct);
+  if (!sws.ok()) {
+    ReleasePayloadBuffer({});
+    return sws.status();
+  }
+
+  return ReleasePayloadBuffer(buffer.first(sws.size()));
+}
+
+}  // namespace internal
+}  // namespace pw::rpc
diff --git a/pw_rpc/nanopb/nanopb_client_call_test.cc b/pw_rpc/nanopb/nanopb_client_call_test.cc
new file mode 100644
index 0000000..4659b95
--- /dev/null
+++ b/pw_rpc/nanopb/nanopb_client_call_test.cc
@@ -0,0 +1,248 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_rpc/nanopb_client_call.h"
+
+#include "gtest/gtest.h"
+#include "pw_rpc_nanopb_private/internal_test_utils.h"
+#include "pw_rpc_private/internal_test_utils.h"
+#include "pw_rpc_test_protos/test.pb.h"
+
+namespace pw::rpc {
+namespace {
+
+constexpr uint32_t kServiceId = 16;
+constexpr uint32_t kUnaryMethodId = 111;
+constexpr uint32_t kServerStreamingMethodId = 112;
+
+class FakeGeneratedServiceClient {
+ public:
+  static NanopbClientCall<UnaryResponseHandler<pw_rpc_test_TestResponse>>
+  TestRpc(Channel& channel,
+          const pw_rpc_test_TestRequest& request,
+          UnaryResponseHandler<pw_rpc_test_TestResponse>& callback) {
+    auto call = NanopbClientCall(&channel,
+                                 kServiceId,
+                                 kUnaryMethodId,
+                                 callback,
+                                 pw_rpc_test_TestRequest_fields,
+                                 pw_rpc_test_TestResponse_fields);
+    call.SendRequest(&request);
+    return call;
+  }
+
+  static NanopbClientCall<
+      ServerStreamingResponseHandler<pw_rpc_test_TestStreamResponse>>
+  TestStreamRpc(Channel& channel,
+                const pw_rpc_test_TestRequest& request,
+                ServerStreamingResponseHandler<pw_rpc_test_TestStreamResponse>&
+                    callback) {
+    auto call = NanopbClientCall(&channel,
+                                 kServiceId,
+                                 kServerStreamingMethodId,
+                                 callback,
+                                 pw_rpc_test_TestRequest_fields,
+                                 pw_rpc_test_TestStreamResponse_fields);
+    call.SendRequest(&request);
+    return call;
+  }
+};
+
+using internal::TestServerStreamingResponseHandler;
+using internal::TestUnaryResponseHandler;
+
+TEST(NanopbClientCall, Unary_SendsRequestPacket) {
+  ClientContextForTest context;
+  TestUnaryResponseHandler<pw_rpc_test_TestResponse> handler;
+
+  auto call = FakeGeneratedServiceClient::TestRpc(
+      context.channel(), {.integer = 123, .status_code = 0}, handler);
+
+  EXPECT_EQ(context.output().packet_count(), 1u);
+  auto packet = context.output().sent_packet();
+  EXPECT_EQ(packet.channel_id(), context.channel().id());
+  EXPECT_EQ(packet.service_id(), kServiceId);
+  EXPECT_EQ(packet.method_id(), kUnaryMethodId);
+
+  PW_DECODE_PB(pw_rpc_test_TestRequest, sent_proto, packet.payload());
+  EXPECT_EQ(sent_proto.integer, 123);
+}
+
+TEST(NanopbClientCall, Unary_InvokesCallbackOnValidResponse) {
+  ClientContextForTest context;
+  TestUnaryResponseHandler<pw_rpc_test_TestResponse> handler;
+
+  auto call = FakeGeneratedServiceClient::TestRpc(
+      context.channel(), {.integer = 123, .status_code = 0}, handler);
+
+  PW_ENCODE_PB(pw_rpc_test_TestResponse, response, .value = 42);
+  context.SendResponse(Status::Ok(), response);
+
+  ASSERT_EQ(handler.responses_received(), 1u);
+  EXPECT_EQ(handler.last_status(), Status::Ok());
+  EXPECT_EQ(handler.last_response().value, 42);
+}
+
+TEST(NanopbClientCall, Unary_InvokesErrorCallbackOnInvalidResponse) {
+  ClientContextForTest context;
+  TestUnaryResponseHandler<pw_rpc_test_TestResponse> handler;
+
+  auto call = FakeGeneratedServiceClient::TestRpc(
+      context.channel(), {.integer = 123, .status_code = 0}, handler);
+
+  constexpr std::byte bad_payload[]{
+      std::byte{0xab}, std::byte{0xcd}, std::byte{0xef}};
+  context.SendResponse(Status::Ok(), bad_payload);
+
+  EXPECT_EQ(handler.responses_received(), 0u);
+  EXPECT_EQ(handler.rpc_error(), Status::DataLoss());
+}
+
+TEST(NanopbClientCall, Unary_InvokesErrorCallbackOnServerError) {
+  ClientContextForTest context;
+  TestUnaryResponseHandler<pw_rpc_test_TestResponse> handler;
+
+  auto call = FakeGeneratedServiceClient::TestRpc(
+      context.channel(), {.integer = 123, .status_code = 0}, handler);
+
+  context.SendPacket(internal::PacketType::SERVER_ERROR, Status::NotFound());
+
+  EXPECT_EQ(handler.responses_received(), 0u);
+  EXPECT_EQ(handler.rpc_error(), Status::NotFound());
+}
+
+TEST(NanopbClientCall, Unary_OnlyReceivesOneResponse) {
+  ClientContextForTest context;
+  TestUnaryResponseHandler<pw_rpc_test_TestResponse> handler;
+
+  auto call = FakeGeneratedServiceClient::TestRpc(
+      context.channel(), {.integer = 123, .status_code = 0}, handler);
+
+  PW_ENCODE_PB(pw_rpc_test_TestResponse, r1, .value = 42);
+  context.SendResponse(Status::Unimplemented(), r1);
+  PW_ENCODE_PB(pw_rpc_test_TestResponse, r2, .value = 44);
+  context.SendResponse(Status::OutOfRange(), r2);
+  PW_ENCODE_PB(pw_rpc_test_TestResponse, r3, .value = 46);
+  context.SendResponse(Status::Internal(), r3);
+
+  EXPECT_EQ(handler.responses_received(), 1u);
+  EXPECT_EQ(handler.last_status(), Status::Unimplemented());
+  EXPECT_EQ(handler.last_response().value, 42);
+}
+
+TEST(NanopbClientCall, ServerStreaming_SendsRequestPacket) {
+  ClientContextForTest<128, 128, 99, kServiceId, kServerStreamingMethodId>
+      context;
+  TestServerStreamingResponseHandler<pw_rpc_test_TestStreamResponse> handler;
+
+  auto call = FakeGeneratedServiceClient::TestStreamRpc(
+      context.channel(), {.integer = 71, .status_code = 0}, handler);
+
+  EXPECT_EQ(context.output().packet_count(), 1u);
+  auto packet = context.output().sent_packet();
+  EXPECT_EQ(packet.channel_id(), context.channel().id());
+  EXPECT_EQ(packet.service_id(), kServiceId);
+  EXPECT_EQ(packet.method_id(), kServerStreamingMethodId);
+
+  PW_DECODE_PB(pw_rpc_test_TestRequest, sent_proto, packet.payload());
+  EXPECT_EQ(sent_proto.integer, 71);
+}
+
+TEST(NanopbClientCall, ServerStreaming_InvokesCallbackOnValidResponse) {
+  ClientContextForTest<128, 128, 99, kServiceId, kServerStreamingMethodId>
+      context;
+  TestServerStreamingResponseHandler<pw_rpc_test_TestStreamResponse> handler;
+
+  auto call = FakeGeneratedServiceClient::TestStreamRpc(
+      context.channel(), {.integer = 71, .status_code = 0}, handler);
+
+  PW_ENCODE_PB(pw_rpc_test_TestStreamResponse, r1, .chunk = {}, .number = 11u);
+  context.SendResponse(Status::Ok(), r1);
+  EXPECT_TRUE(handler.active());
+  EXPECT_EQ(handler.responses_received(), 1u);
+  EXPECT_EQ(handler.last_response().number, 11u);
+
+  PW_ENCODE_PB(pw_rpc_test_TestStreamResponse, r2, .chunk = {}, .number = 22u);
+  context.SendResponse(Status::Ok(), r2);
+  EXPECT_TRUE(handler.active());
+  EXPECT_EQ(handler.responses_received(), 2u);
+  EXPECT_EQ(handler.last_response().number, 22u);
+
+  PW_ENCODE_PB(pw_rpc_test_TestStreamResponse, r3, .chunk = {}, .number = 33u);
+  context.SendResponse(Status::Ok(), r3);
+  EXPECT_TRUE(handler.active());
+  EXPECT_EQ(handler.responses_received(), 3u);
+  EXPECT_EQ(handler.last_response().number, 33u);
+}
+
+TEST(NanopbClientCall, ServerStreaming_ClosesOnFinish) {
+  ClientContextForTest<128, 128, 99, kServiceId, kServerStreamingMethodId>
+      context;
+  TestServerStreamingResponseHandler<pw_rpc_test_TestStreamResponse> handler;
+
+  auto call = FakeGeneratedServiceClient::TestStreamRpc(
+      context.channel(), {.integer = 71, .status_code = 0}, handler);
+
+  PW_ENCODE_PB(pw_rpc_test_TestStreamResponse, r1, .chunk = {}, .number = 11u);
+  context.SendResponse(Status::Ok(), r1);
+  EXPECT_TRUE(handler.active());
+
+  PW_ENCODE_PB(pw_rpc_test_TestStreamResponse, r2, .chunk = {}, .number = 22u);
+  context.SendResponse(Status::Ok(), r2);
+  EXPECT_TRUE(handler.active());
+
+  // Close the stream.
+  context.SendPacket(internal::PacketType::SERVER_STREAM_END,
+                     Status::NotFound());
+
+  PW_ENCODE_PB(pw_rpc_test_TestStreamResponse, r3, .chunk = {}, .number = 33u);
+  context.SendResponse(Status::Ok(), r3);
+  EXPECT_FALSE(handler.active());
+
+  EXPECT_EQ(handler.responses_received(), 2u);
+}
+
+TEST(NanopbClientCall, ServerStreaming_InvokesErrorCallbackOnInvalidResponses) {
+  ClientContextForTest<128, 128, 99, kServiceId, kServerStreamingMethodId>
+      context;
+  TestServerStreamingResponseHandler<pw_rpc_test_TestStreamResponse> handler;
+
+  auto call = FakeGeneratedServiceClient::TestStreamRpc(
+      context.channel(), {.integer = 71, .status_code = 0}, handler);
+
+  PW_ENCODE_PB(pw_rpc_test_TestStreamResponse, r1, .chunk = {}, .number = 11u);
+  context.SendResponse(Status::Ok(), r1);
+  EXPECT_TRUE(handler.active());
+  EXPECT_EQ(handler.responses_received(), 1u);
+  EXPECT_EQ(handler.last_response().number, 11u);
+
+  constexpr std::byte bad_payload[]{
+      std::byte{0xab}, std::byte{0xcd}, std::byte{0xef}};
+  context.SendResponse(Status::Ok(), bad_payload);
+  EXPECT_EQ(handler.responses_received(), 1u);
+  EXPECT_EQ(handler.rpc_error(), Status::DataLoss());
+
+  PW_ENCODE_PB(pw_rpc_test_TestStreamResponse, r2, .chunk = {}, .number = 22u);
+  context.SendResponse(Status::Ok(), r2);
+  EXPECT_TRUE(handler.active());
+  EXPECT_EQ(handler.responses_received(), 2u);
+  EXPECT_EQ(handler.last_response().number, 22u);
+
+  context.SendPacket(internal::PacketType::SERVER_ERROR, Status::NotFound());
+  EXPECT_EQ(handler.responses_received(), 2u);
+  EXPECT_EQ(handler.rpc_error(), Status::NotFound());
+}
+
+}  // namespace
+}  // namespace pw::rpc
diff --git a/pw_rpc/nanopb/nanopb_common.cc b/pw_rpc/nanopb/nanopb_common.cc
new file mode 100644
index 0000000..d3339c6
--- /dev/null
+++ b/pw_rpc/nanopb/nanopb_common.cc
@@ -0,0 +1,55 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_rpc/internal/nanopb_common.h"
+
+#include "pb_decode.h"
+#include "pb_encode.h"
+
+namespace pw::rpc::internal {
+
+// Nanopb 3 uses pb_field_s and Nanopb 4 uses pb_msgdesc_s for fields. The
+// Nanopb version macro is difficult to use, so deduce the correct type from the
+// pb_decode function.
+template <typename DecodeFunction>
+struct NanopbTraits;
+
+template <typename FieldsType>
+struct NanopbTraits<bool(pb_istream_t*, FieldsType, void*)> {
+  using Fields = FieldsType;
+};
+
+using Fields = typename NanopbTraits<decltype(pb_decode)>::Fields;
+
+StatusWithSize NanopbMethodSerde::Encode(NanopbMessageDescriptor fields,
+                                         ByteSpan buffer,
+                                         const void* proto_struct) const {
+  auto output = pb_ostream_from_buffer(
+      reinterpret_cast<pb_byte_t*>(buffer.data()), buffer.size());
+  if (!pb_encode(&output, static_cast<Fields>(fields), proto_struct)) {
+    return StatusWithSize::Internal();
+  }
+
+  return StatusWithSize::Ok(output.bytes_written);
+}
+
+bool NanopbMethodSerde::Decode(NanopbMessageDescriptor fields,
+                               void* proto_struct,
+                               ConstByteSpan buffer) const {
+  auto input = pb_istream_from_buffer(
+      reinterpret_cast<const pb_byte_t*>(buffer.data()), buffer.size());
+  return pb_decode(&input, static_cast<Fields>(fields), proto_struct);
+}
+
+}  // namespace pw::rpc::internal
diff --git a/pw_rpc/nanopb/nanopb_method.cc b/pw_rpc/nanopb/nanopb_method.cc
new file mode 100644
index 0000000..bc96dc9
--- /dev/null
+++ b/pw_rpc/nanopb/nanopb_method.cc
@@ -0,0 +1,87 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_rpc/internal/nanopb_method.h"
+
+#include "pb_decode.h"
+#include "pb_encode.h"
+#include "pw_log/log.h"
+#include "pw_rpc/internal/packet.h"
+
+namespace pw::rpc::internal {
+
+using std::byte;
+
+void NanopbMethod::CallUnary(ServerCall& call,
+                             const Packet& request,
+                             void* request_struct,
+                             void* response_struct) const {
+  if (!DecodeRequest(call.channel(), request, request_struct)) {
+    return;
+  }
+
+  const Status status = function_.unary(call, request_struct, response_struct);
+  SendResponse(call.channel(), request, response_struct, status);
+}
+
+void NanopbMethod::CallServerStreaming(ServerCall& call,
+                                       const Packet& request,
+                                       void* request_struct) const {
+  if (!DecodeRequest(call.channel(), request, request_struct)) {
+    return;
+  }
+
+  internal::BaseServerWriter server_writer(call);
+  function_.server_streaming(call, request_struct, server_writer);
+}
+
+bool NanopbMethod::DecodeRequest(Channel& channel,
+                                 const Packet& request,
+                                 void* proto_struct) const {
+  if (serde_.DecodeRequest(proto_struct, request.payload())) {
+    return true;
+  }
+
+  PW_LOG_WARN("Failed to decode request payload from channel %u",
+              unsigned(channel.id()));
+  channel.Send(Packet::ServerError(request, Status::DataLoss()));
+  return false;
+}
+
+void NanopbMethod::SendResponse(Channel& channel,
+                                const Packet& request,
+                                const void* response_struct,
+                                Status status) const {
+  Channel::OutputBuffer response_buffer = channel.AcquireBuffer();
+  std::span payload_buffer = response_buffer.payload(request);
+
+  StatusWithSize encoded = EncodeResponse(response_struct, payload_buffer);
+
+  if (encoded.ok()) {
+    Packet response = Packet::Response(request);
+
+    response.set_payload(payload_buffer.first(encoded.size()));
+    response.set_status(status);
+    if (channel.Send(response_buffer, response).ok()) {
+      return;
+    }
+  }
+
+  PW_LOG_WARN("Failed to encode response packet for channel %u",
+              unsigned(channel.id()));
+  channel.Send(response_buffer,
+               Packet::ServerError(request, Status::Internal()));
+}
+
+}  // namespace pw::rpc::internal
diff --git a/pw_rpc/nanopb/nanopb_method_test.cc b/pw_rpc/nanopb/nanopb_method_test.cc
new file mode 100644
index 0000000..a02cf89
--- /dev/null
+++ b/pw_rpc/nanopb/nanopb_method_test.cc
@@ -0,0 +1,189 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_rpc/internal/nanopb_method.h"
+
+#include <array>
+
+#include "gtest/gtest.h"
+#include "pw_rpc/internal/nanopb_method_union.h"
+#include "pw_rpc/server_context.h"
+#include "pw_rpc/service.h"
+#include "pw_rpc_nanopb_private/internal_test_utils.h"
+#include "pw_rpc_private/internal_test_utils.h"
+#include "pw_rpc_test_protos/test.pb.h"
+
+namespace pw::rpc::internal {
+namespace {
+
+using std::byte;
+
+pw_rpc_test_TestRequest last_request;
+ServerWriter<pw_rpc_test_TestResponse> last_writer;
+
+Status AddFive(ServerCall&,
+               const pw_rpc_test_TestRequest& request,
+               pw_rpc_test_TestResponse& response) {
+  last_request = request;
+  response.value = request.integer + 5;
+  return Status::Unauthenticated();
+}
+
+Status DoNothing(ServerCall&, const pw_rpc_test_Empty&, pw_rpc_test_Empty&) {
+  return Status::Unknown();
+}
+
+void StartStream(ServerCall&,
+                 const pw_rpc_test_TestRequest& request,
+                 ServerWriter<pw_rpc_test_TestResponse>& writer) {
+  last_request = request;
+  last_writer = std::move(writer);
+}
+
+class FakeService : public Service {
+ public:
+  FakeService(uint32_t id) : Service(id, kMethods) {}
+
+  static constexpr std::array<NanopbMethodUnion, 3> kMethods = {
+      NanopbMethod::Unary<DoNothing>(
+          10u, pw_rpc_test_Empty_fields, pw_rpc_test_Empty_fields),
+      NanopbMethod::Unary<AddFive>(
+          11u, pw_rpc_test_TestRequest_fields, pw_rpc_test_TestResponse_fields),
+      NanopbMethod::ServerStreaming<StartStream>(
+          12u, pw_rpc_test_TestRequest_fields, pw_rpc_test_TestResponse_fields),
+  };
+};
+
+TEST(NanopbMethod, UnaryRpc_SendsResponse) {
+  PW_ENCODE_PB(
+      pw_rpc_test_TestRequest, request, .integer = 123, .status_code = 0);
+
+  const NanopbMethod& method =
+      std::get<1>(FakeService::kMethods).nanopb_method();
+  ServerContextForTest<FakeService> context(method);
+  method.Invoke(context.get(), context.packet(request));
+
+  const Packet& response = context.output().sent_packet();
+  EXPECT_EQ(response.status(), Status::Unauthenticated());
+
+  // Field 1 (encoded as 1 << 3) with 128 as the value.
+  constexpr std::byte expected[]{
+      std::byte{0x08}, std::byte{0x80}, std::byte{0x01}};
+
+  EXPECT_EQ(sizeof(expected), response.payload().size());
+  EXPECT_EQ(0,
+            std::memcmp(expected, response.payload().data(), sizeof(expected)));
+
+  EXPECT_EQ(123, last_request.integer);
+}
+
+TEST(NanopbMethod, UnaryRpc_InvalidPayload_SendsError) {
+  std::array<byte, 8> bad_payload{byte{0xFF}, byte{0xAA}, byte{0xDD}};
+
+  const NanopbMethod& method =
+      std::get<0>(FakeService::kMethods).nanopb_method();
+  ServerContextForTest<FakeService> context(method);
+  method.Invoke(context.get(), context.packet(bad_payload));
+
+  const Packet& packet = context.output().sent_packet();
+  EXPECT_EQ(PacketType::SERVER_ERROR, packet.type());
+  EXPECT_EQ(Status::DataLoss(), packet.status());
+  EXPECT_EQ(context.kServiceId, packet.service_id());
+  EXPECT_EQ(method.id(), packet.method_id());
+}
+
+TEST(NanopbMethod, UnaryRpc_BufferTooSmallForResponse_SendsInternalError) {
+  constexpr int64_t value = 0x7FFFFFFF'FFFFFF00ll;
+  PW_ENCODE_PB(
+      pw_rpc_test_TestRequest, request, .integer = value, .status_code = 0);
+
+  const NanopbMethod& method =
+      std::get<1>(FakeService::kMethods).nanopb_method();
+  // Output buffer is too small for the response, but can fit an error packet.
+  ServerContextForTest<FakeService, 22> context(method);
+  ASSERT_LT(context.output().buffer_size(),
+            context.packet(request).MinEncodedSizeBytes() + request.size() + 1);
+
+  method.Invoke(context.get(), context.packet(request));
+
+  const Packet& packet = context.output().sent_packet();
+  EXPECT_EQ(PacketType::SERVER_ERROR, packet.type());
+  EXPECT_EQ(Status::Internal(), packet.status());
+  EXPECT_EQ(context.kServiceId, packet.service_id());
+  EXPECT_EQ(method.id(), packet.method_id());
+
+  EXPECT_EQ(value, last_request.integer);
+}
+
+TEST(NanopbMethod, ServerStreamingRpc_SendsNothingWhenInitiallyCalled) {
+  PW_ENCODE_PB(
+      pw_rpc_test_TestRequest, request, .integer = 555, .status_code = 0);
+
+  const NanopbMethod& method =
+      std::get<2>(FakeService::kMethods).nanopb_method();
+  ServerContextForTest<FakeService> context(method);
+
+  method.Invoke(context.get(), context.packet(request));
+
+  EXPECT_EQ(0u, context.output().packet_count());
+  EXPECT_EQ(555, last_request.integer);
+}
+
+TEST(NanopbMethod, ServerWriter_SendsResponse) {
+  const NanopbMethod& method =
+      std::get<2>(FakeService::kMethods).nanopb_method();
+  ServerContextForTest<FakeService> context(method);
+
+  method.Invoke(context.get(), context.packet({}));
+
+  EXPECT_EQ(Status::Ok(), last_writer.Write({.value = 100}));
+
+  PW_ENCODE_PB(pw_rpc_test_TestResponse, payload, .value = 100);
+  std::array<byte, 128> encoded_response = {};
+  auto encoded = context.packet(payload).Encode(encoded_response);
+  ASSERT_EQ(Status::Ok(), encoded.status());
+
+  ASSERT_EQ(encoded.value().size(), context.output().sent_data().size());
+  EXPECT_EQ(0,
+            std::memcmp(encoded.value().data(),
+                        context.output().sent_data().data(),
+                        encoded.value().size()));
+}
+
+TEST(NanopbMethod,
+     ServerStreamingRpc_ServerWriterBufferTooSmall_InternalError) {
+  const NanopbMethod& method =
+      std::get<2>(FakeService::kMethods).nanopb_method();
+
+  constexpr size_t kNoPayloadPacketSize = 2 /* type */ + 2 /* channel */ +
+                                          5 /* service */ + 5 /* method */ +
+                                          2 /* payload */ + 2 /* status */;
+
+  // Make the buffer barely fit a packet with no payload.
+  ServerContextForTest<FakeService, kNoPayloadPacketSize> context(method);
+
+  // Verify that the encoded size of a packet with an empty payload is correct.
+  std::array<byte, 128> encoded_response = {};
+  auto encoded = context.packet({}).Encode(encoded_response);
+  ASSERT_EQ(Status::Ok(), encoded.status());
+  ASSERT_EQ(kNoPayloadPacketSize, encoded.value().size());
+
+  method.Invoke(context.get(), context.packet({}));
+
+  EXPECT_EQ(Status::Ok(), last_writer.Write({}));  // Barely fits
+  EXPECT_EQ(Status::Internal(), last_writer.Write({.value = 1}));  // Too big
+}
+
+}  // namespace
+}  // namespace pw::rpc::internal
diff --git a/pw_rpc/nanopb/nanopb_method_union_test.cc b/pw_rpc/nanopb/nanopb_method_union_test.cc
new file mode 100644
index 0000000..ccd8493
--- /dev/null
+++ b/pw_rpc/nanopb/nanopb_method_union_test.cc
@@ -0,0 +1,148 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_rpc/internal/nanopb_method_union.h"
+
+#include <array>
+
+#include "gtest/gtest.h"
+#include "pw_rpc_nanopb_private/internal_test_utils.h"
+#include "pw_rpc_private/internal_test_utils.h"
+#include "pw_rpc_test_protos/test.pb.h"
+
+namespace pw::rpc::internal {
+namespace {
+
+using std::byte;
+
+template <typename Implementation>
+class FakeGeneratedService : public Service {
+ public:
+  constexpr FakeGeneratedService(uint32_t id) : Service(id, kMethods) {}
+
+  static constexpr std::array<NanopbMethodUnion, 4> kMethods = {
+      GetNanopbOrRawMethodFor<&Implementation::DoNothing>(
+          10u, pw_rpc_test_Empty_fields, pw_rpc_test_Empty_fields),
+      GetNanopbOrRawMethodFor<&Implementation::RawStream>(
+          11u, pw_rpc_test_TestRequest_fields, pw_rpc_test_TestResponse_fields),
+      GetNanopbOrRawMethodFor<&Implementation::AddFive>(
+          12u, pw_rpc_test_TestRequest_fields, pw_rpc_test_TestResponse_fields),
+      GetNanopbOrRawMethodFor<&Implementation::StartStream>(
+          13u, pw_rpc_test_TestRequest_fields, pw_rpc_test_TestResponse_fields),
+  };
+};
+
+pw_rpc_test_TestRequest last_request;
+ServerWriter<pw_rpc_test_TestResponse> last_writer;
+RawServerWriter last_raw_writer;
+
+class FakeGeneratedServiceImpl
+    : public FakeGeneratedService<FakeGeneratedServiceImpl> {
+ public:
+  FakeGeneratedServiceImpl(uint32_t id) : FakeGeneratedService(id) {}
+
+  Status AddFive(ServerContext&,
+                 const pw_rpc_test_TestRequest& request,
+                 pw_rpc_test_TestResponse& response) {
+    last_request = request;
+    response.value = request.integer + 5;
+    return Status::Unauthenticated();
+  }
+
+  StatusWithSize DoNothing(ServerContext&, ConstByteSpan, ByteSpan) {
+    return StatusWithSize::Unknown();
+  }
+
+  void RawStream(ServerContext&, ConstByteSpan, RawServerWriter& writer) {
+    last_raw_writer = std::move(writer);
+  }
+
+  void StartStream(ServerContext&,
+                   const pw_rpc_test_TestRequest& request,
+                   ServerWriter<pw_rpc_test_TestResponse>& writer) {
+    last_request = request;
+    last_writer = std::move(writer);
+  }
+};
+
+TEST(NanopbMethodUnion, Raw_CallsUnaryMethod) {
+  const Method& method =
+      std::get<0>(FakeGeneratedServiceImpl::kMethods).method();
+  ServerContextForTest<FakeGeneratedServiceImpl> context(method);
+  method.Invoke(context.get(), context.packet({}));
+
+  const Packet& response = context.output().sent_packet();
+  EXPECT_EQ(response.status(), Status::Unknown());
+}
+
+TEST(NanopbMethodUnion, Raw_CallsServerStreamingMethod) {
+  PW_ENCODE_PB(
+      pw_rpc_test_TestRequest, request, .integer = 555, .status_code = 0);
+
+  const Method& method =
+      std::get<1>(FakeGeneratedServiceImpl::kMethods).method();
+  ServerContextForTest<FakeGeneratedServiceImpl> context(method);
+
+  method.Invoke(context.get(), context.packet(request));
+
+  EXPECT_TRUE(last_raw_writer.open());
+  last_raw_writer.Finish();
+  EXPECT_EQ(context.output().sent_packet().type(),
+            PacketType::SERVER_STREAM_END);
+}
+
+TEST(NanopbMethodUnion, Nanopb_CallsUnaryMethod) {
+  PW_ENCODE_PB(
+      pw_rpc_test_TestRequest, request, .integer = 123, .status_code = 3);
+
+  const Method& method =
+      std::get<2>(FakeGeneratedServiceImpl::kMethods).method();
+  ServerContextForTest<FakeGeneratedServiceImpl> context(method);
+  method.Invoke(context.get(), context.packet(request));
+
+  const Packet& response = context.output().sent_packet();
+  EXPECT_EQ(response.status(), Status::Unauthenticated());
+
+  // Field 1 (encoded as 1 << 3) with 128 as the value.
+  constexpr std::byte expected[]{
+      std::byte{0x08}, std::byte{0x80}, std::byte{0x01}};
+
+  EXPECT_EQ(sizeof(expected), response.payload().size());
+  EXPECT_EQ(0,
+            std::memcmp(expected, response.payload().data(), sizeof(expected)));
+
+  EXPECT_EQ(123, last_request.integer);
+  EXPECT_EQ(3u, last_request.status_code);
+}
+
+TEST(NanopbMethodUnion, Nanopb_CallsServerStreamingMethod) {
+  PW_ENCODE_PB(
+      pw_rpc_test_TestRequest, request, .integer = 555, .status_code = 0);
+
+  const Method& method =
+      std::get<3>(FakeGeneratedServiceImpl::kMethods).method();
+  ServerContextForTest<FakeGeneratedServiceImpl> context(method);
+
+  method.Invoke(context.get(), context.packet(request));
+
+  EXPECT_EQ(555, last_request.integer);
+  EXPECT_TRUE(last_writer.open());
+
+  last_writer.Finish();
+  EXPECT_EQ(context.output().sent_packet().type(),
+            PacketType::SERVER_STREAM_END);
+}
+
+}  // namespace
+}  // namespace pw::rpc::internal
diff --git a/pw_rpc/nanopb/service_method_traits_test.cc b/pw_rpc/nanopb/nanopb_service_method_traits_test.cc
similarity index 64%
rename from pw_rpc/nanopb/service_method_traits_test.cc
rename to pw_rpc/nanopb/nanopb_service_method_traits_test.cc
index 5ca9280..8043e12 100644
--- a/pw_rpc/nanopb/service_method_traits_test.cc
+++ b/pw_rpc/nanopb/nanopb_service_method_traits_test.cc
@@ -12,24 +12,20 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
-#include "pw_rpc/internal/service_method_traits.h"
+#include "pw_rpc/internal/nanopb_service_method_traits.h"
 
 #include <type_traits>
 
 #include "pw_rpc/echo_service_nanopb.h"
+#include "pw_rpc/internal/hash.h"
 
 namespace pw::rpc::internal {
 namespace {
 
-static_assert(std::is_same_v<ServiceMethodTraits<&EchoService::Echo>::Service,
-                             EchoService>);
 static_assert(
-    std::is_same_v<ServiceMethodTraits<&EchoService::Echo>::BaseService,
-                   generated::EchoService<EchoService>>);
-
-static_assert(
-    std::is_same_v<decltype(ServiceMethodTraits<&EchoService::Echo>::method()),
-                   const Method&>);
+    std::is_same_v<decltype(NanopbServiceMethodTraits<&EchoService::Echo,
+                                                      Hash("Echo")>::method()),
+                   const NanopbMethod&>);
 
 }  // namespace
 }  // namespace pw::rpc::internal
diff --git a/pw_rpc/nanopb/public/pw_rpc/echo_service_nanopb.h b/pw_rpc/nanopb/public/pw_rpc/echo_service_nanopb.h
index 0ce388c..127c712 100644
--- a/pw_rpc/nanopb/public/pw_rpc/echo_service_nanopb.h
+++ b/pw_rpc/nanopb/public/pw_rpc/echo_service_nanopb.h
@@ -25,7 +25,7 @@
               const pw_rpc_EchoMessage& request,
               pw_rpc_EchoMessage& response) {
     std::strncpy(response.msg, request.msg, sizeof(response.msg));
-    return Status::OK;
+    return Status::Ok();
   }
 };
 
diff --git a/pw_rpc/nanopb/public/pw_rpc/internal/nanopb_common.h b/pw_rpc/nanopb/public/pw_rpc/internal/nanopb_common.h
new file mode 100644
index 0000000..de31463
--- /dev/null
+++ b/pw_rpc/nanopb/public/pw_rpc/internal/nanopb_common.h
@@ -0,0 +1,63 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include "pw_bytes/span.h"
+#include "pw_status/status_with_size.h"
+
+namespace pw::rpc::internal {
+
+// Use a void* to cover both Nanopb 3's pb_field_s and Nanopb 4's pb_msgdesc_s.
+using NanopbMessageDescriptor = const void*;
+
+// Serializer/deserializer for nanopb message request and response structs in an
+// RPC method.
+class NanopbMethodSerde {
+ public:
+  constexpr NanopbMethodSerde(NanopbMessageDescriptor request_fields,
+                              NanopbMessageDescriptor response_fields)
+      : request_fields_(request_fields), response_fields_(response_fields) {}
+
+  StatusWithSize EncodeRequest(ByteSpan buffer,
+                               const void* proto_struct) const {
+    return Encode(request_fields_, buffer, proto_struct);
+  }
+  StatusWithSize EncodeResponse(ByteSpan buffer,
+                                const void* proto_struct) const {
+    return Encode(response_fields_, buffer, proto_struct);
+  }
+
+  bool DecodeRequest(void* proto_struct, ConstByteSpan buffer) const {
+    return Decode(request_fields_, proto_struct, buffer);
+  }
+  bool DecodeResponse(void* proto_struct, ConstByteSpan buffer) const {
+    return Decode(response_fields_, proto_struct, buffer);
+  }
+
+ private:
+  // Encodes a nanopb protobuf struct to serialized wire format.
+  StatusWithSize Encode(NanopbMessageDescriptor fields,
+                        ByteSpan buffer,
+                        const void* proto_struct) const;
+
+  // Decodes a serialized protobuf to a nanopb struct.
+  bool Decode(NanopbMessageDescriptor fields,
+              void* proto_struct,
+              ConstByteSpan buffer) const;
+
+  NanopbMessageDescriptor request_fields_;
+  NanopbMessageDescriptor response_fields_;
+};
+
+}  // namespace pw::rpc::internal
diff --git a/pw_rpc/nanopb/public_overrides/pw_rpc/internal/method.h b/pw_rpc/nanopb/public/pw_rpc/internal/nanopb_method.h
similarity index 67%
rename from pw_rpc/nanopb/public_overrides/pw_rpc/internal/method.h
rename to pw_rpc/nanopb/public/pw_rpc/internal/nanopb_method.h
index 18d4e9b..78b798c 100644
--- a/pw_rpc/nanopb/public_overrides/pw_rpc/internal/method.h
+++ b/pw_rpc/nanopb/public/pw_rpc/internal/nanopb_method.h
@@ -19,8 +19,10 @@
 #include <span>
 #include <type_traits>
 
-#include "pw_rpc/internal/base_method.h"
 #include "pw_rpc/internal/base_server_writer.h"
+#include "pw_rpc/internal/method.h"
+#include "pw_rpc/internal/method_type.h"
+#include "pw_rpc/internal/nanopb_common.h"
 #include "pw_rpc/server_context.h"
 #include "pw_status/status.h"
 #include "pw_status/status_with_size.h"
@@ -38,8 +40,14 @@
   ServerWriter(ServerWriter&&) = default;
   ServerWriter& operator=(ServerWriter&&) = default;
 
-  // Writes a response struct. Returns Status::OK on success, or
-  // Status::FAILED_PRECONDITION if the writer is closed.
+  // Writes a response struct. Returns the following Status codes:
+  //
+  //   OK - the response was successfully sent
+  //   FAILED_PRECONDITION - the writer is closed
+  //   INTERNAL - pw_rpc was unable to encode the Nanopb protobuf
+  //   other errors - the ChannelOutput failed to send the packet; the error
+  //       codes are determined by the ChannelOutput implementation
+  //
   Status Write(const T& response);
 };
 
@@ -47,19 +55,14 @@
 
 class Packet;
 
-// Use a void* to cover both Nanopb 3's pb_field_s and Nanopb 4's pb_msgdesc_s.
-using NanopbMessageDescriptor = const void*;
-
-enum class Type { kUnary, kServerStreaming, kClientStreaming, kBidiStreaming };
-
 // Templated false value for use in static_assert(false) statements.
 template <typename...>
-constexpr std::false_type kFalse{};
+constexpr std::false_type kFalseValue{};
 
 // Extracts the request and response proto types from a method.
 template <typename Method>
 struct RpcTraits {
-  static_assert(kFalse<Method>,
+  static_assert(kFalseValue<Method>,
                 "The selected function is not an RPC service method");
 };
 
@@ -69,7 +72,7 @@
   using Request = RequestType;
   using Response = ResponseType;
 
-  static constexpr Type kType = Type::kUnary;
+  static constexpr MethodType kType = MethodType::kUnary;
   static constexpr bool kServerStreaming = false;
   static constexpr bool kClientStreaming = false;
 };
@@ -81,7 +84,7 @@
   using Request = RequestType;
   using Response = ResponseType;
 
-  static constexpr Type kType = Type::kServerStreaming;
+  static constexpr MethodType kType = MethodType::kServerStreaming;
   static constexpr bool kServerStreaming = true;
   static constexpr bool kClientStreaming = false;
 };
@@ -110,80 +113,81 @@
 template <auto method>
 using Response = typename RpcTraits<decltype(method)>::Response;
 
-// The Method class invokes user-defined service methods. When a pw::rpc::Server
-// receives an RPC request packet, it looks up the matching Method instance and
-// calls its Invoke method, which eventually calls into the user-defined RPC
-// function.
+// The NanopbMethod class invokes user-defined service methods. When a
+// pw::rpc::Server receives an RPC request packet, it looks up the matching
+// NanopbMethod instance and calls its Invoke method, which eventually calls
+// into the user-defined RPC function.
 //
-// A Method instance is created for each user-defined RPC in the pw_rpc
-// generated code. The Nanopb Method stores a pointer to the RPC function, a
+// A NanopbMethod instance is created for each user-defined RPC in the pw_rpc
+// generated code. The NanopbMethod stores a pointer to the RPC function, a
 // pointer to an "invoker" function that calls that function, and pointers to
 // the Nanopb descriptors used to encode and decode request and response
 // structs.
-class Method : public BaseMethod {
+class NanopbMethod : public Method {
  public:
-  // Creates a Method for a unary RPC.
+  // Creates a NanopbMethod for a unary RPC.
   template <auto method>
-  static constexpr Method Unary(uint32_t id,
-                                NanopbMessageDescriptor request,
-                                NanopbMessageDescriptor response) {
+  static constexpr NanopbMethod Unary(uint32_t id,
+                                      NanopbMessageDescriptor request,
+                                      NanopbMessageDescriptor response) {
     // Define a wrapper around the user-defined function that takes the
     // request and response protobuf structs as void*. This wrapper is stored
     // generically in the Function union, defined below.
     //
     // In optimized builds, the compiler inlines the user-defined function into
     // this wrapper, elminating any overhead.
-    return Method({.unary =
-                       [](ServerCall& call, const void* req, void* resp) {
-                         return method(
-                             call,
-                             *static_cast<const Request<method>*>(req),
-                             *static_cast<Response<method>*>(resp));
-                       }},
-                  UnaryInvoker<AllocateSpaceFor<Request<method>>(),
-                               AllocateSpaceFor<Response<method>>()>,
-                  id,
-                  request,
-                  response);
-  }
-
-  // Creates a Method for a server-streaming RPC.
-  template <auto method>
-  static constexpr Method ServerStreaming(uint32_t id,
-                                          NanopbMessageDescriptor request,
-                                          NanopbMessageDescriptor response) {
-    // Define a wrapper around the user-defined function that takes the request
-    // struct as void* and a BaseServerWriter instead of the templated
-    // ServerWriter class. This wrapper is stored generically in the Function
-    // union, defined below.
-    return Method(
-        {.server_streaming =
-             [](ServerCall& call, const void* req, BaseServerWriter& resp) {
-               method(call,
-                      *static_cast<const Request<method>*>(req),
-                      static_cast<ServerWriter<Response<method>>&>(resp));
-             }},
-        ServerStreamingInvoker<AllocateSpaceFor<Request<method>>()>,
+    return NanopbMethod(
         id,
+        UnaryInvoker<AllocateSpaceFor<Request<method>>(),
+                     AllocateSpaceFor<Response<method>>()>,
+        Function{.unary =
+                     [](ServerCall& call, const void* req, void* resp) {
+                       return method(call,
+                                     *static_cast<const Request<method>*>(req),
+                                     *static_cast<Response<method>*>(resp));
+                     }},
         request,
         response);
   }
 
-  // The pw::rpc::Server calls method.Invoke to call a user-defined RPC. Invoke
-  // calls the invoker function, which encodes and decodes the request and
-  // response (if any) and calls the user-defined RPC function.
-  void Invoke(ServerCall& call, const Packet& request) const {
-    return invoker_(*this, call, request);
+  // Creates a NanopbMethod for a server-streaming RPC.
+  template <auto method>
+  static constexpr NanopbMethod ServerStreaming(
+      uint32_t id,
+      NanopbMessageDescriptor request,
+      NanopbMessageDescriptor response) {
+    // Define a wrapper around the user-defined function that takes the request
+    // struct as void* and a BaseServerWriter instead of the templated
+    // ServerWriter class. This wrapper is stored generically in the Function
+    // union, defined below.
+    return NanopbMethod(
+        id,
+        ServerStreamingInvoker<AllocateSpaceFor<Request<method>>()>,
+        Function{.server_streaming =
+                     [](ServerCall& call,
+                        const void* req,
+                        BaseServerWriter& writer) {
+                       method(call,
+                              *static_cast<const Request<method>*>(req),
+                              static_cast<ServerWriter<Response<method>>&>(
+                                  writer));
+                     }},
+        request,
+        response);
   }
 
   // Encodes a response protobuf with Nanopb to the provided buffer.
   StatusWithSize EncodeResponse(const void* proto_struct,
-                                std::span<std::byte> buffer) const;
+                                std::span<std::byte> buffer) const {
+    return serde_.EncodeResponse(buffer, proto_struct);
+  }
 
   // Decodes a response protobuf with Nanopb to the provided buffer. For testing
   // use.
   bool DecodeResponse(std::span<const std::byte> response,
-                      void* proto_struct) const;
+                      void* proto_struct) const {
+    return serde_.DecodeResponse(proto_struct, response);
+  }
 
  private:
   // Generic version of the unary RPC function signature:
@@ -218,20 +222,12 @@
     return std::max(sizeof(T), size_t(64));
   }
 
-  // The Invoker allocates request/response structs on the stack and calls the
-  // RPC according to its type (unary, server streaming, etc.).
-  using Invoker = void (&)(const Method&, ServerCall&, const Packet&);
-
-  constexpr Method(Function function,
-                   Invoker invoker,
-                   uint32_t id,
-                   NanopbMessageDescriptor request,
-                   NanopbMessageDescriptor response)
-      : BaseMethod(id),
-        invoker_(invoker),
-        function_(function),
-        request_fields_(request),
-        response_fields_(response) {}
+  constexpr NanopbMethod(uint32_t id,
+                         Invoker invoker,
+                         Function function,
+                         NanopbMessageDescriptor request,
+                         NanopbMessageDescriptor response)
+      : Method(id, invoker), function_(function), serde_(request, response) {}
 
   void CallUnary(ServerCall& call,
                  const Packet& request,
@@ -256,7 +252,8 @@
     std::aligned_storage_t<response_size, alignof(std::max_align_t)>
         response_struct{};
 
-    method.CallUnary(call, request, &request_struct, &response_struct);
+    static_cast<const NanopbMethod&>(method).CallUnary(
+        call, request, &request_struct, &response_struct);
   }
 
   // Invoker function for server streaming RPCs. Allocates space for a request
@@ -269,7 +266,8 @@
     std::aligned_storage_t<request_size, alignof(std::max_align_t)>
         request_struct{};
 
-    method.CallServerStreaming(call, request, &request_struct);
+    static_cast<const NanopbMethod&>(method).CallServerStreaming(
+        call, request, &request_struct);
   }
 
   // Decodes a request protobuf with Nanopb to the provided buffer. Sends an
@@ -284,16 +282,11 @@
                     const void* response_struct,
                     Status status) const;
 
-  // Allocates memory for the request/response structs and invokes the
-  // user-defined RPC based on its type (unary, server streaming, etc.).
-  Invoker invoker_;
-
   // Stores the user-defined RPC in a generic wrapper.
   Function function_;
 
-  // Pointers to the descriptors used to encode and decode Nanopb structs.
-  NanopbMessageDescriptor request_fields_;
-  NanopbMessageDescriptor response_fields_;
+  // Serde used to encode and decode Nanopb structs.
+  NanopbMethodSerde serde_;
 };
 
 }  // namespace internal
@@ -302,12 +295,15 @@
 Status ServerWriter<T>::Write(const T& response) {
   std::span<std::byte> buffer = AcquirePayloadBuffer();
 
-  if (auto result = method().EncodeResponse(&response, buffer); result.ok()) {
+  if (auto result =
+          static_cast<const internal::NanopbMethod&>(method()).EncodeResponse(
+              &response, buffer);
+      result.ok()) {
     return ReleasePayloadBuffer(buffer.first(result.size()));
   }
 
-  ReleasePayloadBuffer({});
-  return Status::INTERNAL;
+  ReleasePayloadBuffer();
+  return Status::Internal();
 }
 
 }  // namespace pw::rpc
diff --git a/pw_rpc/nanopb/public/pw_rpc/internal/nanopb_method_union.h b/pw_rpc/nanopb/public/pw_rpc/internal/nanopb_method_union.h
new file mode 100644
index 0000000..eef6e47
--- /dev/null
+++ b/pw_rpc/nanopb/public/pw_rpc/internal/nanopb_method_union.h
@@ -0,0 +1,126 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include "pw_bytes/span.h"
+#include "pw_rpc/internal/method_union.h"
+#include "pw_rpc/internal/nanopb_method.h"
+#include "pw_rpc/internal/raw_method_union.h"
+
+namespace pw::rpc::internal {
+
+// Method union which holds either a nanopb or a raw method.
+class NanopbMethodUnion : public MethodUnion {
+ public:
+  constexpr NanopbMethodUnion(RawMethod&& method)
+      : impl_({.raw = std::move(method)}) {}
+  constexpr NanopbMethodUnion(NanopbMethod&& method)
+      : impl_({.nanopb = std::move(method)}) {}
+
+  constexpr const Method& method() const { return impl_.method; }
+  constexpr const RawMethod& raw_method() const { return impl_.raw; }
+  constexpr const NanopbMethod& nanopb_method() const { return impl_.nanopb; }
+
+ private:
+  union {
+    Method method;
+    RawMethod raw;
+    NanopbMethod nanopb;
+  } impl_;
+};
+
+// Specialization for a nanopb unary method.
+template <typename T, typename RequestType, typename ResponseType>
+struct MethodTraits<Status (T::*)(
+    ServerContext&, const RequestType&, ResponseType&)> {
+  static constexpr MethodType kType = MethodType::kUnary;
+
+  using Service = T;
+  using Implementation = NanopbMethod;
+  using Request = RequestType;
+  using Response = ResponseType;
+};
+
+// Specialization for a nanopb server streaming method.
+template <typename T, typename RequestType, typename ResponseType>
+struct MethodTraits<void (T::*)(
+    ServerContext&, const RequestType&, ServerWriter<ResponseType>&)> {
+  static constexpr MethodType kType = MethodType::kServerStreaming;
+
+  using Service = T;
+  using Implementation = NanopbMethod;
+  using Request = RequestType;
+  using Response = ResponseType;
+};
+
+template <auto method>
+constexpr bool kIsNanopb =
+    std::is_same_v<MethodImplementation<method>, NanopbMethod>;
+
+// Deduces the type of an implemented nanopb service method from its signature,
+// and returns the appropriate Method object to invoke it.
+template <auto method>
+constexpr NanopbMethod GetNanopbMethodFor(
+    uint32_t id,
+    NanopbMessageDescriptor request_fields,
+    NanopbMessageDescriptor response_fields) {
+  static_assert(
+      kIsNanopb<method>,
+      "GetNanopbMethodFor should only be called on nanopb RPC methods");
+
+  using Traits = MethodTraits<decltype(method)>;
+  using ServiceImpl = typename Traits::Service;
+
+  if constexpr (Traits::kType == MethodType::kUnary) {
+    constexpr auto invoker = +[](ServerCall& call,
+                                 const typename Traits::Request& request,
+                                 typename Traits::Response& response) {
+      return (static_cast<ServiceImpl&>(call.service()).*method)(
+          call.context(), request, response);
+    };
+    return NanopbMethod::Unary<invoker>(id, request_fields, response_fields);
+  }
+
+  if constexpr (Traits::kType == MethodType::kServerStreaming) {
+    constexpr auto invoker =
+        +[](ServerCall& call,
+            const typename Traits::Request& request,
+            ServerWriter<typename Traits::Response>& writer) {
+          (static_cast<ServiceImpl&>(call.service()).*method)(
+              call.context(), request, writer);
+        };
+    return NanopbMethod::ServerStreaming<invoker>(
+        id, request_fields, response_fields);
+  }
+
+  constexpr auto fake_invoker =
+      +[](ServerCall&, const int&, ServerWriter<int>&) {};
+  return NanopbMethod::ServerStreaming<fake_invoker>(0, nullptr, nullptr);
+}
+
+// Returns either a raw or nanopb method object, depending on an implemented
+// function's signature.
+template <auto method>
+constexpr auto GetNanopbOrRawMethodFor(
+    uint32_t id,
+    [[maybe_unused]] NanopbMessageDescriptor request_fields,
+    [[maybe_unused]] NanopbMessageDescriptor response_fields) {
+  if constexpr (kIsRaw<method>) {
+    return GetRawMethodFor<method>(id);
+  } else {
+    return GetNanopbMethodFor<method>(id, request_fields, response_fields);
+  }
+};
+
+}  // namespace pw::rpc::internal
diff --git a/pw_rpc/nanopb/public/pw_rpc/internal/nanopb_service_method_traits.h b/pw_rpc/nanopb/public/pw_rpc/internal/nanopb_service_method_traits.h
new file mode 100644
index 0000000..866ceef
--- /dev/null
+++ b/pw_rpc/nanopb/public/pw_rpc/internal/nanopb_service_method_traits.h
@@ -0,0 +1,27 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include "pw_rpc/internal/nanopb_method_union.h"
+#include "pw_rpc/internal/service_method_traits.h"
+
+namespace pw::rpc::internal {
+
+template <auto impl_method, uint32_t method_id>
+using NanopbServiceMethodTraits =
+    ServiceMethodTraits<&MethodBaseService<impl_method>::NanopbMethodFor,
+                        impl_method,
+                        method_id>;
+
+}  // namespace pw::rpc::internal
diff --git a/pw_rpc/nanopb/public/pw_rpc/internal/service_method_traits.h b/pw_rpc/nanopb/public/pw_rpc/internal/service_method_traits.h
deleted file mode 100644
index a73ae14..0000000
--- a/pw_rpc/nanopb/public/pw_rpc/internal/service_method_traits.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2020 The Pigweed Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-//     https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-#pragma once
-
-#include "pw_rpc/internal/method.h"
-
-namespace pw::rpc::internal {
-
-// Identifies a base class from a member function it defines. This should be
-// used with decltype to retrieve the base class.
-template <typename T, typename U>
-T BaseFromMember(U T::*);
-
-// Gets information about a service and method at compile-time. Uses a pointer
-// to a member function of the service implementation to identify the service
-// class, generated service class, and Method object.This class is friended by
-// the generated service classes to give it access to the internal method list.
-template <auto impl_method>
-class ServiceMethodTraits {
- public:
-  ServiceMethodTraits() = delete;
-
-  // Type of the service implementation derived class.
-  using Service = typename internal::RpcTraits<decltype(impl_method)>::Service;
-
-  // Type of the generic service base class.
-  using BaseService =
-      decltype(BaseFromMember(&Service::_PwRpcInternalGeneratedBase));
-
-  // Reference to the Method object corresponding to this method.
-  static constexpr const Method& method() {
-    return *BaseService::template MethodFor<impl_method>();
-  }
-
-  static_assert(BaseService::template MethodFor<impl_method>() != nullptr,
-                "The selected function is not an RPC service method");
-};
-
-}  // namespace pw::rpc::internal
diff --git a/pw_rpc/nanopb/public/pw_rpc/nanopb_client_call.h b/pw_rpc/nanopb/public/pw_rpc/nanopb_client_call.h
new file mode 100644
index 0000000..44e8475
--- /dev/null
+++ b/pw_rpc/nanopb/public/pw_rpc/nanopb_client_call.h
@@ -0,0 +1,185 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <new>
+
+#include "pw_bytes/span.h"
+#include "pw_rpc/internal/base_client_call.h"
+#include "pw_rpc/internal/method_type.h"
+#include "pw_rpc/internal/nanopb_common.h"
+#include "pw_status/status.h"
+
+namespace pw::rpc {
+
+// Response handler callback for unary RPC methods.
+template <typename Response>
+class UnaryResponseHandler {
+ public:
+  virtual ~UnaryResponseHandler() = default;
+
+  // Called when the response is received from the server with the method's
+  // status and the deserialized response struct.
+  virtual void ReceivedResponse(Status status, const Response& response) = 0;
+
+  // Called when an error occurs internally in the RPC client or server.
+  virtual void RpcError(Status) {}
+};
+
+// Response handler callbacks for server streaming RPC methods.
+template <typename Response>
+class ServerStreamingResponseHandler {
+ public:
+  virtual ~ServerStreamingResponseHandler() = default;
+
+  // Called on every response received from the server with the deserialized
+  // response struct.
+  virtual void ReceivedResponse(const Response& response) = 0;
+
+  // Called when the server ends the stream with the overall RPC status.
+  virtual void Complete(Status status) = 0;
+
+  // Called when an error occurs internally in the RPC client or server.
+  virtual void RpcError(Status) {}
+};
+
+namespace internal {
+
+// Non-templated nanopb base class providing protobuf encoding and decoding.
+class BaseNanopbClientCall : public BaseClientCall {
+ public:
+  Status SendRequest(const void* request_struct);
+
+ protected:
+  constexpr BaseNanopbClientCall(
+      rpc::Channel* channel,
+      uint32_t service_id,
+      uint32_t method_id,
+      ResponseHandler handler,
+      internal::NanopbMessageDescriptor request_fields,
+      internal::NanopbMessageDescriptor response_fields)
+      : BaseClientCall(channel, service_id, method_id, handler),
+        serde_(request_fields, response_fields) {}
+
+  constexpr const internal::NanopbMethodSerde& serde() const { return serde_; }
+
+ private:
+  internal::NanopbMethodSerde serde_;
+};
+
+template <typename Callback>
+struct CallbackTraits {};
+
+template <typename ResponseType>
+struct CallbackTraits<UnaryResponseHandler<ResponseType>> {
+  using Response = ResponseType;
+
+  static constexpr MethodType kType = MethodType::kUnary;
+};
+
+template <typename ResponseType>
+struct CallbackTraits<ServerStreamingResponseHandler<ResponseType>> {
+  using Response = ResponseType;
+
+  static constexpr MethodType kType = MethodType::kServerStreaming;
+};
+
+}  // namespace internal
+
+template <typename Callback>
+class NanopbClientCall : public internal::BaseNanopbClientCall {
+ public:
+  constexpr NanopbClientCall(Channel* channel,
+                             uint32_t service_id,
+                             uint32_t method_id,
+                             Callback& callback,
+                             internal::NanopbMessageDescriptor request_fields,
+                             internal::NanopbMessageDescriptor response_fields)
+      : BaseNanopbClientCall(channel,
+                             service_id,
+                             method_id,
+                             &ResponseHandler,
+                             request_fields,
+                             response_fields),
+        callback_(callback) {}
+
+ private:
+  using Traits = internal::CallbackTraits<Callback>;
+  using Response = typename Traits::Response;
+
+  // Buffer into which the nanopb struct is decoded. Its contents are unknown,
+  // so it is aligned to maximum alignment to accommodate any type.
+  using ResponseBuffer =
+      std::aligned_storage_t<sizeof(Response), alignof(std::max_align_t)>;
+
+  friend class Client;
+
+  static void ResponseHandler(internal::BaseClientCall& call,
+                              const internal::Packet& packet) {
+    static_cast<NanopbClientCall<Callback>&>(call).HandleResponse(packet);
+  }
+
+  void HandleResponse(const internal::Packet& packet) {
+    if constexpr (Traits::kType == internal::MethodType::kUnary) {
+      InvokeUnaryCallback(packet);
+    }
+    if constexpr (Traits::kType == internal::MethodType::kServerStreaming) {
+      InvokeServerStreamingCallback(packet);
+    }
+  }
+
+  void InvokeUnaryCallback(const internal::Packet& packet) {
+    if (packet.type() == internal::PacketType::SERVER_ERROR) {
+      callback_.RpcError(packet.status());
+      return;
+    }
+
+    ResponseBuffer response_struct{};
+
+    if (serde().DecodeResponse(&response_struct, packet.payload())) {
+      callback_.ReceivedResponse(
+          packet.status(),
+          *std::launder(reinterpret_cast<Response*>(&response_struct)));
+    } else {
+      callback_.RpcError(Status::DataLoss());
+    }
+
+    Unregister();
+  }
+
+  void InvokeServerStreamingCallback(const internal::Packet& packet) {
+    if (packet.type() == internal::PacketType::SERVER_ERROR) {
+      callback_.RpcError(packet.status());
+      return;
+    }
+
+    if (packet.type() == internal::PacketType::SERVER_STREAM_END) {
+      callback_.Complete(packet.status());
+      return;
+    }
+
+    ResponseBuffer response_struct{};
+
+    if (serde().DecodeResponse(&response_struct, packet.payload())) {
+      callback_.ReceivedResponse(
+          *std::launder(reinterpret_cast<Response*>(&response_struct)));
+    } else {
+      callback_.RpcError(Status::DataLoss());
+    }
+  }
+
+  Callback& callback_;
+};
+
+}  // namespace pw::rpc
diff --git a/pw_rpc/nanopb/public/pw_rpc/test_method_context.h b/pw_rpc/nanopb/public/pw_rpc/nanopb_test_method_context.h
similarity index 64%
rename from pw_rpc/nanopb/public/pw_rpc/test_method_context.h
rename to pw_rpc/nanopb/public/pw_rpc/nanopb_test_method_context.h
index 4fc17a5..6ff26d9 100644
--- a/pw_rpc/nanopb/public/pw_rpc/test_method_context.h
+++ b/pw_rpc/nanopb/public/pw_rpc/nanopb_test_method_context.h
@@ -16,38 +16,38 @@
 #include <tuple>
 #include <utility>
 
-#include "gtest/gtest.h"
+#include "pw_assert/assert.h"
 #include "pw_containers/vector.h"
-#include "pw_preprocessor/macro_arg_count.h"
+#include "pw_preprocessor/arguments.h"
 #include "pw_rpc/channel.h"
 #include "pw_rpc/internal/hash.h"
-#include "pw_rpc/internal/method.h"
+#include "pw_rpc/internal/nanopb_method.h"
+#include "pw_rpc/internal/nanopb_service_method_traits.h"
 #include "pw_rpc/internal/packet.h"
 #include "pw_rpc/internal/server.h"
-#include "pw_rpc/internal/service_method_traits.h"
 
 namespace pw::rpc {
 
 // Declares a context object that may be used to invoke an RPC. The context is
-// declared with a pointer to the service member function (&Service::Method).
+// declared with the name of the implemented service and the method to invoke.
 // The RPC can then be invoked with the call method.
 //
 // For a unary RPC, context.call(request) returns the status, and the response
 // struct can be accessed via context.response().
 //
-//   pw::rpc::TestMethodContext<&my::CoolService::TheMethod> context;
-//   EXPECT_EQ(Status::OK, context.call({.some_arg = 123}));
+//   PW_NANOPB_TEST_METHOD_CONTEXT(my::CoolService, TheMethod) context;
+//   EXPECT_EQ(Status::Ok(), context.call({.some_arg = 123}));
 //   EXPECT_EQ(500, context.response().some_response_value);
 //
 // For a server streaming RPC, context.call(request) invokes the method. As in a
 // normal RPC, the method completes when the ServerWriter's Finish method is
 // called (or it goes out of scope).
 //
-//   pw::rpc::TestMethodContext<&my::CoolService::TheStreamingMethod> context;
+//   PW_NANOPB_TEST_METHOD_CONTEXT(my::CoolService, TheStreamingMethod) context;
 //   context.call({.some_arg = 123});
 //
 //   EXPECT_TRUE(context.done());  // Check that the RPC completed
-//   EXPECT_EQ(Status::OK, context.status());  // Check the status
+//   EXPECT_EQ(Status::Ok(), context.status());  // Check the status
 //
 //   EXPECT_EQ(3u, context.responses().size());
 //   EXPECT_EQ(123, context.responses()[0].value); // check individual responses
@@ -56,35 +56,43 @@
 //     // iterate over the responses
 //   }
 //
-// TestMethodContext forwards its constructor arguments to the underlying
-// serivce. For example:
+// PW_NANOPB_TEST_METHOD_CONTEXT forwards its constructor arguments to the
+// underlying serivce. For example:
 //
-//   pw::rpc::TestMethodContext<&MyService::Go> context(serivce, args);
+//   PW_NANOPB_TEST_METHOD_CONTEXT(MyService, Go) context(service, args);
 //
-// pw::rpc::TestMethodContext takes two optional template arguments:
+// PW_NANOPB_TEST_METHOD_CONTEXT takes two optional arguments:
 //
 //   size_t max_responses: maximum responses to store; ignored unless streaming
 //   size_t output_size_bytes: buffer size; must be large enough for a packet
 //
 // Example:
 //
-//   pw::rpc::TestMethodContext<&MyService::BestMethod, 3, 256> context;
+//   PW_NANOPB_TEST_METHOD_CONTEXT(MyService, BestMethod, 3, 256) context;
 //   ASSERT_EQ(3u, context.responses().max_size());
 //
-template <auto method, size_t max_responses = 4, size_t output_size_bytes = 128>
-class TestMethodContext;
 
-// Internal classes that implement TestMethodContext.
-namespace internal::test {
+#define PW_NANOPB_TEST_METHOD_CONTEXT(service, method, ...)              \
+  ::pw::rpc::NanopbTestMethodContext<&service::method,                   \
+                                     ::pw::rpc::internal::Hash(#method), \
+                                     ##__VA_ARGS__>
+template <auto method,
+          uint32_t method_id,
+          size_t max_responses = 4,
+          size_t output_size_bytes = 128>
+class NanopbTestMethodContext;
+
+// Internal classes that implement NanopbTestMethodContext.
+namespace internal::test::nanopb {
 
 // A ChannelOutput implementation that stores the outgoing payloads and status.
 template <typename Response>
 class MessageOutput final : public ChannelOutput {
  public:
-  MessageOutput(const internal::Method& method,
+  MessageOutput(const internal::NanopbMethod& method,
                 Vector<Response>& responses,
                 std::span<std::byte> buffer)
-      : ChannelOutput("internal::test::MessageOutput"),
+      : ChannelOutput("internal::test::nanopb::MessageOutput"),
         method_(method),
         responses_(responses),
         buffer_(buffer) {
@@ -103,9 +111,9 @@
  private:
   std::span<std::byte> AcquireBuffer() override { return buffer_; }
 
-  void SendAndReleaseBuffer(size_t size) override;
+  Status SendAndReleaseBuffer(size_t size) override;
 
-  const internal::Method& method_;
+  const internal::NanopbMethod& method_;
   Vector<Response>& responses_;
   std::span<std::byte> buffer_;
   size_t total_responses_;
@@ -114,27 +122,32 @@
 };
 
 // Collects everything needed to invoke a particular RPC.
-template <auto method, size_t max_responses, size_t output_size>
+template <auto method,
+          uint32_t method_id,
+          size_t max_responses,
+          size_t output_size>
 struct InvocationContext {
   using Request = internal::Request<method>;
   using Response = internal::Response<method>;
 
   template <typename... Args>
   InvocationContext(Args&&... args)
-      : output(ServiceMethodTraits<method>::method(), responses, buffer),
+      : output(NanopbServiceMethodTraits<method, method_id>::method(),
+               responses,
+               buffer),
         channel(Channel::Create<123>(&output)),
         server(std::span(&channel, 1)),
         service(std::forward<Args>(args)...),
         call(static_cast<internal::Server&>(server),
              static_cast<internal::Channel&>(channel),
              service,
-             ServiceMethodTraits<method>::method()) {}
+             NanopbServiceMethodTraits<method, method_id>::method()) {}
 
   MessageOutput<Response> output;
 
   rpc::Channel channel;
   rpc::Server server;
-  typename ServiceMethodTraits<method>::Service service;
+  typename NanopbServiceMethodTraits<method, method_id>::Service service;
   Vector<Response, max_responses> responses;
   std::array<std::byte, output_size> buffer = {};
 
@@ -143,10 +156,10 @@
 
 // Method invocation context for a unary RPC. Returns the status in call() and
 // provides the response through the response() method.
-template <auto method, size_t output_size>
+template <auto method, uint32_t method_id, size_t output_size>
 class UnaryContext {
  private:
-  InvocationContext<method, 1, output_size> ctx_;
+  InvocationContext<method, method_id, 1, output_size> ctx_;
 
  public:
   using Request = typename decltype(ctx_)::Request;
@@ -166,16 +179,19 @@
 
   // Gives access to the RPC's response.
   const Response& response() const {
-    EXPECT_FALSE(ctx_.responses.empty());
+    PW_CHECK_UINT_GT(ctx_.responses.size(), 0);
     return ctx_.responses.back();
   }
 };
 
 // Method invocation context for a server streaming RPC.
-template <auto method, size_t max_responses, size_t output_size>
+template <auto method,
+          uint32_t method_id,
+          size_t max_responses,
+          size_t output_size>
 class ServerStreamingContext {
  private:
-  InvocationContext<method, max_responses, output_size> ctx_;
+  InvocationContext<method, method_id, max_responses, output_size> ctx_;
 
  public:
   using Request = typename decltype(ctx_)::Request;
@@ -216,74 +232,77 @@
 
   // The status of the stream. Only valid if done() is true.
   Status status() const {
-    EXPECT_TRUE(done());
+    PW_CHECK(done());
     return ctx_.output.last_status();
   }
 };
 
 // Alias to select the type of the context object to use based on which type of
 // RPC it is for.
-template <auto method, size_t responses, size_t output_size>
+template <auto method, uint32_t method_id, size_t responses, size_t output_size>
 using Context = std::tuple_element_t<
     static_cast<size_t>(internal::RpcTraits<decltype(method)>::kType),
-    std::tuple<
-        internal::test::UnaryContext<method, output_size>,
-        internal::test::ServerStreamingContext<method, responses, output_size>
-        // TODO(hepler): Support client and bidi streaming
-        >>;
+    std::tuple<UnaryContext<method, method_id, output_size>,
+               ServerStreamingContext<method, method_id, responses, output_size>
+               // TODO(hepler): Support client and bidi streaming
+               >>;
 
 template <typename Response>
 void MessageOutput<Response>::clear() {
   responses_.clear();
   total_responses_ = 0;
   stream_ended_ = false;
-  last_status_ = Status::UNKNOWN;
+  last_status_ = Status::Unknown();
 }
 
 template <typename Response>
-void MessageOutput<Response>::SendAndReleaseBuffer(size_t size) {
-  EXPECT_FALSE(stream_ended_);
+Status MessageOutput<Response>::SendAndReleaseBuffer(size_t size) {
+  PW_CHECK(!stream_ended_);
 
   if (size == 0u) {
-    return;
+    return Status::Ok();
   }
 
-  internal::Packet packet;
-  EXPECT_EQ(
-      Status::OK,
-      internal::Packet::FromBuffer(std::span(buffer_.data(), size), packet));
+  Result<internal::Packet> result =
+      internal::Packet::FromBuffer(std::span(buffer_.data(), size));
+  PW_CHECK(result.ok());
 
-  last_status_ = packet.status();
+  last_status_ = result.value().status();
 
-  switch (packet.type()) {
-    case internal::PacketType::RPC:
+  switch (result.value().type()) {
+    case internal::PacketType::RESPONSE:
       // If we run out of space, the back message is always the most recent.
       responses_.emplace_back();
       responses_.back() = {};
-      EXPECT_TRUE(method_.DecodeResponse(packet.payload(), &responses_.back()));
+      PW_CHECK(
+          method_.DecodeResponse(result.value().payload(), &responses_.back()));
       total_responses_ += 1;
       break;
-    case internal::PacketType::STREAM_END:
+    case internal::PacketType::SERVER_STREAM_END:
       stream_ended_ = true;
       break;
-    case internal::PacketType::CANCEL:
-    case internal::PacketType::ERROR:
-      FAIL();
-      break;
+    default:
+      PW_CRASH("Unhandled PacketType");
   }
+  return Status::Ok();
 }
 
-}  // namespace internal::test
+}  // namespace internal::test::nanopb
 
-template <auto method, size_t max_responses, size_t output_size_bytes>
-class TestMethodContext
-    : public internal::test::Context<method, max_responses, output_size_bytes> {
+template <auto method,
+          uint32_t method_id,
+          size_t max_responses,
+          size_t output_size_bytes>
+class NanopbTestMethodContext
+    : public internal::test::nanopb::
+          Context<method, method_id, max_responses, output_size_bytes> {
  public:
   // Forwards constructor arguments to the service class.
   template <typename... ServiceArgs>
-  TestMethodContext(ServiceArgs&&... service_args)
-      : internal::test::Context<method, max_responses, output_size_bytes>(
-            std::forward<ServiceArgs>(service_args)...) {}
+  NanopbTestMethodContext(ServiceArgs&&... service_args)
+      : internal::test::nanopb::
+            Context<method, method_id, max_responses, output_size_bytes>(
+                std::forward<ServiceArgs>(service_args)...) {}
 };
 
 }  // namespace pw::rpc
diff --git a/pw_rpc/nanopb/pw_rpc_nanopb_private/internal_test_utils.h b/pw_rpc/nanopb/pw_rpc_nanopb_private/internal_test_utils.h
new file mode 100644
index 0000000..1cd8df7
--- /dev/null
+++ b/pw_rpc/nanopb/pw_rpc_nanopb_private/internal_test_utils.h
@@ -0,0 +1,120 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <span>
+
+#include "pb_decode.h"
+#include "pb_encode.h"
+#include "pw_rpc/nanopb_client_call.h"
+
+namespace pw::rpc::internal {
+
+// Encodes a protobuf to a local span named by result from a list of nanopb
+// struct initializers.
+//
+//  PW_ENCODE_PB(pw_rpc_TestProto, encoded, .value = 42);
+//
+#define PW_ENCODE_PB(proto, result, ...) \
+  _PW_ENCODE_PB_EXPAND(proto, result, __LINE__, __VA_ARGS__)
+
+#define _PW_ENCODE_PB_EXPAND(proto, result, unique, ...) \
+  _PW_ENCODE_PB_IMPL(proto, result, unique, __VA_ARGS__)
+
+#define _PW_ENCODE_PB_IMPL(proto, result, unique, ...)            \
+  std::array<pb_byte_t, 2 * sizeof(proto)> _pb_buffer_##unique{}; \
+  const std::span result =                                        \
+      ::pw::rpc::internal::EncodeProtobuf<proto, proto##_fields>( \
+          proto{__VA_ARGS__}, _pb_buffer_##unique)
+
+template <typename T, auto fields>
+std::span<const std::byte> EncodeProtobuf(const T& protobuf,
+                                          std::span<pb_byte_t> buffer) {
+  auto output = pb_ostream_from_buffer(buffer.data(), buffer.size());
+  EXPECT_TRUE(pb_encode(&output, fields, &protobuf));
+  return std::as_bytes(buffer.first(output.bytes_written));
+}
+
+// Decodes a protobuf to a nanopb struct named by result.
+#define PW_DECODE_PB(proto, result, buffer)                        \
+  proto result;                                                    \
+  ::pw::rpc::internal::DecodeProtobuf<proto, proto##_fields>(      \
+      std::span(reinterpret_cast<const pb_byte_t*>(buffer.data()), \
+                buffer.size()),                                    \
+      result);
+
+template <typename T, auto fields>
+void DecodeProtobuf(std::span<const pb_byte_t> buffer, T& protobuf) {
+  auto input = pb_istream_from_buffer(buffer.data(), buffer.size());
+  EXPECT_TRUE(pb_decode(&input, fields, &protobuf));
+}
+
+// Client response handler for a unary RPC invocation which captures the
+// response it receives.
+template <typename Response>
+class TestUnaryResponseHandler : public UnaryResponseHandler<Response> {
+ public:
+  void ReceivedResponse(Status status, const Response& response) override {
+    last_status_ = status;
+    last_response_ = response;
+    ++responses_received_;
+  }
+
+  void RpcError(Status status) override { rpc_error_ = status; }
+
+  constexpr Status last_status() const { return last_status_; }
+  constexpr const Response& last_response() const& { return last_response_; }
+  constexpr size_t responses_received() const { return responses_received_; }
+  constexpr Status rpc_error() const { return rpc_error_; }
+
+ private:
+  Status last_status_;
+  Response last_response_;
+  size_t responses_received_ = 0;
+  Status rpc_error_;
+};
+
+// Client response handler for a unary RPC invocation which stores information
+// about the state of the stream.
+template <typename Response>
+class TestServerStreamingResponseHandler
+    : public ServerStreamingResponseHandler<Response> {
+ public:
+  void ReceivedResponse(const Response& response) override {
+    last_response_ = response;
+    ++responses_received_;
+  }
+
+  void Complete(Status status) override {
+    active_ = false;
+    status_ = status;
+  }
+
+  void RpcError(Status status) override { rpc_error_ = status; }
+
+  constexpr bool active() const { return active_; }
+  constexpr Status status() const { return status_; }
+  constexpr const Response& last_response() const& { return last_response_; }
+  constexpr size_t responses_received() const { return responses_received_; }
+  constexpr Status rpc_error() const { return rpc_error_; }
+
+ private:
+  Status status_;
+  Response last_response_;
+  size_t responses_received_ = 0;
+  bool active_ = true;
+  Status rpc_error_;
+};
+
+}  // namespace pw::rpc::internal
diff --git a/pw_rpc/packet.cc b/pw_rpc/packet.cc
index dfbdb50..9b62a4f 100644
--- a/pw_rpc/packet.cc
+++ b/pw_rpc/packet.cc
@@ -20,12 +20,10 @@
 
 using std::byte;
 
-Status Packet::FromBuffer(std::span<const byte> data, Packet& packet) {
-  packet = Packet();
-
-  protobuf::Decoder decoder(data);
-
+Result<Packet> Packet::FromBuffer(ConstByteSpan data) {
+  Packet packet;
   Status status;
+  protobuf::Decoder decoder(data);
 
   while ((status = decoder.Next()).ok()) {
     RpcPacket::Fields field =
@@ -64,10 +62,14 @@
     }
   }
 
-  return status == Status::DATA_LOSS ? Status::DATA_LOSS : Status::OK;
+  if (status == Status::DataLoss()) {
+    return status;
+  }
+
+  return packet;
 }
 
-StatusWithSize Packet::Encode(std::span<byte> buffer) const {
+Result<ConstByteSpan> Packet::Encode(ByteSpan buffer) const {
   pw::protobuf::NestedEncoder encoder(buffer);
   RpcPacket::Encoder rpc_packet(&encoder);
 
@@ -78,14 +80,9 @@
   rpc_packet.WriteChannelId(channel_id_);
   rpc_packet.WriteServiceId(service_id_);
   rpc_packet.WriteMethodId(method_id_);
-  rpc_packet.WriteStatus(status_);
+  rpc_packet.WriteStatus(status_.code());
 
-  std::span<const byte> proto;
-  if (Status status = encoder.Encode(&proto); !status.ok()) {
-    return StatusWithSize(status, 0);
-  }
-
-  return StatusWithSize(proto.size());
+  return encoder.Encode();
 }
 
 size_t Packet::MinEncodedSizeBytes() const {
diff --git a/pw_rpc/packet_test.cc b/pw_rpc/packet_test.cc
index e4ac4b6..9f6d7ea 100644
--- a/pw_rpc/packet_test.cc
+++ b/pw_rpc/packet_test.cc
@@ -15,6 +15,7 @@
 #include "pw_rpc/internal/packet.h"
 
 #include "gtest/gtest.h"
+#include "pw_bytes/array.h"
 #include "pw_protobuf/codegen.h"
 #include "pw_protobuf/wire_format.h"
 
@@ -23,82 +24,90 @@
 
 using std::byte;
 
-constexpr byte kPayload[] = {byte(0x82), byte(0x02), byte(0xff), byte(0xff)};
+constexpr auto kPayload = bytes::Array<0x82, 0x02, 0xff, 0xff>();
 
-constexpr byte kEncoded[] = {
+constexpr auto kEncoded = bytes::Array<
     // Payload
-    byte{MakeKey(5, protobuf::WireType::kDelimited)},
-    byte{0x04},
-    byte{0x82},
-    byte{0x02},
-    byte{0xff},
-    byte{0xff},
+    MakeKey(5, protobuf::WireType::kDelimited),
+    0x04,
+    0x82,
+    0x02,
+    0xff,
+    0xff,
 
     // Packet type
-    byte{MakeKey(1, protobuf::WireType::kVarint)},
-    byte{0},  // RPC
+    MakeKey(1, protobuf::WireType::kVarint),
+    1,  // RESPONSE
 
     // Channel ID
-    byte{MakeKey(2, protobuf::WireType::kVarint)},
-    byte{1},
+    MakeKey(2, protobuf::WireType::kVarint),
+    1,
 
     // Service ID
-    byte{MakeKey(3, protobuf::WireType::kFixed32)},
-    byte{42},
-    byte{0},
-    byte{0},
-    byte{0},
+    MakeKey(3, protobuf::WireType::kFixed32),
+    42,
+    0,
+    0,
+    0,
 
     // Method ID
-    byte{MakeKey(4, protobuf::WireType::kFixed32)},
-    byte{100},
-    byte{0},
-    byte{0},
-    byte{0},
+    MakeKey(4, protobuf::WireType::kFixed32),
+    100,
+    0,
+    0,
+    0,
 
     // Status
-    byte{MakeKey(6, protobuf::WireType::kVarint)},
-    byte{0x00},
-};
+    MakeKey(6, protobuf::WireType::kVarint),
+    0x00>();
+
+// Test that a default-constructed packet sets its members to the default
+// protobuf values.
+static_assert(Packet().type() == PacketType{});
+static_assert(Packet().channel_id() == 0);
+static_assert(Packet().service_id() == 0);
+static_assert(Packet().method_id() == 0);
+static_assert(Packet().status() == static_cast<Status::Code>(0));
+static_assert(Packet().payload().empty());
 
 TEST(Packet, Encode) {
   byte buffer[64];
 
-  Packet packet(PacketType::RPC, 1, 42, 100, kPayload);
+  Packet packet(PacketType::RESPONSE, 1, 42, 100, kPayload);
 
-  auto sws = packet.Encode(buffer);
-  ASSERT_EQ(sizeof(kEncoded), sws.size());
-  EXPECT_EQ(std::memcmp(kEncoded, buffer, sizeof(kEncoded)), 0);
+  auto result = packet.Encode(buffer);
+  ASSERT_EQ(Status::Ok(), result.status());
+  ASSERT_EQ(kEncoded.size(), result.value().size());
+  EXPECT_EQ(std::memcmp(kEncoded.data(), buffer, kEncoded.size()), 0);
 }
 
 TEST(Packet, Encode_BufferTooSmall) {
   byte buffer[2];
 
-  Packet packet(PacketType::RPC, 1, 42, 100, kPayload);
+  Packet packet(PacketType::RESPONSE, 1, 42, 100, kPayload);
 
-  auto sws = packet.Encode(buffer);
-  EXPECT_EQ(0u, sws.size());
-  EXPECT_EQ(Status::RESOURCE_EXHAUSTED, sws.status());
+  auto result = packet.Encode(buffer);
+  EXPECT_EQ(Status::ResourceExhausted(), result.status());
 }
 
 TEST(Packet, Decode_ValidPacket) {
-  Packet packet;
-  ASSERT_EQ(Status::OK, Packet::FromBuffer(kEncoded, packet));
+  auto result = Packet::FromBuffer(kEncoded);
+  ASSERT_TRUE(result.ok());
 
-  EXPECT_EQ(PacketType::RPC, packet.type());
+  auto& packet = result.value();
+  EXPECT_EQ(PacketType::RESPONSE, packet.type());
   EXPECT_EQ(1u, packet.channel_id());
   EXPECT_EQ(42u, packet.service_id());
   EXPECT_EQ(100u, packet.method_id());
   ASSERT_EQ(sizeof(kPayload), packet.payload().size());
-  EXPECT_EQ(0,
-            std::memcmp(packet.payload().data(), kPayload, sizeof(kPayload)));
+  EXPECT_EQ(
+      0,
+      std::memcmp(packet.payload().data(), kPayload.data(), kPayload.size()));
 }
 
 TEST(Packet, Decode_InvalidPacket) {
   byte bad_data[] = {byte{0xFF}, byte{0x00}, byte{0x00}, byte{0xFF}};
-
-  Packet packet;
-  EXPECT_EQ(Status::DATA_LOSS, Packet::FromBuffer(bad_data, packet));
+  EXPECT_EQ(Status::DataLoss(), Packet::FromBuffer(bad_data).status());
 }
 
 TEST(Packet, EncodeDecode) {
@@ -109,16 +118,17 @@
   packet.set_service_id(0xdeadbeef);
   packet.set_method_id(0x03a82921);
   packet.set_payload(payload);
-  packet.set_status(Status::UNAVAILABLE);
+  packet.set_status(Status::Unavailable());
 
   byte buffer[128];
-  StatusWithSize sws = packet.Encode(buffer);
-  ASSERT_EQ(sws.status(), Status::OK);
+  Result result = packet.Encode(buffer);
+  ASSERT_EQ(result.status(), Status::Ok());
 
-  std::span<byte> packet_data(buffer, sws.size());
-  Packet decoded;
-  ASSERT_EQ(Status::OK, Packet::FromBuffer(packet_data, decoded));
+  std::span<byte> packet_data(buffer, result.value().size());
+  auto decode_result = Packet::FromBuffer(packet_data);
+  ASSERT_TRUE(decode_result.ok());
 
+  auto& decoded = decode_result.value();
   EXPECT_EQ(decoded.type(), packet.type());
   EXPECT_EQ(decoded.channel_id(), packet.channel_id());
   EXPECT_EQ(decoded.service_id(), packet.service_id());
@@ -128,7 +138,7 @@
                         packet.payload().data(),
                         packet.payload().size()),
             0);
-  EXPECT_EQ(decoded.status(), Status::UNAVAILABLE);
+  EXPECT_EQ(decoded.status(), Status::Unavailable());
 }
 
 constexpr size_t kReservedSize = 2 /* type */ + 2 /* channel */ +
@@ -137,12 +147,13 @@
 
 TEST(Packet, PayloadUsableSpace_ExactFit) {
   EXPECT_EQ(kReservedSize,
-            Packet(PacketType::RPC, 1, 42, 100).MinEncodedSizeBytes());
+            Packet(PacketType::RESPONSE, 1, 42, 100).MinEncodedSizeBytes());
 }
 
 TEST(Packet, PayloadUsableSpace_LargerVarints) {
-  EXPECT_EQ(kReservedSize + 2 /* channel */,  // service and method are Fixed32
-            Packet(PacketType::RPC, 17000, 200, 200).MinEncodedSizeBytes());
+  EXPECT_EQ(
+      kReservedSize + 2 /* channel */,  // service and method are Fixed32
+      Packet(PacketType::RESPONSE, 17000, 200, 200).MinEncodedSizeBytes());
 }
 
 }  // namespace
diff --git a/pw_rpc/public/pw_rpc/channel.h b/pw_rpc/public/pw_rpc/channel.h
index 1b95082..094d269 100644
--- a/pw_rpc/public/pw_rpc/channel.h
+++ b/pw_rpc/public/pw_rpc/channel.h
@@ -20,6 +20,13 @@
 #include "pw_status/status.h"
 
 namespace pw::rpc {
+namespace internal {
+
+class BaseClientCall;
+
+}  // namespace internal
+
+class Client;
 
 class ChannelOutput {
  public:
@@ -34,8 +41,11 @@
   // Acquire a buffer into which to write an outgoing RPC packet.
   virtual std::span<std::byte> AcquireBuffer() = 0;
 
-  // Sends the contents of the buffer from AcquireBuffer().
-  virtual void SendAndReleaseBuffer(size_t size) = 0;
+  // Sends the contents of the buffer from AcquireBuffer(). Returns OK if the
+  // operation succeeded, or an implementation-defined Status value if there was
+  // an error. The implementation must NOT return FAILED_PRECONDITION or
+  // INTERNAL, which are reserved by pw_rpc.
+  virtual Status SendAndReleaseBuffer(size_t size) = 0;
 
  private:
   const char* name_;
@@ -46,7 +56,8 @@
   static constexpr uint32_t kUnassignedChannelId = 0;
 
   // Creates a dynamically assignable channel without a set ID or output.
-  constexpr Channel() : id_(kUnassignedChannelId), output_(nullptr) {}
+  constexpr Channel()
+      : id_(kUnassignedChannelId), output_(nullptr), client_(nullptr) {}
 
   // Creates a channel with a static ID. The channel's output can also be
   // static, or it can set to null to allow dynamically opening connections
@@ -62,18 +73,27 @@
 
  protected:
   constexpr Channel(uint32_t id, ChannelOutput* output)
-      : id_(id), output_(output) {
-    PW_CHECK_UINT_NE(id, kUnassignedChannelId);
+      : id_(id), output_(output), client_(nullptr) {
+    // TODO(pwbug/246): Use PW_ASSERT when that is available.
+    // PW_ASSERT(id != kUnassignedChannelId);
   }
 
   ChannelOutput& output() const {
-    PW_CHECK_NOTNULL(output_);
+    // TODO(pwbug/246): Use PW_ASSERT when that is available.
+    // PW_ASSERT(output_ != nullptr);
     return *output_;
   }
 
  private:
+  friend class internal::BaseClientCall;
+  friend class Client;
+
+  constexpr Client* client() const { return client_; }
+  constexpr void set_client(Client* client) { client_ = client; }
+
   uint32_t id_;
   ChannelOutput* output_;
+  Client* client_;
 };
 
 }  // namespace pw::rpc
diff --git a/pw_rpc/public/pw_rpc/client.h b/pw_rpc/public/pw_rpc/client.h
new file mode 100644
index 0000000..6e7def1
--- /dev/null
+++ b/pw_rpc/public/pw_rpc/client.h
@@ -0,0 +1,62 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <cstddef>
+#include <span>
+
+#include "pw_bytes/span.h"
+#include "pw_rpc/internal/base_client_call.h"
+#include "pw_rpc/internal/channel.h"
+
+namespace pw::rpc {
+
+class Client {
+ public:
+  // Creates a client that uses a set of RPC channels. Channels can be shared
+  // between a client and a server, but not between multiple clients.
+  constexpr Client(std::span<Channel> channels)
+      : channels_(static_cast<internal::Channel*>(channels.data()),
+                  channels.size()) {
+    for (Channel& channel : channels_) {
+      channel.set_client(this);
+    };
+  }
+
+  // Processes an incoming RPC packet. The packet may be an RPC response or a
+  // control packet, the result of which is processed in this function. Returns
+  // whether the packet was able to be processed:
+  //
+  //   OK - The packet was processed by the client.
+  //   DATA_LOSS - Failed to decode the packet.
+  //   INVALID_ARGUMENT - The packet is intended for a server, not a client.
+  //   NOT_FOUND - The packet belongs to an unknown RPC call.
+  //   UNIMPLEMENTED - Received a type of packet that the client doesn't know
+  //       how to handle.
+  //
+  Status ProcessPacket(ConstByteSpan data);
+
+  size_t active_calls() const { return calls_.size(); }
+
+ private:
+  friend class internal::BaseClientCall;
+
+  Status RegisterCall(internal::BaseClientCall& call);
+  void RemoveCall(const internal::BaseClientCall& call) { calls_.remove(call); }
+
+  std::span<internal::Channel> channels_;
+  IntrusiveList<internal::BaseClientCall> calls_;
+};
+
+}  // namespace pw::rpc
diff --git a/pw_rpc/public/pw_rpc/internal/base_client_call.h b/pw_rpc/public/pw_rpc/internal/base_client_call.h
new file mode 100644
index 0000000..a6b800b
--- /dev/null
+++ b/pw_rpc/public/pw_rpc/internal/base_client_call.h
@@ -0,0 +1,86 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include "pw_assert/assert.h"
+#include "pw_containers/intrusive_list.h"
+#include "pw_rpc/internal/channel.h"
+#include "pw_rpc/internal/packet.h"
+#include "pw_status/status.h"
+
+namespace pw::rpc::internal {
+
+// Base class representing an active client-side RPC call. Implementations
+// derive from this class and provide a packet handler function which is
+// called with a reference to the ClientCall object and the received packet.
+class BaseClientCall : public IntrusiveList<BaseClientCall>::Item {
+ public:
+  using ResponseHandler = void (*)(BaseClientCall&, const Packet&);
+
+  constexpr BaseClientCall(rpc::Channel* channel,
+                           uint32_t service_id,
+                           uint32_t method_id,
+                           ResponseHandler handler)
+      : channel_(static_cast<Channel*>(channel)),
+        service_id_(service_id),
+        method_id_(method_id),
+        handler_(handler),
+        active_(true) {
+    // TODO(pwbug/246): Use PW_ASSERT when that is available.
+    // PW_ASSERT(channel_ != nullptr);
+
+    Register();
+  }
+
+  ~BaseClientCall() { Unregister(); }
+
+  BaseClientCall(const BaseClientCall&) = delete;
+  BaseClientCall& operator=(const BaseClientCall&) = delete;
+
+  BaseClientCall(BaseClientCall&& other) { *this = std::move(other); }
+  BaseClientCall& operator=(BaseClientCall&& other);
+
+  constexpr bool active() const { return active_; }
+
+  void Cancel();
+
+ protected:
+  constexpr Channel& channel() const { return *channel_; }
+  constexpr uint32_t service_id() const { return service_id_; }
+  constexpr uint32_t method_id() const { return method_id_; }
+
+  std::span<std::byte> AcquirePayloadBuffer();
+  Status ReleasePayloadBuffer(std::span<const std::byte> payload);
+
+  void Unregister();
+
+ private:
+  friend class rpc::Client;
+
+  void Register();
+
+  void HandleResponse(const Packet& packet) { handler_(*this, packet); }
+
+  Packet NewPacket(PacketType type,
+                   std::span<const std::byte> payload = {}) const;
+
+  Channel* channel_;
+  uint32_t service_id_;
+  uint32_t method_id_;
+  Channel::OutputBuffer request_;
+  ResponseHandler handler_;
+  bool active_;
+};
+
+}  // namespace pw::rpc::internal
diff --git a/pw_rpc/public/pw_rpc/internal/base_method.h b/pw_rpc/public/pw_rpc/internal/base_method.h
deleted file mode 100644
index 4b3fdae..0000000
--- a/pw_rpc/public/pw_rpc/internal/base_method.h
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2020 The Pigweed Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-//     https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-#pragma once
-
-#include <cstddef>
-#include <cstdint>
-
-namespace pw::rpc::internal {
-
-// RPC server implementations provide a Method class in the
-// pw_rpc/internal/method.h header that is derived from BaseMethod.
-class BaseMethod {
- public:
-  constexpr uint32_t id() const { return id_; }
-
-  // Implementations must provide the Invoke method, which the Server calls:
-  //
-  // StatusWithSize Invoke(ServerCall& call,
-  //                       std::span<const std::byte> request,
-  //                       std::span<std::byte> payload_buffer) const;
-
- protected:
-  constexpr BaseMethod(uint32_t id) : id_(id) {}
-
- private:
-  uint32_t id_;
-};
-
-}  // namespace pw::rpc::internal
diff --git a/pw_rpc/public/pw_rpc/internal/base_server_writer.h b/pw_rpc/public/pw_rpc/internal/base_server_writer.h
index 9d2254b..4b9dbad 100644
--- a/pw_rpc/public/pw_rpc/internal/base_server_writer.h
+++ b/pw_rpc/public/pw_rpc/internal/base_server_writer.h
@@ -20,12 +20,16 @@
 #include "pw_containers/intrusive_list.h"
 #include "pw_rpc/internal/call.h"
 #include "pw_rpc/internal/channel.h"
+#include "pw_rpc/internal/method.h"
 #include "pw_rpc/service.h"
 #include "pw_status/status.h"
 
-namespace pw::rpc::internal {
+namespace pw::rpc {
 
-class Method;
+class Server;
+
+namespace internal {
+
 class Packet;
 
 // Internal ServerWriter base class. ServerWriters are used to stream responses.
@@ -53,7 +57,7 @@
   uint32_t method_id() const;
 
   // Closes the ServerWriter, if it is open.
-  void Finish(Status status = Status::OK);
+  void Finish(Status status = Status::Ok());
 
  protected:
   constexpr BaseServerWriter() : state_{kClosed} {}
@@ -62,16 +66,27 @@
 
   const Channel& channel() const { return call_.channel(); }
 
+  constexpr const Channel::OutputBuffer& buffer() const { return response_; }
+
   std::span<std::byte> AcquirePayloadBuffer();
 
+  // Releases the buffer, sending a packet with the specified payload.
   Status ReleasePayloadBuffer(std::span<const std::byte> payload);
 
+  // Releases the buffer without sending a packet.
+  Status ReleasePayloadBuffer();
+
  private:
-  Packet RpcPacket(std::span<const std::byte> payload = {}) const;
+  friend class rpc::Server;
+
+  void Close();
+
+  Packet ResponsePacket(std::span<const std::byte> payload = {}) const;
 
   ServerCall call_;
   Channel::OutputBuffer response_;
   enum { kClosed, kOpen } state_;
 };
 
-}  // namespace pw::rpc::internal
+}  // namespace internal
+}  // namespace pw::rpc
diff --git a/pw_rpc/public/pw_rpc/internal/channel.h b/pw_rpc/public/pw_rpc/internal/channel.h
index ba03f07..6533eda 100644
--- a/pw_rpc/public/pw_rpc/internal/channel.h
+++ b/pw_rpc/public/pw_rpc/internal/channel.h
@@ -51,6 +51,13 @@
     // Returns a portion of this OutputBuffer to use as the packet payload.
     std::span<std::byte> payload(const Packet& packet) const;
 
+    bool Contains(std::span<const std::byte> buffer) const {
+      return buffer.data() >= buffer_.data() &&
+             buffer.data() + buffer.size() <= buffer_.data() + buffer_.size();
+    }
+
+    bool empty() const { return buffer_.empty(); }
+
    private:
     friend class Channel;
 
@@ -71,6 +78,11 @@
   }
 
   Status Send(OutputBuffer& output, const internal::Packet& packet);
+
+  void Release(OutputBuffer& buffer) {
+    buffer.buffer_ = {};
+    output().SendAndReleaseBuffer(0);
+  }
 };
 
 }  // namespace pw::rpc::internal
diff --git a/pw_rpc/public/pw_rpc/internal/method.h b/pw_rpc/public/pw_rpc/internal/method.h
new file mode 100644
index 0000000..cd9155b
--- /dev/null
+++ b/pw_rpc/public/pw_rpc/internal/method.h
@@ -0,0 +1,48 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <cstddef>
+#include <cstdint>
+
+#include "pw_rpc/internal/call.h"
+
+namespace pw::rpc::internal {
+
+class Packet;
+
+// RPC server implementations provide a class that derives from Method.
+class Method {
+ public:
+  constexpr uint32_t id() const { return id_; }
+
+  // The pw::rpc::Server calls method.Invoke to call a user-defined RPC. Invoke
+  // calls the invoker function, which handles the RPC request and response
+  // according to the RPC type and protobuf implementation and calls the
+  // user-defined RPC function.
+  void Invoke(ServerCall& call, const Packet& request) const {
+    return invoker_(*this, call, request);
+  }
+
+ protected:
+  using Invoker = void (&)(const Method&, ServerCall&, const Packet&);
+
+  constexpr Method(uint32_t id, Invoker invoker) : id_(id), invoker_(invoker) {}
+
+ private:
+  uint32_t id_;
+  Invoker invoker_;
+};
+
+}  // namespace pw::rpc::internal
diff --git a/pw_sys_io_baremetal_lm3s6965evb/early_boot.c b/pw_rpc/public/pw_rpc/internal/method_type.h
similarity index 75%
copy from pw_sys_io_baremetal_lm3s6965evb/early_boot.c
copy to pw_rpc/public/pw_rpc/internal/method_type.h
index 1670b7d..9a44e7a 100644
--- a/pw_sys_io_baremetal_lm3s6965evb/early_boot.c
+++ b/pw_rpc/public/pw_rpc/internal/method_type.h
@@ -11,7 +11,15 @@
 // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 // License for the specific language governing permissions and limitations under
 // the License.
+#pragma once
 
-#include "pw_boot_armv7m/boot.h"
+namespace pw::rpc::internal {
 
-void pw_PreStaticConstructorInit() {}
\ No newline at end of file
+enum class MethodType {
+  kUnary,
+  kServerStreaming,
+  kClientStreaming,
+  kBidirectionalStreaming,
+};
+
+}  // namespace pw::rpc::internal
diff --git a/pw_rpc/public/pw_rpc/internal/method_union.h b/pw_rpc/public/pw_rpc/internal/method_union.h
new file mode 100644
index 0000000..9d491af
--- /dev/null
+++ b/pw_rpc/public/pw_rpc/internal/method_union.h
@@ -0,0 +1,87 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <type_traits>
+
+#include "pw_rpc/internal/method.h"
+
+namespace pw::rpc::internal {
+
+// Base class for different combinations of possible service methods. Derived
+// classes should contain a union of different method types, one of which is a
+// base Method.
+class MethodUnion {
+ public:
+  constexpr const Method& method() const;
+};
+
+// Templated false value for use in static_assert(false) statements.
+template <typename...>
+constexpr std::false_type kFalse{};
+
+// Traits struct that determines the type of an RPC service method from its
+// signature. Derived MethodUnions should provide specializations for their
+// method types.
+template <typename Method>
+struct MethodTraits {
+  static_assert(kFalse<Method>,
+                "The selected function is not an RPC service method");
+
+  // Specializations must set Implementation as an alias for their method
+  // implementation class.
+  using Implementation = Method;
+
+  // Specializations must set Service as an alias to the implemented service
+  // class.
+  using Service = rpc::Service;
+};
+
+template <auto method>
+using MethodImplementation =
+    typename MethodTraits<decltype(method)>::Implementation;
+
+template <auto method>
+using MethodService = typename MethodTraits<decltype(method)>::Service;
+
+// Identifies a base class from a member function it defines. This should be
+// used with decltype to retrieve the base class.
+template <typename T, typename U>
+T BaseFromMember(U T::*);
+
+// The base generated service of an implemented RPC method.
+template <auto method>
+using MethodBaseService = decltype(
+    BaseFromMember(&MethodService<method>::_PwRpcInternalGeneratedBase));
+
+class CoreMethodUnion : public MethodUnion {
+ public:
+  constexpr const Method& method() const { return impl_.method; }
+
+ private:
+  // All derived MethodUnions must contain a union of different method
+  // implementations as their only member.
+  union {
+    Method method;
+  } impl_;
+};
+
+constexpr const Method& MethodUnion::method() const {
+  // This is an ugly hack. As all MethodUnion classes contain a union of Method
+  // derivatives, CoreMethodUnion is used to extract a generic Method from the
+  // specific implementation.
+  return static_cast<const CoreMethodUnion*>(this)->method();
+}
+
+}  // namespace pw::rpc::internal
diff --git a/pw_rpc/public/pw_rpc/internal/packet.h b/pw_rpc/public/pw_rpc/internal/packet.h
index 15904f7..c67d86a 100644
--- a/pw_rpc/public/pw_rpc/internal/packet.h
+++ b/pw_rpc/public/pw_rpc/internal/packet.h
@@ -17,6 +17,7 @@
 #include <cstdint>
 #include <span>
 
+#include "pw_bytes/span.h"
 #include "pw_rpc_protos/packet.pwpb.h"
 #include "pw_status/status_with_size.h"
 
@@ -28,13 +29,13 @@
 
   // Parses a packet from a protobuf message. Missing or malformed fields take
   // their default values.
-  static Status FromBuffer(std::span<const std::byte> data, Packet& packet);
+  static Result<Packet> FromBuffer(ConstByteSpan data);
 
   // Creates an RPC packet with the channel, service, and method ID of the
   // provided packet.
   static constexpr Packet Response(const Packet& request,
-                                   Status status = Status::OK) {
-    return Packet(PacketType::RPC,
+                                   Status status = Status::Ok()) {
+    return Packet(PacketType::RESPONSE,
                   request.channel_id(),
                   request.service_id(),
                   request.method_id(),
@@ -42,10 +43,21 @@
                   status);
   }
 
-  // Creates an ERROR packet with the channel, service, and method ID of the
-  // provided packet.
-  static constexpr Packet Error(const Packet& packet, Status status) {
-    return Packet(PacketType::ERROR,
+  // Creates a SERVER_ERROR packet with the channel, service, and method ID of
+  // the provided packet.
+  static constexpr Packet ServerError(const Packet& packet, Status status) {
+    return Packet(PacketType::SERVER_ERROR,
+                  packet.channel_id(),
+                  packet.service_id(),
+                  packet.method_id(),
+                  {},
+                  status);
+  }
+
+  // Creates a CLIENT_ERROR packet with the channel, service, and method ID of
+  // the provided packet.
+  static constexpr Packet ClientError(const Packet& packet, Status status) {
+    return Packet(PacketType::CLIENT_ERROR,
                   packet.channel_id(),
                   packet.service_id(),
                   packet.method_id(),
@@ -55,14 +67,14 @@
 
   // Creates an empty packet.
   constexpr Packet()
-      : Packet(PacketType::RPC, kUnassignedId, kUnassignedId, kUnassignedId) {}
+      : Packet(PacketType{}, kUnassignedId, kUnassignedId, kUnassignedId) {}
 
   constexpr Packet(PacketType type,
                    uint32_t channel_id,
                    uint32_t service_id,
                    uint32_t method_id,
-                   std::span<const std::byte> payload = {},
-                   Status status = Status::OK)
+                   ConstByteSpan payload = {},
+                   Status status = Status::Ok())
       : type_(type),
         channel_id_(channel_id),
         service_id_(service_id),
@@ -71,20 +83,24 @@
         status_(status) {}
 
   // Encodes the packet into its wire format. Returns the encoded size.
-  StatusWithSize Encode(std::span<std::byte> buffer) const;
+  Result<ConstByteSpan> Encode(ByteSpan buffer) const;
 
   // Determines the space required to encode the packet proto fields for a
   // response, excluding the payload. This may be used to split the buffer into
   // reserved space and available space for the payload.
   size_t MinEncodedSizeBytes() const;
 
+  enum Destination : bool { kServer, kClient };
+
+  constexpr Destination destination() const {
+    return static_cast<int>(type_) % 2 == 0 ? kServer : kClient;
+  }
+
   constexpr PacketType type() const { return type_; }
   constexpr uint32_t channel_id() const { return channel_id_; }
   constexpr uint32_t service_id() const { return service_id_; }
   constexpr uint32_t method_id() const { return method_id_; }
-  constexpr const std::span<const std::byte>& payload() const {
-    return payload_;
-  }
+  constexpr const ConstByteSpan& payload() const { return payload_; }
   constexpr Status status() const { return status_; }
 
   constexpr void set_type(PacketType type) { type_ = type; }
@@ -95,9 +111,7 @@
     service_id_ = service_id;
   }
   constexpr void set_method_id(uint32_t method_id) { method_id_ = method_id; }
-  constexpr void set_payload(std::span<const std::byte> payload) {
-    payload_ = payload;
-  }
+  constexpr void set_payload(ConstByteSpan payload) { payload_ = payload; }
   constexpr void set_status(Status status) { status_ = status; }
 
  private:
@@ -105,7 +119,7 @@
   uint32_t channel_id_;
   uint32_t service_id_;
   uint32_t method_id_;
-  std::span<const std::byte> payload_;
+  ConstByteSpan payload_;
   Status status_;
 };
 
diff --git a/pw_rpc/public/pw_rpc/internal/service_method_traits.h b/pw_rpc/public/pw_rpc/internal/service_method_traits.h
new file mode 100644
index 0000000..0e26b8d
--- /dev/null
+++ b/pw_rpc/public/pw_rpc/internal/service_method_traits.h
@@ -0,0 +1,42 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include "pw_rpc/internal/method_union.h"
+
+namespace pw::rpc::internal {
+
+// Gets information about a service and method at compile-time. Uses a pointer
+// to a member function of the service implementation to identify the service
+// class, generated service class, and Method object.
+template <auto lookup_function, auto impl_method, uint32_t method_id>
+class ServiceMethodTraits {
+ public:
+  ServiceMethodTraits() = delete;
+
+  // Type of the service implementation derived class.
+  using Service = MethodService<impl_method>;
+
+  using MethodImpl = MethodImplementation<impl_method>;
+
+  // Reference to the Method object corresponding to this method.
+  static constexpr const MethodImpl& method() {
+    return *lookup_function(method_id);
+  }
+
+  static_assert(lookup_function(method_id) != nullptr,
+                "The selected function is not an RPC service method");
+};
+
+}  // namespace pw::rpc::internal
diff --git a/pw_rpc/test_impl/public_overrides/pw_rpc/internal/method.h b/pw_rpc/public/pw_rpc/internal/test_method.h
similarity index 65%
rename from pw_rpc/test_impl/public_overrides/pw_rpc/internal/method.h
rename to pw_rpc/public/pw_rpc/internal/test_method.h
index 41ed781..9acc26f 100644
--- a/pw_rpc/test_impl/public_overrides/pw_rpc/internal/method.h
+++ b/pw_rpc/public/pw_rpc/internal/test_method.h
@@ -17,7 +17,8 @@
 #include <cstring>
 #include <span>
 
-#include "pw_rpc/internal/base_method.h"
+#include "pw_rpc/internal/method.h"
+#include "pw_rpc/internal/method_union.h"
 #include "pw_rpc/internal/packet.h"
 #include "pw_rpc/server_context.h"
 #include "pw_status/status_with_size.h"
@@ -26,14 +27,10 @@
 
 // This is a fake RPC method implementation for testing only. It stores the
 // channel ID, request, and payload buffer, and optionally provides a response.
-class Method : public BaseMethod {
+class TestMethod : public Method {
  public:
-  constexpr Method(uint32_t id) : BaseMethod(id), last_channel_id_(0) {}
-
-  void Invoke(ServerCall& call, const Packet& request) const {
-    last_channel_id_ = call.channel().id();
-    last_request_ = request;
-  }
+  constexpr TestMethod(uint32_t id)
+      : Method(id, InvokeForTest), last_channel_id_(0) {}
 
   uint32_t last_channel_id() const { return last_channel_id_; }
   const Packet& last_request() const { return last_request_; }
@@ -42,6 +39,14 @@
   void set_status(Status status) { response_status_ = status; }
 
  private:
+  static void InvokeForTest(const Method& method,
+                            ServerCall& call,
+                            const Packet& request) {
+    const auto& test_method = static_cast<const TestMethod&>(method);
+    test_method.last_channel_id_ = call.channel().id();
+    test_method.last_request_ = request;
+  }
+
   // Make these mutable so they can be set in the Invoke method, which is const.
   // The Method class is used exclusively in tests. Having these members mutable
   // allows tests to verify that the Method is invoked correctly.
@@ -52,4 +57,18 @@
   Status response_status_;
 };
 
+class TestMethodUnion : public MethodUnion {
+ public:
+  constexpr TestMethodUnion(TestMethod&& method) : impl_({.test = method}) {}
+
+  constexpr const Method& method() const { return impl_.method; }
+  constexpr const TestMethod& test_method() const { return impl_.test; }
+
+ private:
+  union {
+    Method method;
+    TestMethod test;
+  } impl_;
+};
+
 }  // namespace pw::rpc::internal
diff --git a/pw_rpc/public/pw_rpc/server.h b/pw_rpc/public/pw_rpc/server.h
index c0ca500..51e3f36 100644
--- a/pw_rpc/public/pw_rpc/server.h
+++ b/pw_rpc/public/pw_rpc/server.h
@@ -15,12 +15,15 @@
 
 #include <cstddef>
 #include <span>
+#include <tuple>
 
 #include "pw_containers/intrusive_list.h"
 #include "pw_rpc/channel.h"
 #include "pw_rpc/internal/base_server_writer.h"
 #include "pw_rpc/internal/channel.h"
+#include "pw_rpc/internal/method.h"
 #include "pw_rpc/service.h"
+#include "pw_status/status.h"
 
 namespace pw::rpc {
 
@@ -36,8 +39,16 @@
   // with a Service; instead, use a generated class which inherits from it.
   void RegisterService(Service& service) { services_.push_front(service); }
 
-  void ProcessPacket(std::span<const std::byte> packet,
-                     ChannelOutput& interface);
+  // Processes an RPC packet. The packet may contain an RPC request or a control
+  // packet, the result of which is processed in this function. Returns whether
+  // the packet was able to be processed:
+  //
+  //   OK - The packet was processed by the server.
+  //   DATA_LOSS - Failed to decode the packet.
+  //   INVALID_ARGUMENT - The packet is intended for a client, not a server.
+  //
+  Status ProcessPacket(std::span<const std::byte> packet,
+                       ChannelOutput& interface);
 
   constexpr size_t channel_count() const { return channels_.size(); }
 
@@ -45,8 +56,12 @@
   IntrusiveList<internal::BaseServerWriter>& writers() { return writers_; }
 
  private:
+  std::tuple<Service*, const internal::Method*> FindMethod(
+      const internal::Packet& packet);
+
   void HandleCancelPacket(const internal::Packet& request,
                           internal::Channel& channel);
+  void HandleClientError(const internal::Packet& packet);
 
   internal::Channel* FindChannel(uint32_t id) const;
   internal::Channel* AssignChannel(uint32_t id, ChannelOutput& interface);
diff --git a/pw_rpc/public/pw_rpc/service.h b/pw_rpc/public/pw_rpc/service.h
index 2076896..e9b0715 100644
--- a/pw_rpc/public/pw_rpc/service.h
+++ b/pw_rpc/public/pw_rpc/service.h
@@ -14,37 +14,51 @@
 #pragma once
 
 #include <cstdint>
+#include <limits>
 #include <span>
-#include <utility>
 
 #include "pw_containers/intrusive_list.h"
+#include "pw_rpc/internal/method.h"
+#include "pw_rpc/internal/method_union.h"
 
 namespace pw::rpc {
-namespace internal {
-
-class Method;
-
-}  // namespace internal
 
 // Base class for all RPC services. This cannot be instantiated directly; use a
 // generated subclass instead.
+//
+// Services store a span of concrete method implementation classes. To support
+// different Method implementations, Service stores a base MethodUnion* and the
+// size of the concrete MethodUnion object.
 class Service : public IntrusiveList<Service>::Item {
  public:
   uint32_t id() const { return id_; }
 
  protected:
+  template <typename T, size_t method_count>
+  constexpr Service(uint32_t id, const std::array<T, method_count>& methods)
+      : id_(id),
+        methods_(methods.data()),
+        method_size_(sizeof(T)),
+        method_count_(static_cast<uint16_t>(method_count)) {
+    static_assert(method_count <= std::numeric_limits<uint16_t>::max());
+  }
+
+  // For use by tests with only one method.
   template <typename T>
-  constexpr Service(uint32_t id, T&& methods)
-      : id_(id), methods_(std::forward<T>(methods)) {}
+  constexpr Service(uint32_t id, const T& method)
+      : id_(id), methods_(&method), method_size_(sizeof(T)), method_count_(1) {}
 
  private:
   friend class Server;
+  friend class ServiceTestHelper;
 
   // Finds the method with the provided method_id. Returns nullptr if no match.
   const internal::Method* FindMethod(uint32_t method_id) const;
 
-  uint32_t id_;
-  std::span<const internal::Method> methods_;
+  const uint32_t id_;
+  const internal::MethodUnion* const methods_;
+  const uint16_t method_size_;
+  const uint16_t method_count_;
 };
 
 }  // namespace pw::rpc
diff --git a/pw_rpc/pw_rpc_private/internal_test_utils.h b/pw_rpc/pw_rpc_private/internal_test_utils.h
index c3759e7..faa4833 100644
--- a/pw_rpc/pw_rpc_private/internal_test_utils.h
+++ b/pw_rpc/pw_rpc_private/internal_test_utils.h
@@ -21,6 +21,7 @@
 #include <cstdint>
 #include <span>
 
+#include "pw_rpc/client.h"
 #include "pw_rpc/internal/channel.h"
 #include "pw_rpc/internal/method.h"
 #include "pw_rpc/internal/packet.h"
@@ -38,21 +39,25 @@
 
   std::span<std::byte> AcquireBuffer() override { return buffer_; }
 
-  void SendAndReleaseBuffer(size_t size) override {
+  Status SendAndReleaseBuffer(size_t size) override {
     if (size == 0u) {
-      return;
+      return Status::Ok();
     }
 
     packet_count_ += 1;
     sent_data_ = std::span(buffer_.data(), size);
-    EXPECT_EQ(Status::OK,
-              internal::Packet::FromBuffer(sent_data_, sent_packet_));
+    Result<internal::Packet> result = internal::Packet::FromBuffer(sent_data_);
+    EXPECT_EQ(Status::Ok(), result.status());
+    sent_packet_ = result.value_or(internal::Packet());
+    return send_status_;
   }
 
   std::span<const std::byte> buffer() const { return buffer_; }
 
   size_t packet_count() const { return packet_count_; }
 
+  void set_send_status(Status status) { send_status_ = status; }
+
   const std::span<const std::byte>& sent_data() const { return sent_data_; }
   const internal::Packet& sent_packet() const {
     EXPECT_GT(packet_count_, 0u);
@@ -64,6 +69,7 @@
   std::span<const std::byte> sent_data_;
   internal::Packet sent_packet_;
   size_t packet_count_ = 0;
+  Status send_status_;
 };
 
 // Version of the internal::Server with extra methods exposed for testing.
@@ -92,16 +98,14 @@
     server_.RegisterService(service_);
   }
 
-  ServerContextForTest() : ServerContextForTest(service_.method) {}
-
-  // Creates a packet for this context's channel, service, and method.
+  // Creates a response packet for this context's channel, service, and method.
   internal::Packet packet(std::span<const std::byte> payload) const {
-    return internal::Packet(internal::PacketType::RPC,
+    return internal::Packet(internal::PacketType::RESPONSE,
                             kChannelId,
                             kServiceId,
                             context_.method().id(),
                             payload,
-                            Status::OK);
+                            Status::Ok());
   }
 
   internal::ServerCall& get() { return context_; }
@@ -117,4 +121,46 @@
   internal::ServerCall context_;
 };
 
+template <size_t output_buffer_size = 128,
+          size_t input_buffer_size = 128,
+          uint32_t channel_id = 99,
+          uint32_t service_id = 16,
+          uint32_t method_id = 111>
+class ClientContextForTest {
+ public:
+  static constexpr uint32_t kChannelId = channel_id;
+  static constexpr uint32_t kServiceId = service_id;
+  static constexpr uint32_t kMethodId = method_id;
+
+  ClientContextForTest()
+      : channel_(Channel::Create<kChannelId>(&output_)),
+        client_(std::span(&channel_, 1)) {}
+
+  const auto& output() const { return output_; }
+  Channel& channel() { return channel_; }
+  Client& client() { return client_; }
+
+  // Sends a packet to be processed by the client. Returns the client's
+  // ProcessPacket status.
+  Status SendPacket(internal::PacketType type,
+                    Status status = Status::Ok(),
+                    std::span<const std::byte> payload = {}) {
+    internal::Packet packet(
+        type, kChannelId, kServiceId, kMethodId, payload, status);
+    std::byte buffer[input_buffer_size];
+    Result result = packet.Encode(buffer);
+    EXPECT_EQ(result.status(), Status::Ok());
+    return client_.ProcessPacket(result.value_or(ConstByteSpan()));
+  }
+
+  Status SendResponse(Status status, std::span<const std::byte> payload) {
+    return SendPacket(internal::PacketType::RESPONSE, status, payload);
+  }
+
+ private:
+  TestOutput<output_buffer_size> output_;
+  Channel channel_;
+  Client client_;
+};
+
 }  // namespace pw::rpc
diff --git a/pw_rpc/pw_rpc_protos/packet.proto b/pw_rpc/pw_rpc_protos/packet.proto
index df226c7..459ac94 100644
--- a/pw_rpc/pw_rpc_protos/packet.proto
+++ b/pw_rpc/pw_rpc_protos/packet.proto
@@ -15,26 +15,43 @@
 
 package pw.rpc.internal;
 
+option java_package = "dev.pigweed.pw.rpc.internal";
+
 enum PacketType {
-  // RPC packets correspond with a request or response for a service method.
-  RPC = 0;
+  // To simplify identifying the origin of a packet, client-to-server packets
+  // use even numbers and server-to-client packets use odd numbers.
 
-  // STREAM_END packets signal the end of a server or client stream.
-  STREAM_END = 1;
+  // Client-to-server packets
 
-  // CANCEL packets request termination of an ongoing RPC.
-  CANCEL = 2;
+  // A request from a client for a service method.
+  REQUEST = 0;
 
-  // ERROR packets are sent by the server to indicate that it received an
-  // unexpected or malformed packet.
-  ERROR = 3;
+  // A client stream has completed.
+  CLIENT_STREAM_END = 2;
+
+  // The client received a packet for an RPC it did not request.
+  CLIENT_ERROR = 4;
+
+  // The client requests cancellation of an ongoing server stream.
+  CANCEL_SERVER_STREAM = 6;
+
+  // Server-to-client packets
+
+  // A response from a server for a service method.
+  RESPONSE = 1;
+
+  // A server streaming or bidirectional RPC has completed.
+  SERVER_STREAM_END = 3;
+
+  // The server was unable to process a request.
+  SERVER_ERROR = 5;
 }
 
 message RpcPacket {
-  // The type of packet. Determines which other fields are used. Required.
+  // The type of packet. Determines which other fields are used.
   PacketType type = 1;
 
-  // Channel through which the packet is sent. Required.
+  // Channel through which the packet is sent.
   uint32 channel_id = 2;
 
   // Hash of the fully-qualified name of the service with which this packet is
diff --git a/pw_sys_io_baremetal_lm3s6965evb/early_boot.c b/pw_rpc/pw_rpc_test_protos/test.options
similarity index 88%
copy from pw_sys_io_baremetal_lm3s6965evb/early_boot.c
copy to pw_rpc/pw_rpc_test_protos/test.options
index 1670b7d..a10a02f 100644
--- a/pw_sys_io_baremetal_lm3s6965evb/early_boot.c
+++ b/pw_rpc/pw_rpc_test_protos/test.options
@@ -12,6 +12,4 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
-#include "pw_boot_armv7m/boot.h"
-
-void pw_PreStaticConstructorInit() {}
\ No newline at end of file
+pw.rpc.test.TestStreamResponse.chunk max_size:32
diff --git a/pw_rpc/py/BUILD.gn b/pw_rpc/py/BUILD.gn
new file mode 100644
index 0000000..56ca19d
--- /dev/null
+++ b/pw_rpc/py/BUILD.gn
@@ -0,0 +1,46 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/python.gni")
+
+pw_python_package("py") {
+  setup = [ "setup.py" ]
+  sources = [
+    "pw_rpc/__init__.py",
+    "pw_rpc/callback_client.py",
+    "pw_rpc/client.py",
+    "pw_rpc/codegen_nanopb.py",
+    "pw_rpc/codegen_raw.py",
+    "pw_rpc/descriptors.py",
+    "pw_rpc/ids.py",
+    "pw_rpc/packet_pb2.py",
+    "pw_rpc/packets.py",
+    "pw_rpc/plugin.py",
+    "pw_rpc/plugin_nanopb.py",
+    "pw_rpc/plugin_raw.py",
+  ]
+  tests = [
+    "callback_client_test.py",
+    "client_test.py",
+    "codegen_test.py",
+    "ids_test.py",
+    "packets_test.py",
+  ]
+  python_deps = [
+    "$dir_pw_protobuf_compiler/py",
+    "$dir_pw_status/py",
+  ]
+}
diff --git a/pw_rpc/py/callback_client_test.py b/pw_rpc/py/callback_client_test.py
index 0f27dc4..0ae02c4 100755
--- a/pw_rpc/py/callback_client_test.py
+++ b/pw_rpc/py/callback_client_test.py
@@ -19,7 +19,7 @@
 from typing import List, Tuple
 
 from pw_protobuf_compiler import python_protos
-from pw_rpc import callback_client, client, packets
+from pw_rpc import callback_client, client, packet_pb2, packets
 from pw_status import Status
 
 TEST_PROTO_1 = """\
@@ -65,8 +65,9 @@
             callback_client.Impl(), [client.Channel(1, self._handle_request)],
             self._protos.modules())
 
-        self._last_request: packets.RpcPacket = None
-        self._next_packets: List[Tuple[bytes, bool]] = []
+        self._last_request: packet_pb2.RpcPacket = None
+        self._next_packets: List[Tuple[bytes, Status]] = []
+        self._send_responses_on_request = True
 
     def _enqueue_response(self,
                           channel_id: int,
@@ -75,7 +76,7 @@
                           response=b'',
                           *,
                           ids: Tuple[int, int] = None,
-                          valid=True):
+                          process_status=Status.OK):
         if method:
             assert ids is None
             service_id, method_id = method.service.id, method.id
@@ -89,32 +90,42 @@
             payload = response.SerializeToString()
 
         self._next_packets.append(
-            (packets.RpcPacket(channel_id=channel_id,
-                               service_id=service_id,
-                               method_id=method_id,
-                               status=status.value,
-                               payload=payload).SerializeToString(), valid))
+            (packet_pb2.RpcPacket(type=packets.PacketType.RESPONSE,
+                                  channel_id=channel_id,
+                                  service_id=service_id,
+                                  method_id=method_id,
+                                  status=status.value,
+                                  payload=payload).SerializeToString(),
+             process_status))
 
     def _enqueue_stream_end(self,
                             channel_id: int,
                             method,
                             status: Status = Status.OK,
-                            valid=True):
+                            process_status=Status.OK):
         self._next_packets.append(
-            (packets.RpcPacket(type=packets.PacketType.STREAM_END,
-                               channel_id=channel_id,
-                               service_id=method.service.id,
-                               method_id=method.id,
-                               status=status.value).SerializeToString(),
-             valid))
+            (packet_pb2.RpcPacket(type=packets.PacketType.SERVER_STREAM_END,
+                                  channel_id=channel_id,
+                                  service_id=method.service.id,
+                                  method_id=method.id,
+                                  status=status.value).SerializeToString(),
+             process_status))
 
     def _handle_request(self, data: bytes):
+        # Disable this method to prevent infinite recursion if processing the
+        # packet happens to send another packet.
+        if not self._send_responses_on_request:
+            return
+
+        self._send_responses_on_request = False
+
         self._last_request = packets.decode(data)
 
-        for packet, valid in self._next_packets:
-            self.assertEqual(valid, self._client.process_packet(packet))
+        for packet, status in self._next_packets:
+            self.assertIs(status, self._client.process_packet(packet))
 
         self._next_packets.clear()
+        self._send_responses_on_request = True
 
     def _sent_payload(self, message_type):
         self.assertIsNotNone(self._last_request)
@@ -204,7 +215,8 @@
         for _ in range(3):
             self._last_request = None
             stub.SomeUnary.reinvoke(callback, magic_number=55)
-            self.assertEqual(self._last_request.type, packets.PacketType.RPC)
+            self.assertEqual(self._last_request.type,
+                             packets.PacketType.REQUEST)
 
     def test_invoke_server_streaming(self):
         stub = self._client.channel(1).rpcs.pw.test1.PublicService
@@ -267,7 +279,8 @@
 
         call.cancel()
 
-        self.assertEqual(self._last_request.type, packets.PacketType.CANCEL)
+        self.assertEqual(self._last_request.type,
+                         packets.PacketType.CANCEL_SERVER_STREAM)
 
         # Ensure the RPC can be called after being cancelled.
         self._enqueue_response(1, stub.method, response=resp)
@@ -287,19 +300,23 @@
         service_id = method.service.id
 
         # Unknown channel
-        self._enqueue_response(999, method, valid=False)
+        self._enqueue_response(999, method, process_status=Status.NOT_FOUND)
         # Bad service
-        self._enqueue_response(1, ids=(999, method.id), valid=False)
+        self._enqueue_response(1,
+                               ids=(999, method.id),
+                               process_status=Status.OK)
         # Bad method
-        self._enqueue_response(1, ids=(service_id, 999), valid=False)
-        # For RPC not pending (valid=True because the packet is processed)
+        self._enqueue_response(1,
+                               ids=(service_id, 999),
+                               process_status=Status.OK)
+        # For RPC not pending (is Status.OK because the packet is processed)
         self._enqueue_response(
             1,
             ids=(service_id,
                  rpcs.pw.test1.PublicService.SomeBidiStreaming.method.id),
-            valid=True)
+            process_status=Status.OK)
 
-        self._enqueue_response(1, method, valid=True)
+        self._enqueue_response(1, method, process_status=Status.OK)
 
         status, response = rpcs.pw.test1.PublicService.SomeUnary(
             magic_number=6)
@@ -314,7 +331,7 @@
                                method,
                                Status.OK,
                                b'INVALID DATA!!!',
-                               valid=True)
+                               process_status=Status.OK)
 
         status, response = rpcs.pw.test1.PublicService.SomeUnary(
             magic_number=6)
diff --git a/pw_rpc/py/client_test.py b/pw_rpc/py/client_test.py
index 1331bab..cb78992 100755
--- a/pw_rpc/py/client_test.py
+++ b/pw_rpc/py/client_test.py
@@ -18,7 +18,9 @@
 
 from pw_protobuf_compiler import python_protos
 from pw_rpc import callback_client, client, packets
+from pw_rpc.packet_pb2 import RpcPacket
 import pw_rpc.ids
+from pw_status import Status
 
 TEST_PROTO_1 = """\
 syntax = "proto3";
@@ -70,10 +72,10 @@
 """
 
 
-def _test_setup():
+def _test_setup(output=None):
     protos = python_protos.Library.from_strings([TEST_PROTO_1, TEST_PROTO_2])
     return protos, client.Client.from_modules(callback_client.Impl(),
-                                              [client.Channel(1, None)],
+                                              [client.Channel(1, output)],
                                               protos.modules())
 
 
@@ -112,7 +114,7 @@
             'Unary')
         self.assertEqual(
             self._channel_client.rpcs.pw.test2.Alpha.Unary.method.full_name,
-            'pw.test2.Alpha/Unary')
+            'pw.test2.Alpha.Unary')
 
     def test_iterate_over_all_methods(self):
         channel_client = self._channel_client
@@ -157,7 +159,17 @@
 class ClientTest(unittest.TestCase):
     """Tests the pw_rpc Client independently of the ClientImpl."""
     def setUp(self):
-        self._protos, self._client = _test_setup()
+        self._last_packet_sent_bytes = None
+        self._protos, self._client = _test_setup(self._save_packet)
+
+    def _save_packet(self, packet):
+        self._last_packet_sent_bytes = packet
+
+    def _last_packet_sent(self):
+        packet = RpcPacket()
+        self.assertIsNotNone(self._last_packet_sent_bytes)
+        packet.MergeFromString(self._last_packet_sent_bytes)
+        return packet
 
     def test_all_methods(self):
         services = self._client.services
@@ -209,27 +221,73 @@
                 'SomeUnary'].get_request(msg, {})
 
     def test_process_packet_invalid_proto_data(self):
-        self.assertFalse(self._client.process_packet(b'NOT a packet!'))
+        self.assertIs(self._client.process_packet(b'NOT a packet!'),
+                      Status.DATA_LOSS)
+
+    def test_process_packet_not_for_client(self):
+        self.assertIs(
+            self._client.process_packet(
+                RpcPacket(
+                    type=packets.PacketType.REQUEST).SerializeToString()),
+            Status.INVALID_ARGUMENT)
 
     def test_process_packet_unrecognized_channel(self):
-        self.assertFalse(
+        self.assertIs(
             self._client.process_packet(
-                packets.encode_request(
+                packets.encode_response(
                     (123, 456, 789),
-                    self._protos.packages.pw.test2.Request())))
+                    self._protos.packages.pw.test2.Request())),
+            Status.NOT_FOUND)
 
     def test_process_packet_unrecognized_service(self):
-        self.assertFalse(
+        self.assertIs(
             self._client.process_packet(
-                packets.encode_request(
-                    (1, 456, 789), self._protos.packages.pw.test2.Request())))
+                packets.encode_response(
+                    (1, 456, 789), self._protos.packages.pw.test2.Request())),
+            Status.OK)
+
+        self.assertEqual(
+            self._last_packet_sent(),
+            RpcPacket(type=packets.PacketType.CLIENT_ERROR,
+                      channel_id=1,
+                      service_id=456,
+                      method_id=789,
+                      status=Status.NOT_FOUND.value))
 
     def test_process_packet_unrecognized_method(self):
-        self.assertFalse(
+        service = next(iter(self._client.services))
+
+        self.assertIs(
             self._client.process_packet(
-                packets.encode_request(
-                    (1, next(iter(self._client.services)).id, 789),
-                    self._protos.packages.pw.test2.Request())))
+                packets.encode_response(
+                    (1, service.id, 789),
+                    self._protos.packages.pw.test2.Request())), Status.OK)
+
+        self.assertEqual(
+            self._last_packet_sent(),
+            RpcPacket(type=packets.PacketType.CLIENT_ERROR,
+                      channel_id=1,
+                      service_id=service.id,
+                      method_id=789,
+                      status=Status.NOT_FOUND.value))
+
+    def test_process_packet_non_pending_method(self):
+        service = next(iter(self._client.services))
+        method = next(iter(service.methods))
+
+        self.assertIs(
+            self._client.process_packet(
+                packets.encode_response(
+                    (1, service.id, method.id),
+                    self._protos.packages.pw.test2.Request())), Status.OK)
+
+        self.assertEqual(
+            self._last_packet_sent(),
+            RpcPacket(type=packets.PacketType.CLIENT_ERROR,
+                      channel_id=1,
+                      service_id=service.id,
+                      method_id=method.id,
+                      status=Status.FAILED_PRECONDITION.value))
 
 
 if __name__ == '__main__':
diff --git a/pw_rpc/py/ids_test.py b/pw_rpc/py/ids_test.py
new file mode 100755
index 0000000..28f812f
--- /dev/null
+++ b/pw_rpc/py/ids_test.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python3
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Tests service and method ID calculation for Python and C++."""
+
+from typing import Iterator
+import unittest
+
+from pw_build.generated_tests import Context, TestGenerator
+from pw_build import generated_tests
+from pw_rpc import ids
+
+_TESTS = TestGenerator([
+    'Empty string',
+    (0x00000000, ''),
+    'Single character strings',
+    (0x00000001, '\0'),
+    (0x00010040, '\1'),
+    (0x003F0F82, '?'),
+    'Non-printable strings',
+    (0xD3556087, '\0\0\0\1\1\1\1'),
+    'General strings',
+    (0x63D43D8C, 'Pigweed?'),
+    (0x79AB6494, 'Pigweed!Pigweed!Pigweed!Pigweed!Pigweed!Pigweed!'),
+])
+
+
+def _define_py_test(ctx: Context):
+    expected_id, name = ctx.test_case
+    return lambda self: self.assertEqual(expected_id, ids.calculate(name))
+
+
+IdsTest = _TESTS.python_tests('IdsTest', _define_py_test)
+
+_CC_HEADER = """\
+#include <string_view>
+
+#include "gtest/gtest.h"
+#include "pw_rpc/internal/hash.h"
+
+namespace pw::rpc::internal {
+
+using namespace std::string_view_literals;
+"""
+
+_CC_FOOTER = '}  // namespace pw::rpc::internal'
+
+
+def _cc_test(ctx: Context) -> Iterator[str]:
+    expected_id, name = ctx.test_case
+
+    yield f'TEST(RpcIds, {ctx.cc_name()}) {{'
+    yield f'    EXPECT_EQ(0x{expected_id:08x}u,'
+    yield f'              Hash({generated_tests.cc_string(name)}sv));'
+    yield '}'
+
+
+if __name__ == '__main__':
+    args = generated_tests.parse_test_generation_args()
+    if args.generate_cc_test:
+        _TESTS.cc_tests(args.generate_cc_test, _cc_test, _CC_HEADER,
+                        _CC_FOOTER)
+    else:
+        unittest.main()
diff --git a/pw_rpc/py/packets_test.py b/pw_rpc/py/packets_test.py
index 216af86..c8ed99e 100755
--- a/pw_rpc/py/packets_test.py
+++ b/pw_rpc/py/packets_test.py
@@ -17,40 +17,80 @@
 import unittest
 
 from pw_rpc import packets
+from pw_rpc.packet_pb2 import RpcPacket
+from pw_status import Status
 
-_TEST_REQUEST = packets.RpcPacket(
-    type=packets.PacketType.RPC,
-    channel_id=1,
-    service_id=2,
-    method_id=3,
-    payload=packets.RpcPacket(status=321).SerializeToString())
+_TEST_REQUEST = RpcPacket(type=packets.PacketType.REQUEST,
+                          channel_id=1,
+                          service_id=2,
+                          method_id=3,
+                          payload=RpcPacket(status=321).SerializeToString())
 
 
 class PacketsTest(unittest.TestCase):
+    """Tests for packet encoding and decoding."""
     def test_encode_request(self):
-        data = packets.encode_request((1, 2, 3), packets.RpcPacket(status=321))
-        packet = packets.RpcPacket()
+        data = packets.encode_request((1, 2, 3), RpcPacket(status=321))
+        packet = RpcPacket()
         packet.ParseFromString(data)
 
         self.assertEqual(_TEST_REQUEST, packet)
 
+    def test_encode_response(self):
+        response = RpcPacket(type=packets.PacketType.RESPONSE,
+                             channel_id=1,
+                             service_id=2,
+                             method_id=3,
+                             payload=RpcPacket(status=321).SerializeToString())
+
+        data = packets.encode_response((1, 2, 3), RpcPacket(status=321))
+        packet = RpcPacket()
+        packet.ParseFromString(data)
+
+        self.assertEqual(response, packet)
+
     def test_encode_cancel(self):
         data = packets.encode_cancel((9, 8, 7))
 
-        packet = packets.RpcPacket()
+        packet = RpcPacket()
         packet.ParseFromString(data)
 
         self.assertEqual(
             packet,
-            packets.RpcPacket(type=packets.PacketType.CANCEL,
-                              channel_id=9,
-                              service_id=8,
-                              method_id=7))
+            RpcPacket(type=packets.PacketType.CANCEL_SERVER_STREAM,
+                      channel_id=9,
+                      service_id=8,
+                      method_id=7))
+
+    def test_encode_client_error(self):
+        data = packets.encode_client_error(_TEST_REQUEST, Status.NOT_FOUND)
+
+        packet = RpcPacket()
+        packet.ParseFromString(data)
+
+        self.assertEqual(
+            packet,
+            RpcPacket(type=packets.PacketType.CLIENT_ERROR,
+                      channel_id=1,
+                      service_id=2,
+                      method_id=3,
+                      status=Status.NOT_FOUND.value))
 
     def test_decode(self):
         self.assertEqual(_TEST_REQUEST,
                          packets.decode(_TEST_REQUEST.SerializeToString()))
 
+    def test_for_server(self):
+        self.assertTrue(packets.for_server(_TEST_REQUEST))
+
+        self.assertFalse(
+            packets.for_server(
+                RpcPacket(type=packets.PacketType.RESPONSE,
+                          channel_id=1,
+                          service_id=2,
+                          method_id=3,
+                          payload=RpcPacket(status=321).SerializeToString())))
+
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/pw_protobuf_compiler/nanopb.gni b/pw_rpc/py/pw_rpc/__init__.py
similarity index 78%
copy from pw_protobuf_compiler/nanopb.gni
copy to pw_rpc/py/pw_rpc/__init__.py
index 3c3be32..1f1e72e 100644
--- a/pw_protobuf_compiler/nanopb.gni
+++ b/pw_rpc/py/pw_rpc/__init__.py
@@ -11,9 +11,7 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations under
 # the License.
+"""Package for calling Pigweed RPCs from Python."""
 
-declare_args() {
-  # If compiling protos for nanopb, this variable is set to the path to the
-  # nanopb installation.
-  dir_pw_third_party_nanopb = ""
-}
+from pw_rpc.client import Client
+from pw_rpc.descriptors import Channel
diff --git a/pw_rpc/py/pw_rpc/callback_client.py b/pw_rpc/py/pw_rpc/callback_client.py
index da98a42..e61490a 100644
--- a/pw_rpc/py/pw_rpc/callback_client.py
+++ b/pw_rpc/py/pw_rpc/callback_client.py
@@ -75,19 +75,25 @@
 
     def invoke(self, callback: Callback, _request=None, **request_fields):
         """Invokes an RPC with a callback."""
-        self._rpcs.invoke(self._rpc,
-                          self.method.get_request(_request, request_fields),
-                          callback)
+        self._rpcs.send_request(self._rpc,
+                                self.method.get_request(
+                                    _request, request_fields),
+                                callback,
+                                override_pending=False)
         return _AsyncCall(self._rpcs, self._rpc)
 
     def reinvoke(self, callback: Callback, _request=None, **request_fields):
         """Invokes an RPC with a callback, overriding any pending requests."""
-        self._rpcs.invoke(self._rpc,
-                          self.method.get_request(_request, request_fields),
-                          callback,
-                          override_pending=True)
+        self._rpcs.send_request(self._rpc,
+                                self.method.get_request(
+                                    _request, request_fields),
+                                callback,
+                                override_pending=True)
         return _AsyncCall(self._rpcs, self._rpc)
 
+    def __repr__(self) -> str:
+        return repr(self.method)
+
 
 class _AsyncCall:
     """Represents an ongoing callback-based call."""
@@ -99,7 +105,7 @@
         self._rpcs = rpcs
 
     def cancel(self) -> bool:
-        return self._rpcs.cancel(self.rpc)
+        return self._rpcs.send_cancel(self.rpc)
 
     def __enter__(self) -> '_AsyncCall':
         return self
@@ -129,7 +135,7 @@
 class UnaryMethodClient(_MethodClient):
     def __call__(self, _request=None, **request_fields) -> Tuple[Status, Any]:
         responses: queue.SimpleQueue = queue.SimpleQueue()
-        self.invoke(
+        self.reinvoke(
             lambda _, status, payload: responses.put((status, payload)),
             _request, **request_fields)
         return responses.get()
@@ -138,7 +144,7 @@
 class ServerStreamingMethodClient(_MethodClient):
     def __call__(self, _request=None, **request_fields) -> _StreamingResponses:
         responses: queue.SimpleQueue = queue.SimpleQueue()
-        self.invoke(
+        self.reinvoke(
             lambda _, status, payload: responses.put((status, payload)),
             _request, **request_fields)
         return _StreamingResponses(responses)
@@ -201,5 +207,5 @@
         try:
             context(rpc, status, payload, *args, **kwargs)
         except:  # pylint: disable=bare-except
-            rpcs.cancel(rpc)
+            rpcs.send_cancel(rpc)
             _LOG.exception('Callback %s for %s raised exception', context, rpc)
diff --git a/pw_rpc/py/pw_rpc/client.py b/pw_rpc/py/pw_rpc/client.py
index b2db2b4..4d3ec55 100644
--- a/pw_rpc/py/pw_rpc/client.py
+++ b/pw_rpc/py/pw_rpc/client.py
@@ -20,6 +20,7 @@
 from typing import Optional
 
 from pw_rpc import descriptors, packets
+from pw_rpc.packet_pb2 import RpcPacket
 from pw_rpc.packets import PacketType
 from pw_rpc.descriptors import Channel, Service, Method
 from pw_status import Status
@@ -42,16 +43,16 @@
 
 
 class PendingRpcs:
-    """Internal object for tracking whether an RPC is pending."""
+    """Tracks pending RPCs and encodes outgoing RPC packets."""
     def __init__(self):
         self._pending: Dict[PendingRpc, List] = {}
 
-    def invoke(self,
-               rpc: PendingRpc,
-               request,
-               context,
-               override_pending=False) -> None:
-        """Invokes the provided RPC."""
+    def request(self,
+                rpc: PendingRpc,
+                request,
+                context,
+                override_pending: bool = False) -> bytes:
+        """Starts the provided RPC and returns the encoded packet to send."""
         # Ensure that every context is a unique object by wrapping it in a list.
         unique_ctx = [context]
 
@@ -63,36 +64,50 @@
                         'Cancel the RPC before invoking it again')
 
         _LOG.debug('Starting %s', rpc)
-        rpc.channel.output(packets.encode_request(rpc, request))
+        return packets.encode_request(rpc, request)
 
-    def cancel(self, rpc: PendingRpc) -> bool:
-        """Cancels the RPC, including sending a CANCEL packet.
+    def send_request(self,
+                     rpc: PendingRpc,
+                     request,
+                     context,
+                     override_pending: bool = False) -> None:
+        """Calls request and sends the resulting packet to the channel."""
+        # TODO(hepler): Remove `type: ignore` on this and similar lines when
+        #     https://github.com/python/mypy/issues/5485 is fixed
+        rpc.channel.output(  # type: ignore
+            self.request(rpc, request, context, override_pending))
+
+    def cancel(self, rpc: PendingRpc) -> Optional[bytes]:
+        """Cancels the RPC and returns the CANCEL packet to send, if any.
 
-        Returns:
-          True if the RPC was cancelled; False if it was not pending
+        Returns:
+          The encoded CANCEL packet, or None if no packet needs to be sent
+
+        Raises:
+          KeyError if the RPC is not pending
         """
+        _LOG.debug('Cancelling %s', rpc)
+        del self._pending[rpc]
+
+        if rpc.method.type is Method.Type.UNARY:
+            return None
+
+        return packets.encode_cancel(rpc)
+
+    def send_cancel(self, rpc: PendingRpc) -> bool:
+        """Calls cancel and sends the cancel packet, if any, to the channel."""
         try:
-            _LOG.debug('Cancelling %s', rpc)
-            del self._pending[rpc]
+            packet = self.cancel(rpc)
         except KeyError:
             return False
 
-        if rpc.method.type is not Method.Type.UNARY:
-            rpc.channel.output(packets.encode_cancel(rpc))
-
-        return True
-
-    def clear(self, rpc: PendingRpc) -> bool:
-        """Clears the RPC's pending status without sending a CANCEL packet."""
-        try:
-            _LOG.debug('Clearing %s', rpc)
-            del self._pending[rpc]
-        except KeyError:
-            return False
+        if packet:
+            rpc.channel.output(packet)  # type: ignore
 
         return True
 
     def get_pending(self, rpc: PendingRpc, status: Optional[Status]):
+        """Gets the pending RPC's context. If status is set, clears the RPC."""
         if status is None:
             return self._pending[rpc][0]  # Unwrap the context from the list
 
@@ -132,33 +147,51 @@
         """
 
 
-class _MethodClients(descriptors.ServiceAccessor):
+class ServiceClient(descriptors.ServiceAccessor):
     """Navigates the methods in a service provided by a ChannelClient."""
     def __init__(self, rpcs: PendingRpcs, client_impl: ClientImpl,
-                 channel: Channel, methods: Collection[Method]):
+                 channel: Channel, service: Service):
         super().__init__(
             {
                 method: client_impl.method_client(rpcs, channel, method)
-                for method in methods
+                for method in service.methods
             },
             as_attrs='members')
 
+        self._channel = channel
+        self._service = service
 
-class _ServiceClients(descriptors.ServiceAccessor[_MethodClients]):
+    def __repr__(self) -> str:
+        return (f'Service({self._service.full_name!r}, '
+                f'methods={[m.name for m in self._service.methods]}, '
+                f'channel={self._channel.id})')
+
+    def __str__(self) -> str:
+        return str(self._service)
+
+
+class Services(descriptors.ServiceAccessor[ServiceClient]):
     """Navigates the services provided by a ChannelClient."""
     def __init__(self, rpcs: PendingRpcs, client_impl, channel: Channel,
                  services: Collection[Service]):
         super().__init__(
             {
-                s: _MethodClients(rpcs, client_impl, channel, s.methods)
+                s: ServiceClient(rpcs, client_impl, channel, s)
                 for s in services
             },
             as_attrs='packages')
 
+        self._channel = channel
+        self._services = services
+
+    def __repr__(self) -> str:
+        return (f'Services(channel={self._channel.id}, '
+                f'services={[s.full_name for s in self._services]})')
+
 
 def _decode_status(rpc: PendingRpc, packet) -> Optional[Status]:
     # Server streaming RPC packets never have a status; all other packets do.
-    if packet.type == PacketType.RPC and rpc.method.server_streaming:
+    if packet.type == PacketType.RESPONSE and rpc.method.server_streaming:
         return None
 
     try:
@@ -170,7 +203,7 @@
 
 
 def _decode_payload(rpc: PendingRpc, packet):
-    if packet.type == PacketType.RPC:
+    if packet.type == PacketType.RESPONSE:
         try:
             return packets.decode_payload(packet, rpc.method.response_type)
         except packets.DecodeError as err:
@@ -209,7 +242,7 @@
     """
     client: 'Client'
     channel: Channel
-    rpcs: _ServiceClients
+    rpcs: Services
 
     def method(self, method_name: str):
         """Returns a method client matching the given name.
@@ -223,11 +256,18 @@
         """
         return descriptors.get_method(self.rpcs, method_name)
 
+    def services(self) -> Iterator:
+        return iter(self.rpcs)
+
     def methods(self) -> Iterator:
         """Iterates over all method clients in this ChannelClient."""
         for service_client in self.rpcs:
             yield from service_client
 
+    def __repr__(self) -> str:
+        return (f'ChannelClient(channel={self.channel.id}, '
+                f'services={[str(s) for s in self.services()]})')
+
 
 class Client:
     """Sends requests and handles responses for a set of channels.
@@ -253,8 +293,7 @@
         self._channels_by_id = {
             channel.id: ChannelClient(
                 self, channel,
-                _ServiceClients(self._rpcs, self._impl, channel,
-                                self.services))
+                Services(self._rpcs, self._impl, channel, self.services))
             for channel in channels
         }
 
@@ -262,6 +301,10 @@
         """Returns a ChannelClient, which is used to call RPCs on a channel."""
         return self._channels_by_id[channel_id]
 
+    def channels(self) -> Iterable[ChannelClient]:
+        """Accesses the ChannelClients in this client."""
+        return self._channels_by_id.values()
+
     def method(self, method_name: str) -> Method:
         """Returns a Method matching the given name.
 
@@ -280,50 +323,69 @@
             yield from service.methods
 
     def process_packet(self, pw_rpc_raw_packet_data: bytes, *impl_args,
-                       **impl_kwargs) -> bool:
+                       **impl_kwargs) -> Status:
         """Processes an incoming packet.
 
         Args:
-            pw_rpc_raw_packet_data: raw binary data for exactly one RPC packet
-            impl_args: optional positional arguments passed to the ClientImpl
-            impl_kwargs: optional keyword arguments passed to the ClientImpl
+          pw_rpc_raw_packet_data: raw binary data for exactly one RPC packet
+          impl_args: optional positional arguments passed to the ClientImpl
+          impl_kwargs: optional keyword arguments passed to the ClientImpl
 
         Returns:
-            True if the packet was decoded and handled by this client
+          OK - the packet was processed by this client
+          DATA_LOSS - the packet could not be decoded
+          INVALID_ARGUMENT - the packet is for a server, not a client
+          NOT_FOUND - the packet's channel ID is not known to this client
         """
         try:
             packet = packets.decode(pw_rpc_raw_packet_data)
         except packets.DecodeError as err:
             _LOG.warning('Failed to decode packet: %s', err)
             _LOG.debug('Raw packet: %r', pw_rpc_raw_packet_data)
-            return False
+            return Status.DATA_LOSS
+
+        if packets.for_server(packet):
+            return Status.INVALID_ARGUMENT
 
         try:
-            rpc = self._lookup_rpc(packet)
+            channel_client = self._channels_by_id[packet.channel_id]
+        except KeyError:
+            _LOG.warning('Unrecognized channel ID %d', packet.channel_id)
+            return Status.NOT_FOUND
+
+        try:
+            rpc = self._look_up_service_and_method(packet, channel_client)
         except ValueError as err:
-            _LOG.warning('Unable to process packet: %s', err)
-            _LOG.debug('Packet:\n%s', packet)
-            return False
+            channel_client.channel.output(  # type: ignore
+                packets.encode_client_error(packet, Status.NOT_FOUND))
+            _LOG.warning('%s', err)
+            return Status.OK
 
         status = _decode_status(rpc, packet)
 
-        if packet.type == PacketType.ERROR:
-            self._rpcs.clear(rpc)
-            _LOG.warning('%s: invocation failed with %s', rpc, status)
-            return True  # Handled packet, even though it was an error
-
-        if packet.type not in (PacketType.RPC, PacketType.STREAM_END):
+        if packet.type not in (PacketType.RESPONSE,
+                               PacketType.SERVER_STREAM_END,
+                               PacketType.SERVER_ERROR):
             _LOG.error('%s: unexpected PacketType %s', rpc, packet.type)
             _LOG.debug('Packet:\n%s', packet)
-            return True
+            return Status.OK
 
         payload = _decode_payload(rpc, packet)
 
         try:
             context = self._rpcs.get_pending(rpc, status)
         except KeyError:
+            channel_client.channel.output(  # type: ignore
+                packets.encode_client_error(packet,
+                                            Status.FAILED_PRECONDITION))
             _LOG.debug('Discarding response for %s, which is not pending', rpc)
-            return True
+            return Status.OK
+
+        if packet.type == PacketType.SERVER_ERROR:
+            _LOG.warning('%s: invocation failed with %s', rpc, status)
+
+            # Do not return yet -- call process_response so the ClientImpl can
+            # do any necessary cleanup.
 
         self._impl.process_response(self._rpcs,
                                     rpc,
@@ -332,14 +394,11 @@
                                     payload,
                                     args=impl_args,
                                     kwargs=impl_kwargs)
-        return True
+        return Status.OK
 
-    def _lookup_rpc(self, packet: packets.RpcPacket) -> PendingRpc:
-        try:
-            channel_client = self._channels_by_id[packet.channel_id]
-        except KeyError:
-            raise ValueError(f'Unrecognized channel ID {packet.channel_id}')
-
+    def _look_up_service_and_method(
+            self, packet: RpcPacket,
+            channel_client: ChannelClient) -> PendingRpc:
         try:
             service = self.services[packet.service_id]
         except KeyError:
@@ -352,3 +411,7 @@
                 f'No method ID {packet.method_id} in service {service.name}')
 
         return PendingRpc(channel_client.channel, service, method)
+
+    def __repr__(self) -> str:
+        return (f'pw_rpc.Client(channels={list(self._channels_by_id)}, '
+                f'services={[s.full_name for s in self.services]})')
diff --git a/pw_rpc/py/pw_rpc/codegen_nanopb.py b/pw_rpc/py/pw_rpc/codegen_nanopb.py
index 5aa21b8..9e5ca78 100644
--- a/pw_rpc/py/pw_rpc/codegen_nanopb.py
+++ b/pw_rpc/py/pw_rpc/codegen_nanopb.py
@@ -15,10 +15,10 @@
 
 from datetime import datetime
 import os
-from typing import Iterable
+from typing import Iterable, cast
 
 from pw_protobuf.output_file import OutputFile
-from pw_protobuf.proto_tree import ProtoNode, ProtoServiceMethod
+from pw_protobuf.proto_tree import ProtoNode, ProtoService, ProtoServiceMethod
 from pw_protobuf.proto_tree import build_node_tree
 import pw_rpc.ids
 
@@ -32,16 +32,6 @@
 RPC_NAMESPACE = '::pw::rpc'
 
 
-def _invoker_name(method: ProtoServiceMethod) -> str:
-    """Name for the function that invokes the user-defined service method.
-
-    The generated class uses a different name for the methods than the derived
-    class. If it used the same name, the invocation would appear to the compiler
-    as a recursive function call if the derived class failed to define a method.
-    """
-    return 'Invoke_' + method.name()
-
-
 def _proto_filename_to_nanopb_header(proto_file: str) -> str:
     """Returns the generated nanopb header name for a .proto file."""
     return os.path.splitext(proto_file)[0] + NANOPB_H_EXTENSION
@@ -57,94 +47,48 @@
                                 output: OutputFile) -> None:
     """Generates a nanopb method descriptor for an RPC method."""
 
-    method_class = f'{RPC_NAMESPACE}::internal::Method'
-
-    if method.type() == ProtoServiceMethod.Type.UNARY:
-        func = f'{method_class}::Unary<{_invoker_name(method)}>'
-    elif method.type() == ProtoServiceMethod.Type.SERVER_STREAMING:
-        func = f'{method_class}::ServerStreaming<{_invoker_name(method)}>'
-    else:
-        raise NotImplementedError(
-            'Only unary and server streaming RPCs are currently supported')
-
     method_id = pw_rpc.ids.calculate(method.name())
     req_fields = f'{method.request_type().nanopb_name()}_fields'
     res_fields = f'{method.response_type().nanopb_name()}_fields'
+    impl_method = f'&Implementation::{method.name()}'
 
-    output.write_line(f'{func}(')
+    output.write_line(
+        f'{RPC_NAMESPACE}::internal::GetNanopbOrRawMethodFor<{impl_method}>(')
     with output.indent(4):
         output.write_line(f'0x{method_id:08x},  // Hash of "{method.name()}"')
         output.write_line(f'{req_fields},')
         output.write_line(f'{res_fields}),')
 
 
-def _generate_code_for_method(method: ProtoServiceMethod,
-                              output: OutputFile) -> None:
-    """Generates the function singature of a nanopb RPC method."""
+def _generate_method_lookup_function(output: OutputFile):
+    """Generates a function that gets a Method object from its ID."""
+    nanopb_method = f'{RPC_NAMESPACE}::internal::NanopbMethod'
 
-    req_type = method.request_type().nanopb_name()
-    res_type = method.response_type().nanopb_name()
-    implementation_cast = 'static_cast<Implementation&>(call.service())'
-
-    output.write_line()
-
-    if method.type() == ProtoServiceMethod.Type.UNARY:
-        output.write_line(f'static ::pw::Status {_invoker_name(method)}(')
-        with output.indent(4):
-            output.write_line('::pw::rpc::internal::ServerCall& call,')
-            output.write_line(f'const {req_type}& request,')
-            output.write_line(f'{res_type}& response) {{')
-        with output.indent():
-            output.write_line(f'return {implementation_cast}')
-            output.write_line(
-                f'    .{method.name()}(call.context(), request, response);')
-        output.write_line('}')
-    elif method.type() == ProtoServiceMethod.Type.SERVER_STREAMING:
-        output.write_line(f'static void {_invoker_name(method)}(')
-        with output.indent(4):
-            output.write_line('::pw::rpc::internal::ServerCall& call,')
-            output.write_line(f'const {req_type}& request,')
-            output.write_line(f'ServerWriter<{res_type}>& writer) {{')
-        with output.indent():
-            output.write_line(implementation_cast)
-            output.write_line(
-                f'    .{method.name()}(call.context(), request, writer);')
-        output.write_line('}')
-    else:
-        raise NotImplementedError(
-            'Only unary and server streaming RPCs are currently supported')
-
-
-def _generate_method_lookup_function(service: ProtoNode, output: OutputFile):
-    """Generates a function that gets the Method from a function pointer."""
-    output.write_line('template <auto impl_method>')
     output.write_line(
-        'static constexpr const ::pw::rpc::internal::Method* MethodFor() {')
+        f'static constexpr const {nanopb_method}* NanopbMethodFor(')
+    output.write_line('    uint32_t id) {')
 
     with output.indent():
-        for i, method in enumerate(service.methods()):
+        output.write_line('for (auto& method : kMethods) {')
+        with output.indent():
+            output.write_line('if (method.nanopb_method().id() == id) {')
             output.write_line(
-                'if constexpr (std::is_same_v<decltype(impl_method), '
-                f'decltype(&Implementation::{method.name()})>) {{')
-
-            with output.indent():
-                output.write_line(
-                    'if constexpr ('
-                    f'impl_method == &Implementation::{method.name()}) {{')
-                output.write_line(f'  return &std::get<{i}>(kMethods);')
-                output.write_line('}')
-
+                f'  return &static_cast<const {nanopb_method}&>(')
+            output.write_line('    method.nanopb_method());')
             output.write_line('}')
+        output.write_line('}')
 
         output.write_line('return nullptr;')
 
     output.write_line('}')
 
 
-def _generate_code_for_service(service: ProtoNode, root: ProtoNode,
+def _generate_code_for_service(service: ProtoService, root: ProtoNode,
                                output: OutputFile) -> None:
     """Generates a C++ derived class for a nanopb RPC service."""
 
+    output.write_line('namespace generated {')
+
     base_class = f'{RPC_NAMESPACE}::Service'
     output.write_line('\ntemplate <typename Implementation>')
     output.write_line(
@@ -178,6 +122,9 @@
         output.write_line(
             'constexpr void _PwRpcInternalGeneratedBase() const {}')
 
+        output.write_line()
+        _generate_method_lookup_function(output)
+
     service_name_hash = pw_rpc.ids.calculate(service.proto_path())
     output.write_line('\n private:')
 
@@ -187,31 +134,97 @@
             f'static constexpr uint32_t kServiceId = 0x{service_name_hash:08x};'
         )
 
-        for method in service.methods():
-            _generate_code_for_method(method, output)
-
         output.write_line()
 
         # Generate the method table
-        output.write_line(
-            f'static constexpr std::array<{RPC_NAMESPACE}::internal::Method,'
-            f' {len(service.methods())}> kMethods = {{')
+        output.write_line('static constexpr std::array<'
+                          f'{RPC_NAMESPACE}::internal::NanopbMethodUnion,'
+                          f' {len(service.methods())}> kMethods = {{')
 
         with output.indent(4):
             for method in service.methods():
                 _generate_method_descriptor(method, output)
 
-        output.write_line('};\n')
-
-        _generate_method_lookup_function(service, output)
-
-        output.write_line()
-        output.write_line('template <auto>')
-        output.write_line(
-            'friend class ::pw::rpc::internal::ServiceMethodTraits;')
+        output.write_line('};')
 
     output.write_line('};')
 
+    output.write_line('\n}  // namespace generated\n')
+
+
+def _generate_code_for_client_method(method: ProtoServiceMethod,
+                                     output: OutputFile) -> None:
+    """Outputs client code for a single RPC method."""
+
+    req = method.request_type().nanopb_name()
+    res = method.response_type().nanopb_name()
+    method_id = pw_rpc.ids.calculate(method.name())
+
+    if method.type() == ProtoServiceMethod.Type.UNARY:
+        callback = f'{RPC_NAMESPACE}::UnaryResponseHandler<{res}>'
+    elif method.type() == ProtoServiceMethod.Type.SERVER_STREAMING:
+        callback = f'{RPC_NAMESPACE}::ServerStreamingResponseHandler<{res}>'
+    else:
+        raise NotImplementedError(
+            'Only unary and server streaming RPCs are currently supported')
+
+    output.write_line()
+    output.write_line(f'static NanopbClientCall<\n    {callback}>')
+    output.write_line(f'{method.name()}({RPC_NAMESPACE}::Channel& channel,')
+    with output.indent(len(method.name()) + 1):
+        output.write_line(f'const {req}& request,')
+        output.write_line(f'{callback}& callback) {{')
+
+    with output.indent():
+        output.write_line(f'NanopbClientCall<{callback}>')
+        output.write_line('    call(&channel,')
+        with output.indent(9):
+            output.write_line('kServiceId,')
+            output.write_line(
+                f'0x{method_id:08x},  // Hash of "{method.name()}"')
+            output.write_line('callback,')
+            output.write_line(f'{req}_fields,')
+            output.write_line(f'{res}_fields);')
+        output.write_line('call.SendRequest(&request);')
+        output.write_line('return call;')
+
+    output.write_line('}')
+
+
+def _generate_code_for_client(service: ProtoService, root: ProtoNode,
+                              output: OutputFile) -> None:
+    """Outputs client code for an RPC service."""
+
+    output.write_line('namespace nanopb {')
+
+    class_name = f'{service.cpp_namespace(root)}Client'
+    output.write_line(f'\nclass {class_name} {{')
+    output.write_line(' public:')
+
+    with output.indent():
+        output.write_line('template <typename T>')
+        output.write_line(
+            f'using NanopbClientCall = {RPC_NAMESPACE}::NanopbClientCall<T>;')
+
+        output.write_line('')
+        output.write_line(f'{class_name}() = delete;')
+
+        for method in service.methods():
+            _generate_code_for_client_method(method, output)
+
+    service_name_hash = pw_rpc.ids.calculate(service.proto_path())
+    output.write_line('\n private:')
+
+    with output.indent():
+        output.write_line(f'// Hash of "{service.proto_path()}".')
+        output.write_line(
+            f'static constexpr uint32_t kServiceId = 0x{service_name_hash:08x};'
+        )
+
+    output.write_line('};')
+
+    output.write_line('\n}  // namespace nanopb\n')
+
 
 def generate_code_for_package(file_descriptor_proto, package: ProtoNode,
                               output: OutputFile) -> None:
@@ -221,14 +234,15 @@
 
     output.write_line(f'// {os.path.basename(output.name())} automatically '
                       f'generated by {PLUGIN_NAME} {PLUGIN_VERSION}')
-    output.write_line(f'// on {datetime.now()}')
+    output.write_line(f'// on {datetime.now().isoformat()}')
     output.write_line('// clang-format off')
     output.write_line('#pragma once\n')
     output.write_line('#include <array>')
     output.write_line('#include <cstddef>')
     output.write_line('#include <cstdint>')
     output.write_line('#include <type_traits>\n')
-    output.write_line('#include "pw_rpc/internal/method.h"')
+    output.write_line('#include "pw_rpc/internal/nanopb_method_union.h"')
+    output.write_line('#include "pw_rpc/nanopb_client_call.h"')
     output.write_line('#include "pw_rpc/server_context.h"')
     output.write_line('#include "pw_rpc/service.h"')
 
@@ -239,11 +253,6 @@
         file_descriptor_proto.name)
     output.write_line(f'#include "{nanopb_header}"\n')
 
-    output.write_line('namespace pw::rpc::internal {\n')
-    output.write_line('template <auto>')
-    output.write_line('class ServiceMethodTraits;')
-    output.write_line('\n}  // namespace pw::rpc::internal\n')
-
     if package.cpp_namespace():
         file_namespace = package.cpp_namespace()
         if file_namespace.startswith('::'):
@@ -251,13 +260,12 @@
 
         output.write_line(f'namespace {file_namespace} {{')
 
-    output.write_line('namespace generated {')
-
     for node in package:
         if node.type() == ProtoNode.Type.SERVICE:
-            _generate_code_for_service(node, package, output)
-
-    output.write_line('\n}  // namespace generated')
+            _generate_code_for_service(cast(ProtoService, node), package,
+                                       output)
+            _generate_code_for_client(cast(ProtoService, node), package,
+                                      output)
 
     if package.cpp_namespace():
         output.write_line(f'}}  // namespace {file_namespace}')
diff --git a/pw_rpc/py/pw_rpc/codegen_raw.py b/pw_rpc/py/pw_rpc/codegen_raw.py
new file mode 100644
index 0000000..6babe1c
--- /dev/null
+++ b/pw_rpc/py/pw_rpc/codegen_raw.py
@@ -0,0 +1,181 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""This module generates the code for raw pw_rpc services."""
+
+from datetime import datetime
+import os
+from typing import Iterable, cast
+
+from pw_protobuf.output_file import OutputFile
+from pw_protobuf.proto_tree import ProtoNode, ProtoService, ProtoServiceMethod
+from pw_protobuf.proto_tree import build_node_tree
+import pw_rpc.ids
+
+PLUGIN_NAME = 'pw_rpc_codegen'
+PLUGIN_VERSION = '0.1.0'
+
+PROTO_H_EXTENSION = '.pb.h'
+
+RPC_NAMESPACE = '::pw::rpc'
+
+
+def _proto_filename_to_generated_header(proto_file: str) -> str:
+    """Returns the generated C++ RPC header name for a .proto file."""
+    filename = os.path.splitext(proto_file)[0]
+    return f'{filename}.raw_rpc{PROTO_H_EXTENSION}'
+
+
+def _generate_method_descriptor(method: ProtoServiceMethod,
+                                output: OutputFile) -> None:
+    """Generates a method descriptor for a raw RPC method."""
+
+    method_id = pw_rpc.ids.calculate(method.name())
+    impl_method = f'&Implementation::{method.name()}'
+
+    output.write_line(
+        f'{RPC_NAMESPACE}::internal::GetRawMethodFor<{impl_method}>(')
+    output.write_line(f'    0x{method_id:08x}),  // Hash of "{method.name()}"')
+
+
+def _generate_method_lookup_function(output: OutputFile):
+    """Generates a function that gets a Method object from its ID."""
+    raw_method = f'{RPC_NAMESPACE}::internal::RawMethod'
+
+    output.write_line(f'static constexpr const {raw_method}* RawMethodFor(')
+    output.write_line('    uint32_t id) {')
+
+    with output.indent():
+        output.write_line('for (auto& method : kMethods) {')
+        with output.indent():
+            output.write_line('if (method.raw_method().id() == id) {')
+            output.write_line(f'  return &static_cast<const {raw_method}&>(')
+            output.write_line('    method.raw_method());')
+            output.write_line('}')
+        output.write_line('}')
+
+        output.write_line('return nullptr;')
+
+    output.write_line('}')
+
+
+def _generate_code_for_service(service: ProtoService, root: ProtoNode,
+                               output: OutputFile) -> None:
+    """Generates a C++ base class for a raw RPC service."""
+
+    base_class = f'{RPC_NAMESPACE}::Service'
+    output.write_line('\ntemplate <typename Implementation>')
+    output.write_line(
+        f'class {service.cpp_namespace(root)} : public {base_class} {{')
+    output.write_line(' public:')
+
+    with output.indent():
+        output.write_line(
+            f'using ServerContext = {RPC_NAMESPACE}::ServerContext;')
+        output.write_line(
+            f'using RawServerWriter = {RPC_NAMESPACE}::RawServerWriter;')
+        output.write_line()
+
+        output.write_line(f'constexpr {service.name()}()')
+        output.write_line(f'    : {base_class}(kServiceId, kMethods) {{}}')
+
+        output.write_line()
+        output.write_line(
+            f'{service.name()}(const {service.name()}&) = delete;')
+        output.write_line(f'{service.name()}& operator='
+                          f'(const {service.name()}&) = delete;')
+
+        output.write_line()
+        output.write_line(f'static constexpr const char* name() '
+                          f'{{ return "{service.name()}"; }}')
+
+        output.write_line()
+        output.write_line(
+            '// Used by ServiceMethodTraits to identify a base service.')
+        output.write_line(
+            'constexpr void _PwRpcInternalGeneratedBase() const {}')
+
+        output.write_line()
+        _generate_method_lookup_function(output)
+
+    service_name_hash = pw_rpc.ids.calculate(service.proto_path())
+    output.write_line('\n private:')
+
+    with output.indent():
+        output.write_line(f'// Hash of "{service.proto_path()}".')
+        output.write_line(
+            f'static constexpr uint32_t kServiceId = 0x{service_name_hash:08x};'
+        )
+
+        output.write_line()
+
+        # Generate the method table
+        output.write_line('static constexpr std::array<'
+                          f'{RPC_NAMESPACE}::internal::RawMethodUnion,'
+                          f' {len(service.methods())}> kMethods = {{')
+
+        with output.indent(4):
+            for method in service.methods():
+                _generate_method_descriptor(method, output)
+
+        output.write_line('};')
+
+    output.write_line('};')
+
+
+def _generate_code_for_package(package: ProtoNode, output: OutputFile) -> None:
+    """Generates code for a header file corresponding to a .proto file."""
+    assert package.type() == ProtoNode.Type.PACKAGE
+
+    output.write_line(f'// {os.path.basename(output.name())} automatically '
+                      f'generated by {PLUGIN_NAME} {PLUGIN_VERSION}')
+    output.write_line(f'// on {datetime.now().isoformat()}')
+    output.write_line('// clang-format off')
+    output.write_line('#pragma once\n')
+    output.write_line('#include <array>')
+    output.write_line('#include <cstddef>')
+    output.write_line('#include <cstdint>')
+    output.write_line('#include <type_traits>\n')
+    output.write_line('#include "pw_rpc/internal/raw_method_union.h"')
+    output.write_line('#include "pw_rpc/server_context.h"')
+    output.write_line('#include "pw_rpc/service.h"\n')
+
+    if package.cpp_namespace():
+        file_namespace = package.cpp_namespace()
+        if file_namespace.startswith('::'):
+            file_namespace = file_namespace[2:]
+
+        output.write_line(f'namespace {file_namespace} {{')
+
+    output.write_line('namespace generated {')
+
+    for node in package:
+        if node.type() == ProtoNode.Type.SERVICE:
+            _generate_code_for_service(cast(ProtoService, node), package,
+                                       output)
+
+    output.write_line('\n}  // namespace generated')
+
+    if package.cpp_namespace():
+        output.write_line(f'}}  // namespace {file_namespace}')
+
+
+def process_proto_file(proto_file) -> Iterable[OutputFile]:
+    """Generates code for a single .proto file."""
+
+    _, package_root = build_node_tree(proto_file)
+    output_filename = _proto_filename_to_generated_header(proto_file.name)
+    output_file = OutputFile(output_filename)
+    _generate_code_for_package(package_root, output_file)
+
+    return [output_file]
diff --git a/pw_rpc/py/pw_rpc/descriptors.py b/pw_rpc/py/pw_rpc/descriptors.py
index c9e8607..ce94b32 100644
--- a/pw_rpc/py/pw_rpc/descriptors.py
+++ b/pw_rpc/py/pw_rpc/descriptors.py
@@ -107,7 +107,7 @@
 
     @property
     def full_name(self) -> str:
-        return f'{self.service.full_name}/{self.name}'
+        return f'{self.service.full_name}.{self.name}'
 
     @property
     def type(self) -> 'Method.Type':
@@ -151,7 +151,18 @@
         return proto
 
     def __repr__(self) -> str:
-        return f'Method({self.name!r})'
+        req = self._method_parameter(self.request_type, self.client_streaming)
+        res = self._method_parameter(self.response_type, self.server_streaming)
+        return f'<{self.full_name}({req}) returns ({res})>'
+
+    def _method_parameter(self, proto, streaming: bool) -> str:
+        """Returns a description of the method's request or response type."""
+        stream = 'stream ' if streaming else ''
+
+        if proto.DESCRIPTOR.file.package == self.service.package:
+            return stream + proto.DESCRIPTOR.name
+
+        return stream + proto.DESCRIPTOR.full_name
 
     def __str__(self) -> str:
         return self.full_name
@@ -233,7 +244,7 @@
         super().__init__(services)
 
 
-def get_method(service_accessor: ServiceAccessor[T], name: str) -> T:
+def get_method(service_accessor: ServiceAccessor, name: str):
     """Returns a method matching the given full name in a ServiceAccessor.
 
     Args:
diff --git a/pw_rpc/py/pw_rpc/packet_pb2.py b/pw_rpc/py/pw_rpc/packet_pb2.py
new file mode 100644
index 0000000..a86d53c
--- /dev/null
+++ b/pw_rpc/py/pw_rpc/packet_pb2.py
@@ -0,0 +1,168 @@
+# [Pigweed] This file is a checked-in version of a generated protobuf module.
+# TODO(pwbug/239) Implement the pw_protobuf_package GN template and Python
+#     proto generation, then delete this file.
+
+# pylint: skip-file
+
+# type: ignore
+
+# yapf: disable
+
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: pw_rpc/pw_rpc_protos/packet.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf.internal import enum_type_wrapper
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+  name='pw_rpc/pw_rpc_protos/packet.proto',
+  package='pw.rpc.internal',
+  syntax='proto3',
+  serialized_options=None,
+  serialized_pb=_b('\n!pw_rpc/pw_rpc_protos/packet.proto\x12\x0fpw.rpc.internal\"\x92\x01\n\tRpcPacket\x12)\n\x04type\x18\x01 \x01(\x0e\x32\x1b.pw.rpc.internal.PacketType\x12\x12\n\nchannel_id\x18\x02 \x01(\r\x12\x12\n\nservice_id\x18\x03 \x01(\x07\x12\x11\n\tmethod_id\x18\x04 \x01(\x07\x12\x0f\n\x07payload\x18\x05 \x01(\x0c\x12\x0e\n\x06status\x18\x06 \x01(\r*\x93\x01\n\nPacketType\x12\x0b\n\x07REQUEST\x10\x00\x12\x15\n\x11\x43LIENT_STREAM_END\x10\x02\x12\x10\n\x0c\x43LIENT_ERROR\x10\x04\x12\x18\n\x14\x43\x41NCEL_SERVER_STREAM\x10\x06\x12\x0c\n\x08RESPONSE\x10\x01\x12\x15\n\x11SERVER_STREAM_END\x10\x03\x12\x10\n\x0cSERVER_ERROR\x10\x05\x62\x06proto3')
+)
+
+_PACKETTYPE = _descriptor.EnumDescriptor(
+  name='PacketType',
+  full_name='pw.rpc.internal.PacketType',
+  filename=None,
+  file=DESCRIPTOR,
+  values=[
+    _descriptor.EnumValueDescriptor(
+      name='REQUEST', index=0, number=0,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='CLIENT_STREAM_END', index=1, number=2,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='CLIENT_ERROR', index=2, number=4,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='CANCEL_SERVER_STREAM', index=3, number=6,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='RESPONSE', index=4, number=1,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='SERVER_STREAM_END', index=5, number=3,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='SERVER_ERROR', index=6, number=5,
+      serialized_options=None,
+      type=None),
+  ],
+  containing_type=None,
+  serialized_options=None,
+  serialized_start=204,
+  serialized_end=351,
+)
+_sym_db.RegisterEnumDescriptor(_PACKETTYPE)
+
+PacketType = enum_type_wrapper.EnumTypeWrapper(_PACKETTYPE)
+REQUEST = 0
+CLIENT_STREAM_END = 2
+CLIENT_ERROR = 4
+CANCEL_SERVER_STREAM = 6
+RESPONSE = 1
+SERVER_STREAM_END = 3
+SERVER_ERROR = 5
+
+
+
+_RPCPACKET = _descriptor.Descriptor(
+  name='RpcPacket',
+  full_name='pw.rpc.internal.RpcPacket',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='type', full_name='pw.rpc.internal.RpcPacket.type', index=0,
+      number=1, type=14, cpp_type=8, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='channel_id', full_name='pw.rpc.internal.RpcPacket.channel_id', index=1,
+      number=2, type=13, cpp_type=3, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='service_id', full_name='pw.rpc.internal.RpcPacket.service_id', index=2,
+      number=3, type=7, cpp_type=3, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='method_id', full_name='pw.rpc.internal.RpcPacket.method_id', index=3,
+      number=4, type=7, cpp_type=3, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='payload', full_name='pw.rpc.internal.RpcPacket.payload', index=4,
+      number=5, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b(""),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='status', full_name='pw.rpc.internal.RpcPacket.status', index=5,
+      number=6, type=13, cpp_type=3, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=55,
+  serialized_end=201,
+)
+
+_RPCPACKET.fields_by_name['type'].enum_type = _PACKETTYPE
+DESCRIPTOR.message_types_by_name['RpcPacket'] = _RPCPACKET
+DESCRIPTOR.enum_types_by_name['PacketType'] = _PACKETTYPE
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+RpcPacket = _reflection.GeneratedProtocolMessageType('RpcPacket', (_message.Message,), {
+  'DESCRIPTOR' : _RPCPACKET,
+  '__module__' : 'pw_rpc.pw_rpc_protos.packet_pb2'
+  # @@protoc_insertion_point(class_scope:pw.rpc.internal.RpcPacket)
+  })
+_sym_db.RegisterMessage(RpcPacket)
+
+
+# @@protoc_insertion_point(module_scope)
diff --git a/pw_rpc/py/pw_rpc/packet_pb2.pyi b/pw_rpc/py/pw_rpc/packet_pb2.pyi
new file mode 100644
index 0000000..e13b07d
--- /dev/null
+++ b/pw_rpc/py/pw_rpc/packet_pb2.pyi
@@ -0,0 +1,76 @@
+# @generated by generate_proto_mypy_stubs.py.  Do not edit!
+import sys
+from google.protobuf.descriptor import (
+    Descriptor as google___protobuf___descriptor___Descriptor,
+    EnumDescriptor as google___protobuf___descriptor___EnumDescriptor,
+    FileDescriptor as google___protobuf___descriptor___FileDescriptor,
+)
+
+from google.protobuf.internal.enum_type_wrapper import (  # type: ignore
+    _EnumTypeWrapper as google___protobuf___internal___enum_type_wrapper____EnumTypeWrapper,
+)
+
+from google.protobuf.message import (
+    Message as google___protobuf___message___Message,
+)
+
+from typing import (
+    NewType as typing___NewType,
+    Optional as typing___Optional,
+    cast as typing___cast,
+)
+
+from typing_extensions import (
+    Literal as typing_extensions___Literal,
+)
+
+
+builtin___bool = bool
+builtin___bytes = bytes
+builtin___float = float
+builtin___int = int
+
+
+DESCRIPTOR: google___protobuf___descriptor___FileDescriptor = ...
+
+PacketTypeValue = typing___NewType('PacketTypeValue', builtin___int)
+type___PacketTypeValue = PacketTypeValue
+PacketType: _PacketType
+class _PacketType(google___protobuf___internal___enum_type_wrapper____EnumTypeWrapper[PacketTypeValue]):
+    DESCRIPTOR: google___protobuf___descriptor___EnumDescriptor = ...
+    REQUEST = typing___cast(PacketTypeValue, 0)
+    CLIENT_STREAM_END = typing___cast(PacketTypeValue, 2)
+    CLIENT_ERROR = typing___cast(PacketTypeValue, 4)
+    CANCEL_SERVER_STREAM = typing___cast(PacketTypeValue, 6)
+    RESPONSE = typing___cast(PacketTypeValue, 1)
+    SERVER_STREAM_END = typing___cast(PacketTypeValue, 3)
+    SERVER_ERROR = typing___cast(PacketTypeValue, 5)
+REQUEST = typing___cast(PacketTypeValue, 0)
+CLIENT_STREAM_END = typing___cast(PacketTypeValue, 2)
+CLIENT_ERROR = typing___cast(PacketTypeValue, 4)
+CANCEL_SERVER_STREAM = typing___cast(PacketTypeValue, 6)
+RESPONSE = typing___cast(PacketTypeValue, 1)
+SERVER_STREAM_END = typing___cast(PacketTypeValue, 3)
+SERVER_ERROR = typing___cast(PacketTypeValue, 5)
+type___PacketType = PacketType
+
+class RpcPacket(google___protobuf___message___Message):
+    DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
+    type: type___PacketTypeValue = ...
+    channel_id: builtin___int = ...
+    service_id: builtin___int = ...
+    method_id: builtin___int = ...
+    payload: builtin___bytes = ...
+    status: builtin___int = ...
+
+    def __init__(self,
+        *,
+        type : typing___Optional[type___PacketTypeValue] = None,
+        channel_id : typing___Optional[builtin___int] = None,
+        service_id : typing___Optional[builtin___int] = None,
+        method_id : typing___Optional[builtin___int] = None,
+        payload : typing___Optional[builtin___bytes] = None,
+        status : typing___Optional[builtin___int] = None,
+        ) -> None: ...
+    def ClearField(self, field_name: typing_extensions___Literal[u"channel_id",b"channel_id",u"method_id",b"method_id",u"payload",b"payload",u"service_id",b"service_id",u"status",b"status",u"type",b"type"]) -> None: ...
+type___RpcPacket = RpcPacket
diff --git a/pw_rpc/py/pw_rpc/packets.py b/pw_rpc/py/pw_rpc/packets.py
index 1b1c6f5..dca5a10 100644
--- a/pw_rpc/py/pw_rpc/packets.py
+++ b/pw_rpc/py/pw_rpc/packets.py
@@ -13,23 +13,18 @@
 # the License.
 """Functions for working with pw_rpc packets."""
 
-import os
-
 from google.protobuf import message
-from pw_protobuf_compiler import python_protos
-
-packet_pb2 = python_protos.compile_and_import_file(
-    os.path.join(__file__, '..', '..', '..', 'pw_rpc_protos', 'packet.proto'))
-
-PacketType = packet_pb2.PacketType
-RpcPacket = packet_pb2.RpcPacket
+from pw_status import Status
+from pw_rpc import packet_pb2
 
 DecodeError = message.DecodeError
 Message = message.Message
 
+PacketType = packet_pb2.PacketType
+
 
 def decode(data: bytes):
-    packet = RpcPacket()
+    packet = packet_pb2.RpcPacket()
     packet.MergeFromString(data)
     return packet
 
@@ -47,16 +42,41 @@
 def encode_request(rpc: tuple, request: message.Message) -> bytes:
     channel, service, method = _ids(rpc)
 
-    return RpcPacket(type=PacketType.RPC,
-                     channel_id=channel,
-                     service_id=service,
-                     method_id=method,
-                     payload=request.SerializeToString()).SerializeToString()
+    return packet_pb2.RpcPacket(
+        type=packet_pb2.PacketType.REQUEST,
+        channel_id=channel,
+        service_id=service,
+        method_id=method,
+        payload=request.SerializeToString()).SerializeToString()
+
+
+def encode_response(rpc: tuple, response: message.Message) -> bytes:
+    channel, service, method = _ids(rpc)
+
+    return packet_pb2.RpcPacket(
+        type=packet_pb2.PacketType.RESPONSE,
+        channel_id=channel,
+        service_id=service,
+        method_id=method,
+        payload=response.SerializeToString()).SerializeToString()
+
+
+def encode_client_error(packet, status: Status) -> bytes:
+    return packet_pb2.RpcPacket(type=packet_pb2.PacketType.CLIENT_ERROR,
+                                channel_id=packet.channel_id,
+                                service_id=packet.service_id,
+                                method_id=packet.method_id,
+                                status=status.value).SerializeToString()
 
 
 def encode_cancel(rpc: tuple) -> bytes:
     channel, service, method = _ids(rpc)
-    return RpcPacket(type=PacketType.CANCEL,
-                     channel_id=channel,
-                     service_id=service,
-                     method_id=method).SerializeToString()
+    return packet_pb2.RpcPacket(
+        type=packet_pb2.PacketType.CANCEL_SERVER_STREAM,
+        channel_id=channel,
+        service_id=service,
+        method_id=method).SerializeToString()
+
+
+def for_server(packet):
+    return packet.type % 2 == 0
diff --git a/pw_rpc/py/pw_rpc/plugin.py b/pw_rpc/py/pw_rpc/plugin.py
index a7ea616..595b436 100644
--- a/pw_rpc/py/pw_rpc/plugin.py
+++ b/pw_rpc/py/pw_rpc/plugin.py
@@ -11,19 +11,24 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations under
 # the License.
-"""pw_rpc compiler plugin.
+"""pw_rpc protoc plugin entrypoint to generate code for RPC services."""
 
-protoc plugin which generates C++ code for pw_rpc services using nanopb.
-"""
-
+import enum
 import sys
 
 import google.protobuf.compiler.plugin_pb2 as plugin_pb2
 
 import pw_rpc.codegen_nanopb as codegen_nanopb
+import pw_rpc.codegen_raw as codegen_raw
 
 
-def process_proto_request(req: plugin_pb2.CodeGeneratorRequest,
+class Codegen(enum.Enum):
+    RAW = 0
+    NANOPB = 1
+
+
+def process_proto_request(codegen: Codegen,
+                          req: plugin_pb2.CodeGeneratorRequest,
                           res: plugin_pb2.CodeGeneratorResponse) -> None:
     """Handles a protoc CodeGeneratorRequest message.
 
@@ -35,14 +40,20 @@
       res: A CodeGeneratorResponse to populate with the plugin's output.
     """
     for proto_file in req.proto_file:
-        output_files = codegen_nanopb.process_proto_file(proto_file)
+        if codegen is Codegen.RAW:
+            output_files = codegen_raw.process_proto_file(proto_file)
+        elif codegen is Codegen.NANOPB:
+            output_files = codegen_nanopb.process_proto_file(proto_file)
+        else:
+            raise NotImplementedError(f'Unknown codegen type {codegen}')
+
         for output_file in output_files:
             fd = res.file.add()
             fd.name = output_file.name()
             fd.content = output_file.content()
 
 
-def main() -> int:
+def main(codegen: Codegen) -> int:
     """Protobuf compiler plugin entrypoint.
 
     Reads a CodeGeneratorRequest proto from stdin and writes a
@@ -51,10 +62,6 @@
     data = sys.stdin.buffer.read()
     request = plugin_pb2.CodeGeneratorRequest.FromString(data)
     response = plugin_pb2.CodeGeneratorResponse()
-    process_proto_request(request, response)
+    process_proto_request(codegen, request, response)
     sys.stdout.buffer.write(response.SerializeToString())
     return 0
-
-
-if __name__ == '__main__':
-    sys.exit(main())
diff --git a/pw_protobuf_compiler/nanopb.gni b/pw_rpc/py/pw_rpc/plugin_nanopb.py
old mode 100644
new mode 100755
similarity index 72%
copy from pw_protobuf_compiler/nanopb.gni
copy to pw_rpc/py/pw_rpc/plugin_nanopb.py
index 3c3be32..2dfcf2d
--- a/pw_protobuf_compiler/nanopb.gni
+++ b/pw_rpc/py/pw_rpc/plugin_nanopb.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 # Copyright 2020 The Pigweed Authors
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may not
@@ -11,9 +12,16 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations under
 # the License.
+"""pw_rpc nanopb protoc plugin."""
 
-declare_args() {
-  # If compiling protos for nanopb, this variable is set to the path to the
-  # nanopb installation.
-  dir_pw_third_party_nanopb = ""
-}
+import sys
+
+from pw_rpc import plugin
+
+
+def main() -> int:
+    return plugin.main(plugin.Codegen.NANOPB)
+
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/pw_protobuf_compiler/nanopb.gni b/pw_rpc/py/pw_rpc/plugin_raw.py
old mode 100644
new mode 100755
similarity index 73%
copy from pw_protobuf_compiler/nanopb.gni
copy to pw_rpc/py/pw_rpc/plugin_raw.py
index 3c3be32..8c22359
--- a/pw_protobuf_compiler/nanopb.gni
+++ b/pw_rpc/py/pw_rpc/plugin_raw.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 # Copyright 2020 The Pigweed Authors
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may not
@@ -11,9 +12,16 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations under
 # the License.
+"""pw_rpc raw protoc plugin."""
 
-declare_args() {
-  # If compiling protos for nanopb, this variable is set to the path to the
-  # nanopb installation.
-  dir_pw_third_party_nanopb = ""
-}
+import sys
+
+from pw_rpc import plugin
+
+
+def main() -> int:
+    return plugin.main(plugin.Codegen.RAW)
+
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/pw_rpc/py/pw_rpc/py.typed b/pw_rpc/py/pw_rpc/py.typed
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pw_rpc/py/pw_rpc/py.typed
diff --git a/pw_rpc/py/setup.py b/pw_rpc/py/setup.py
index 0d792bb..957a286 100644
--- a/pw_rpc/py/setup.py
+++ b/pw_rpc/py/setup.py
@@ -13,7 +13,7 @@
 # the License.
 """pw_rpc"""
 
-import setuptools
+import setuptools  # type: ignore
 
 setuptools.setup(
     name='pw_rpc',
@@ -22,10 +22,18 @@
     author_email='pigweed-developers@googlegroups.com',
     description='On-device remote procedure calls',
     packages=setuptools.find_packages(),
-    entry_points={'console_scripts': ['pw_rpc_codegen = pw_rpc.plugin:main']},
+    package_data={'pw_rpc': ['py.typed']},
+    zip_safe=False,
+    entry_points={
+        'console_scripts': [
+            'pw_rpc_codegen_nanopb = pw_rpc.plugin_nanopb:main',
+            'pw_rpc_codegen_raw = pw_rpc.plugin_raw:main'
+        ]
+    },
     install_requires=[
         'protobuf',
-        'pw_protobuf',
-        'pw_protobuf_compiler',
+        # 'pw_protobuf_compiler',
+        # 'pw_status',
     ],
+    tests_require=['pw_build'],
 )
diff --git a/pw_rpc/raw/BUILD b/pw_rpc/raw/BUILD
new file mode 100644
index 0000000..229718e
--- /dev/null
+++ b/pw_rpc/raw/BUILD
@@ -0,0 +1,105 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+load(
+    "//pw_build:pigweed.bzl",
+    "pw_cc_library",
+    "pw_cc_test",
+)
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])  # Apache License 2.0
+
+pw_cc_library(
+    name = "method",
+    srcs = [
+        "raw_method.cc",
+    ],
+    hdrs = [
+        "public/pw_rpc/internal/raw_method.h",
+    ],
+    deps = [
+        "//pw_bytes",
+        "//pw_rpc:server",
+    ]
+)
+
+pw_cc_library(
+    name = "method_union",
+    hdrs = [
+        "public/pw_rpc/internal/raw_method_union.h",
+    ],
+    deps = [
+        ":method",
+    ]
+)
+
+pw_cc_library(
+    name = "service_method_traits",
+    hdrs = [
+        "public/pw_rpc/internal/raw_service_method_traits.h",
+    ],
+    deps = [
+        ":method_union",
+        "//pw_rpc:service_method_traits",
+    ]
+)
+
+pw_cc_library(
+    name = "test_method_context",
+    hdrs = [
+        "public/pw_rpc/raw_test_method_context.h",
+    ],
+    deps = [
+        ":method_union",
+        "//pw_assert",
+        "//pw_containers",
+    ]
+)
+
+pw_cc_test(
+    name = "codegen_test",
+    srcs = [
+        "codegen_test.cc",
+    ],
+    deps = [
+        ":method_union",
+        "//pw_protobuf",
+    ],
+)
+
+pw_cc_test(
+    name = "raw_method_test",
+    srcs = [
+        "raw_method_test.cc",
+    ],
+    deps = [
+        ":method_union",
+        "//pw_protobuf",
+        "//pw_rpc:internal_test_utils",
+    ],
+)
+
+pw_cc_test(
+    name = "raw_method_union_test",
+    srcs = [
+        "raw_method_union_test.cc",
+    ],
+    deps = [
+        ":method_union",
+        "//pw_protobuf",
+        "//pw_rpc:internal_test_utils",
+    ],
+)
diff --git a/pw_rpc/raw/BUILD.gn b/pw_rpc/raw/BUILD.gn
new file mode 100644
index 0000000..89d0492
--- /dev/null
+++ b/pw_rpc/raw/BUILD.gn
@@ -0,0 +1,98 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+# gn-format disable
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+import("$dir_pw_docgen/docs.gni")
+import("$dir_pw_unit_test/test.gni")
+config("public") {
+  include_dirs = [ "public" ]
+  visibility = [ ":*" ]
+}
+
+pw_source_set("method") {
+  public_configs = [ ":public" ]
+  public = [ "public/pw_rpc/internal/raw_method.h" ]
+  sources = [ "raw_method.cc" ]
+  public_deps = [
+    "..:server",
+    dir_pw_bytes,
+  ]
+  deps = [ dir_pw_log ]
+}
+
+pw_source_set("method_union") {
+  public_configs = [ ":public" ]
+  public = [ "public/pw_rpc/internal/raw_method_union.h" ]
+  public_deps = [ ":method" ]
+}
+
+pw_source_set("service_method_traits") {
+  public_configs = [ ":public" ]
+  public = [ "public/pw_rpc/internal/raw_service_method_traits.h" ]
+  public_deps = [
+    ":method_union",
+    "..:service_method_traits",
+  ]
+}
+
+pw_source_set("test_method_context") {
+  public_configs = [ ":public" ]
+  public = [ "public/pw_rpc/raw_test_method_context.h" ]
+  public_deps = [
+    ":service_method_traits",
+    dir_pw_assert,
+    dir_pw_containers,
+  ]
+}
+
+pw_test_group("tests") {
+  tests = [
+    ":codegen_test",
+    ":raw_method_test",
+    ":raw_method_union_test",
+  ]
+}
+
+pw_test("codegen_test") {
+  deps = [
+    ":test_method_context",
+    "..:test_protos.pwpb",
+    "..:test_protos.raw_rpc",
+    dir_pw_protobuf,
+  ]
+  sources = [ "codegen_test.cc" ]
+}
+
+pw_test("raw_method_test") {
+  deps = [
+    ":method_union",
+    "..:test_protos.pwpb",
+    "..:test_utils",
+    dir_pw_protobuf,
+  ]
+  sources = [ "raw_method_test.cc" ]
+}
+
+pw_test("raw_method_union_test") {
+  deps = [
+    ":method_union",
+    "..:test_protos.pwpb",
+    "..:test_utils",
+    dir_pw_protobuf,
+  ]
+  sources = [ "raw_method_union_test.cc" ]
+}
diff --git a/pw_protobuf_compiler/nanopb.gni b/pw_rpc/raw/CMakeLists.txt
similarity index 69%
copy from pw_protobuf_compiler/nanopb.gni
copy to pw_rpc/raw/CMakeLists.txt
index 3c3be32..f0e1225 100644
--- a/pw_protobuf_compiler/nanopb.gni
+++ b/pw_rpc/raw/CMakeLists.txt
@@ -12,8 +12,15 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-declare_args() {
-  # If compiling protos for nanopb, this variable is set to the path to the
-  # nanopb installation.
-  dir_pw_third_party_nanopb = ""
-}
+include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+
+pw_auto_add_simple_module(pw_rpc.raw
+  PUBLIC_DEPS
+    pw_rpc.client
+    pw_rpc.common
+    pw_rpc.server
+  TEST_DEPS
+    pw_rpc.test_protos.pwpb
+    pw_rpc.test_protos.raw_rpc
+    pw_rpc.test_utils
+)
diff --git a/pw_rpc/raw/codegen_test.cc b/pw_rpc/raw/codegen_test.cc
new file mode 100644
index 0000000..6194031
--- /dev/null
+++ b/pw_rpc/raw/codegen_test.cc
@@ -0,0 +1,147 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "gtest/gtest.h"
+#include "pw_protobuf/decoder.h"
+#include "pw_rpc/internal/hash.h"
+#include "pw_rpc/raw_test_method_context.h"
+#include "pw_rpc_test_protos/test.pwpb.h"
+#include "pw_rpc_test_protos/test.raw_rpc.pb.h"
+
+namespace pw::rpc {
+namespace test {
+
+class TestService final : public generated::TestService<TestService> {
+ public:
+  StatusWithSize TestRpc(ServerContext&,
+                         ConstByteSpan request,
+                         ByteSpan response) {
+    int64_t integer;
+    Status status;
+    DecodeRequest(request, integer, status);
+
+    protobuf::NestedEncoder encoder(response);
+    TestResponse::Encoder test_response(&encoder);
+    test_response.WriteValue(integer + 1);
+
+    return StatusWithSize(status, encoder.Encode().value().size());
+  }
+
+  void TestStreamRpc(ServerContext&,
+                     ConstByteSpan request,
+                     RawServerWriter& writer) {
+    int64_t integer;
+    Status status;
+    DecodeRequest(request, integer, status);
+
+    for (int i = 0; i < integer; ++i) {
+      ByteSpan buffer = writer.PayloadBuffer();
+      protobuf::NestedEncoder encoder(buffer);
+      TestStreamResponse::Encoder test_stream_response(&encoder);
+      test_stream_response.WriteNumber(i);
+      writer.Write(encoder.Encode().value());
+    }
+
+    writer.Finish(status);
+  }
+
+ private:
+  void DecodeRequest(ConstByteSpan request, int64_t& integer, Status& status) {
+    protobuf::Decoder decoder(request);
+
+    while (decoder.Next().ok()) {
+      switch (static_cast<TestRequest::Fields>(decoder.FieldNumber())) {
+        case TestRequest::Fields::INTEGER:
+          decoder.ReadInt64(&integer);
+          break;
+        case TestRequest::Fields::STATUS_CODE: {
+          uint32_t status_code;
+          decoder.ReadUint32(&status_code);
+          status = static_cast<Status::Code>(status_code);
+          break;
+        }
+      }
+    }
+  }
+};
+
+}  // namespace test
+
+namespace {
+
+TEST(RawCodegen, CompilesProperly) {
+  test::TestService service;
+  EXPECT_EQ(service.id(), internal::Hash("pw.rpc.test.TestService"));
+  EXPECT_STREQ(service.name(), "TestService");
+}
+
+TEST(RawCodegen, Server_InvokeUnaryRpc) {
+  PW_RAW_TEST_METHOD_CONTEXT(test::TestService, TestRpc) context;
+
+  std::byte buffer[64];
+  protobuf::NestedEncoder encoder(buffer);
+  test::TestRequest::Encoder test_request(&encoder);
+  test_request.WriteInteger(123);
+  test_request.WriteStatusCode(Status::Ok().code());
+
+  auto sws = context.call(encoder.Encode().value());
+  EXPECT_EQ(Status::Ok(), sws.status());
+
+  protobuf::Decoder decoder(context.response());
+
+  while (decoder.Next().ok()) {
+    switch (static_cast<test::TestResponse::Fields>(decoder.FieldNumber())) {
+      case test::TestResponse::Fields::VALUE: {
+        int32_t value;
+        decoder.ReadInt32(&value);
+        EXPECT_EQ(value, 124);
+        break;
+      }
+    }
+  }
+}
+
+TEST(RawCodegen, Server_InvokeServerStreamingRpc) {
+  PW_RAW_TEST_METHOD_CONTEXT(test::TestService, TestStreamRpc) context;
+
+  std::byte buffer[64];
+  protobuf::NestedEncoder encoder(buffer);
+  test::TestRequest::Encoder test_request(&encoder);
+  test_request.WriteInteger(5);
+  test_request.WriteStatusCode(Status::Unauthenticated().code());
+
+  context.call(encoder.Encode().value());
+  EXPECT_TRUE(context.done());
+  EXPECT_EQ(Status::Unauthenticated(), context.status());
+  EXPECT_EQ(context.total_responses(), 5u);
+
+  protobuf::Decoder decoder(context.responses().back());
+  while (decoder.Next().ok()) {
+    switch (
+        static_cast<test::TestStreamResponse::Fields>(decoder.FieldNumber())) {
+      case test::TestStreamResponse::Fields::NUMBER: {
+        int32_t value;
+        decoder.ReadInt32(&value);
+        EXPECT_EQ(value, 4);
+        break;
+      }
+      case test::TestStreamResponse::Fields::CHUNK:
+        FAIL();
+        break;
+    }
+  }
+}
+
+}  // namespace
+}  // namespace pw::rpc
diff --git a/pw_rpc/raw/public/pw_rpc/internal/raw_method.h b/pw_rpc/raw/public/pw_rpc/internal/raw_method.h
new file mode 100644
index 0000000..a1e3b66
--- /dev/null
+++ b/pw_rpc/raw/public/pw_rpc/internal/raw_method.h
@@ -0,0 +1,109 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include "pw_bytes/span.h"
+#include "pw_rpc/internal/base_server_writer.h"
+#include "pw_rpc/internal/method.h"
+#include "pw_rpc/internal/method_type.h"
+#include "pw_status/status_with_size.h"
+
+namespace pw::rpc {
+
+class RawServerWriter : public internal::BaseServerWriter {
+ public:
+  RawServerWriter() = default;
+  RawServerWriter(RawServerWriter&&) = default;
+  RawServerWriter& operator=(RawServerWriter&&) = default;
+
+  ~RawServerWriter();
+
+  // Returns a buffer in which a response payload can be built.
+  ByteSpan PayloadBuffer() { return AcquirePayloadBuffer(); }
+
+  // Sends a response packet with the given raw payload. The payload can either
+  // be in the buffer previously acquired from PayloadBuffer(), or an arbitrary
+  // external buffer.
+  Status Write(ConstByteSpan response);
+};
+
+namespace internal {
+
+// A RawMethod is a method invoker which does not perform any automatic protobuf
+// serialization or deserialization. The implementer is given the raw binary
+// payload of incoming requests, and is responsible for encoding responses to a
+// provided buffer. This is intended for use in methods which would have large
+// protobuf data structure overhead to lower stack usage, or in methods packing
+// responses up to a channel's MTU.
+class RawMethod : public Method {
+ public:
+  template <auto method>
+  constexpr static RawMethod Unary(uint32_t id) {
+    return RawMethod(
+        id,
+        UnaryInvoker,
+        {.unary = [](ServerCall& call, ConstByteSpan req, ByteSpan res) {
+          return method(call, req, res);
+        }});
+  }
+
+  template <auto method>
+  constexpr static RawMethod ServerStreaming(uint32_t id) {
+    return RawMethod(id,
+                     ServerStreamingInvoker,
+                     Function{.server_streaming = [](ServerCall& call,
+                                                     ConstByteSpan req,
+                                                     BaseServerWriter& writer) {
+                       method(call, req, static_cast<RawServerWriter&>(writer));
+                     }});
+  }
+
+ private:
+  using UnaryFunction = StatusWithSize (*)(ServerCall&,
+                                           ConstByteSpan,
+                                           ByteSpan);
+
+  using ServerStreamingFunction = void (*)(ServerCall&,
+                                           ConstByteSpan,
+                                           BaseServerWriter&);
+  union Function {
+    UnaryFunction unary;
+    ServerStreamingFunction server_streaming;
+    // TODO(frolv): Support client and bidirectional streaming.
+  };
+
+  constexpr RawMethod(uint32_t id, Invoker invoker, Function function)
+      : Method(id, invoker), function_(function) {}
+
+  static void UnaryInvoker(const Method& method,
+                           ServerCall& call,
+                           const Packet& request) {
+    static_cast<const RawMethod&>(method).CallUnary(call, request);
+  }
+
+  static void ServerStreamingInvoker(const Method& method,
+                                     ServerCall& call,
+                                     const Packet& request) {
+    static_cast<const RawMethod&>(method).CallServerStreaming(call, request);
+  }
+
+  void CallUnary(ServerCall& call, const Packet& request) const;
+  void CallServerStreaming(ServerCall& call, const Packet& request) const;
+
+  // Stores the user-defined RPC in a generic wrapper.
+  Function function_;
+};
+
+}  // namespace internal
+}  // namespace pw::rpc
diff --git a/pw_rpc/raw/public/pw_rpc/internal/raw_method_union.h b/pw_rpc/raw/public/pw_rpc/internal/raw_method_union.h
new file mode 100644
index 0000000..90267c5
--- /dev/null
+++ b/pw_rpc/raw/public/pw_rpc/internal/raw_method_union.h
@@ -0,0 +1,93 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include "pw_bytes/span.h"
+#include "pw_rpc/internal/method_union.h"
+#include "pw_rpc/internal/raw_method.h"
+
+namespace pw::rpc::internal {
+
+// MethodUnion which stores only a raw method. For use in fully raw RPC
+// services, without any additional memory overhead.
+class RawMethodUnion : public MethodUnion {
+ public:
+  constexpr RawMethodUnion(RawMethod&& method)
+      : impl_({.raw = std::move(method)}) {}
+
+  constexpr const Method& method() const { return impl_.method; }
+  constexpr const RawMethod& raw_method() const { return impl_.raw; }
+
+ private:
+  union {
+    Method method;
+    RawMethod raw;
+  } impl_;
+};
+
+// MethodTraits specialization for a unary method.
+template <typename T>
+struct MethodTraits<StatusWithSize (T::*)(
+    ServerContext&, ConstByteSpan, ByteSpan)> {
+  static constexpr MethodType kType = MethodType::kUnary;
+  using Service = T;
+  using Implementation = RawMethod;
+};
+
+// MethodTraits specialization for a raw server streaming method.
+template <typename T>
+struct MethodTraits<void (T::*)(
+    ServerContext&, ConstByteSpan, RawServerWriter&)> {
+  static constexpr MethodType kType = MethodType::kServerStreaming;
+  using Service = T;
+  using Implementation = RawMethod;
+};
+
+template <auto method>
+constexpr bool kIsRaw = std::is_same_v<MethodImplementation<method>, RawMethod>;
+
+// Deduces the type of an implemented service method from its signature, and
+// returns the appropriate MethodUnion object to invoke it.
+template <auto method>
+constexpr RawMethod GetRawMethodFor(uint32_t id) {
+  static_assert(kIsRaw<method>,
+                "GetRawMethodFor should only be called on raw RPC methods");
+
+  using Traits = MethodTraits<decltype(method)>;
+  using ServiceImpl = typename Traits::Service;
+
+  if constexpr (Traits::kType == MethodType::kUnary) {
+    constexpr auto invoker =
+        +[](ServerCall& call, ConstByteSpan request, ByteSpan response) {
+          return (static_cast<ServiceImpl&>(call.service()).*method)(
+              call.context(), request, response);
+        };
+    return RawMethod::Unary<invoker>(id);
+  }
+
+  if constexpr (Traits::kType == MethodType::kServerStreaming) {
+    constexpr auto invoker =
+        +[](ServerCall& call, ConstByteSpan request, RawServerWriter& writer) {
+          (static_cast<ServiceImpl&>(call.service()).*method)(
+              call.context(), request, writer);
+        };
+    return RawMethod::ServerStreaming<invoker>(id);
+  }
+
+  constexpr auto fake_invoker =
+      +[](ServerCall&, ConstByteSpan, RawServerWriter&) {};
+  return RawMethod::ServerStreaming<fake_invoker>(0);
+};
+
+}  // namespace pw::rpc::internal
diff --git a/pw_rpc/raw/public/pw_rpc/internal/raw_service_method_traits.h b/pw_rpc/raw/public/pw_rpc/internal/raw_service_method_traits.h
new file mode 100644
index 0000000..e304fc2
--- /dev/null
+++ b/pw_rpc/raw/public/pw_rpc/internal/raw_service_method_traits.h
@@ -0,0 +1,27 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include "pw_rpc/internal/raw_method_union.h"
+#include "pw_rpc/internal/service_method_traits.h"
+
+namespace pw::rpc::internal {
+
+template <auto impl_method, uint32_t method_id>
+using RawServiceMethodTraits =
+    ServiceMethodTraits<&MethodBaseService<impl_method>::RawMethodFor,
+                        impl_method,
+                        method_id>;
+
+}  // namespace pw::rpc::internal
diff --git a/pw_rpc/raw/public/pw_rpc/raw_test_method_context.h b/pw_rpc/raw/public/pw_rpc/raw_test_method_context.h
new file mode 100644
index 0000000..732a2b2
--- /dev/null
+++ b/pw_rpc/raw/public/pw_rpc/raw_test_method_context.h
@@ -0,0 +1,312 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <type_traits>
+
+#include "pw_assert/light.h"
+#include "pw_bytes/span.h"
+#include "pw_containers/vector.h"
+#include "pw_rpc/channel.h"
+#include "pw_rpc/internal/hash.h"
+#include "pw_rpc/internal/packet.h"
+#include "pw_rpc/internal/raw_service_method_traits.h"
+#include "pw_rpc/internal/server.h"
+
+namespace pw::rpc {
+
+// Declares a context object that may be used to invoke an RPC. The context is
+// declared with the name of the implemented service and the method to invoke.
+// The RPC can then be invoked with the call method.
+//
+// For a unary RPC, context.call(request) returns the status, and the response
+// struct can be accessed via context.response().
+//
+//   PW_RAW_TEST_METHOD_CONTEXT(my::CoolService, TheMethod) context;
+//   EXPECT_EQ(Status::Ok(), context.call(encoded_request).status());
+//   EXPECT_EQ(0,
+//             std::memcmp(encoded_response,
+//                         context.response().data(),
+//                         sizeof(encoded_response)));
+//
+// For a server streaming RPC, context.call(request) invokes the method. As in a
+// normal RPC, the method completes when the ServerWriter's Finish method is
+// called (or it goes out of scope).
+//
+//   PW_RAW_TEST_METHOD_CONTEXT(my::CoolService, TheStreamingMethod) context;
+//   context.call(encoded_response);
+//
+//   EXPECT_TRUE(context.done());  // Check that the RPC completed
+//   EXPECT_EQ(Status::Ok(), context.status());  // Check the status
+//
+//   EXPECT_EQ(3u, context.responses().size());
+//   ByteSpan& response = context.responses()[0];  // check individual responses
+//
+//   for (ByteSpan& response : context.responses()) {
+//     // iterate over the responses
+//   }
+//
+// PW_RAW_TEST_METHOD_CONTEXT forwards its constructor arguments to the
+// underlying service. For example:
+//
+//   PW_RAW_TEST_METHOD_CONTEXT(MyService, Go) context(service, args);
+//
+// PW_RAW_TEST_METHOD_CONTEXT takes two optional arguments:
+//
+//   size_t max_responses: maximum responses to store; ignored unless streaming
+//   size_t output_size_bytes: buffer size; must be large enough for a packet
+//
+// Example:
+//
+//   PW_RAW_TEST_METHOD_CONTEXT(MyService, BestMethod, 3, 256) context;
+//   ASSERT_EQ(3u, context.responses().max_size());
+//
+#define PW_RAW_TEST_METHOD_CONTEXT(service, method, ...)              \
+  ::pw::rpc::RawTestMethodContext<&service::method,                   \
+                                  ::pw::rpc::internal::Hash(#method), \
+                                  ##__VA_ARGS__>
+template <auto method,
+          uint32_t method_id,
+          size_t max_responses = 4,
+          size_t output_size_bytes = 128>
+class RawTestMethodContext;
+
+// Internal classes that implement RawTestMethodContext.
+namespace internal::test::raw {
+
+// A ChannelOutput implementation that stores the outgoing payloads and status.
+template <size_t output_size>
+class MessageOutput final : public ChannelOutput {
+ public:
+  using ResponseBuffer = std::array<std::byte, output_size>;
+
+  MessageOutput(Vector<ByteSpan>& responses,
+                Vector<ResponseBuffer>& buffers,
+                ByteSpan packet_buffer)
+      : ChannelOutput("internal::test::raw::MessageOutput"),
+        responses_(responses),
+        buffers_(buffers),
+        packet_buffer_(packet_buffer) {
+    clear();
+  }
+
+  Status last_status() const { return last_status_; }
+  void set_last_status(Status status) { last_status_ = status; }
+
+  size_t total_responses() const { return total_responses_; }
+
+  bool stream_ended() const { return stream_ended_; }
+
+  void clear() {
+    responses_.clear();
+    buffers_.clear();
+    total_responses_ = 0;
+    stream_ended_ = false;
+    last_status_ = Status::Unknown();
+  }
+
+ private:
+  ByteSpan AcquireBuffer() override { return packet_buffer_; }
+
+  Status SendAndReleaseBuffer(size_t size) override;
+
+  Vector<ByteSpan>& responses_;
+  Vector<ResponseBuffer>& buffers_;
+  ByteSpan packet_buffer_;
+  size_t total_responses_;
+  bool stream_ended_;
+  Status last_status_;
+};
+
+// Collects everything needed to invoke a particular RPC.
+template <auto method,
+          uint32_t method_id,
+          size_t max_responses,
+          size_t output_size>
+struct InvocationContext {
+  template <typename... Args>
+  InvocationContext(Args&&... args)
+      : output(responses, buffers, packet_buffer),
+        channel(Channel::Create<123>(&output)),
+        server(std::span(&channel, 1)),
+        service(std::forward<Args>(args)...),
+        call(static_cast<internal::Server&>(server),
+             static_cast<internal::Channel&>(channel),
+             service,
+             RawServiceMethodTraits<method, method_id>::method()) {}
+
+  using ResponseBuffer = std::array<std::byte, output_size>;
+  using Service = typename RawServiceMethodTraits<method, method_id>::Service;
+
+  MessageOutput<output_size> output;
+  rpc::Channel channel;
+  rpc::Server server;
+  Service service;
+  Vector<ByteSpan, max_responses> responses;
+  Vector<ResponseBuffer, max_responses> buffers;
+  std::array<std::byte, output_size> packet_buffer = {};
+  internal::ServerCall call;
+};
+
+// Method invocation context for a unary RPC. Returns the status in call() and
+// provides the response through the response() method.
+template <auto method, uint32_t method_id, size_t output_size>
+class UnaryContext {
+ private:
+  using Context = InvocationContext<method, method_id, 1, output_size>;
+  Context ctx_;
+
+ public:
+  template <typename... Args>
+  UnaryContext(Args&&... args) : ctx_(std::forward<Args>(args)...) {}
+
+  typename Context::Service& service() { return ctx_.service; }
+
+  // Invokes the RPC with the provided request. Returns RPC's StatusWithSize.
+  StatusWithSize call(ConstByteSpan request) {
+    ctx_.output.clear();
+    ctx_.buffers.emplace_back();
+    ctx_.buffers.back() = {};
+    ctx_.responses.emplace_back();
+    auto& response = ctx_.responses.back();
+    response = {ctx_.buffers.back().data(), ctx_.buffers.back().size()};
+    auto sws = (ctx_.service.*method)(ctx_.call.context(), request, response);
+    response = response.first(sws.size());
+    return sws;
+  }
+
+  // Gives access to the RPC's response.
+  ConstByteSpan response() const {
+    PW_ASSERT(ctx_.responses.size() > 0u);
+    return ctx_.responses.back();
+  }
+};
+
+// Method invocation context for a server streaming RPC.
+template <auto method,
+          uint32_t method_id,
+          size_t max_responses,
+          size_t output_size>
+class ServerStreamingContext {
+ private:
+  using Context =
+      InvocationContext<method, method_id, max_responses, output_size>;
+  Context ctx_;
+
+ public:
+  template <typename... Args>
+  ServerStreamingContext(Args&&... args) : ctx_(std::forward<Args>(args)...) {}
+
+  typename Context::Service& service() { return ctx_.service; }
+
+  // Invokes the RPC with the provided request.
+  void call(ConstByteSpan request) {
+    ctx_.output.clear();
+    BaseServerWriter server_writer(ctx_.call);
+    return (ctx_.service.*method)(ctx_.call.context(),
+                                  request,
+                                  static_cast<RawServerWriter&>(server_writer));
+  }
+
+  // Returns a server writer which writes responses into the context's buffer.
+  // This should not be called alongside call(); use one or the other.
+  RawServerWriter writer() {
+    ctx_.output.clear();
+    BaseServerWriter server_writer(ctx_.call);
+    return std::move(static_cast<RawServerWriter&>(server_writer));
+  }
+
+  // Returns the responses that have been recorded. The maximum number of
+  // responses is responses().max_size(). responses().back() is always the most
+  // recent response, even if total_responses() > responses().max_size().
+  const Vector<ByteSpan>& responses() const { return ctx_.responses; }
+
+  // The total number of responses sent, which may be larger than
+  // responses.max_size().
+  size_t total_responses() const { return ctx_.output.total_responses(); }
+
+  // True if the stream has terminated.
+  bool done() const { return ctx_.output.stream_ended(); }
+
+  // The status of the stream. Only valid if done() is true.
+  Status status() const {
+    PW_ASSERT(done());
+    return ctx_.output.last_status();
+  }
+};
+
+// Alias to select the type of the context object to use based on which type of
+// RPC it is for.
+template <auto method, uint32_t method_id, size_t responses, size_t output_size>
+using Context = std::tuple_element_t<
+    static_cast<size_t>(MethodTraits<decltype(method)>::kType),
+    std::tuple<UnaryContext<method, method_id, output_size>,
+               ServerStreamingContext<method, method_id, responses, output_size>
+               // TODO(hepler): Support client and bidi streaming
+               >>;
+
+template <size_t output_size>
+Status MessageOutput<output_size>::SendAndReleaseBuffer(size_t size) {
+  PW_ASSERT(!stream_ended_);
+
+  if (size == 0u) {
+    return Status::Ok();
+  }
+
+  Result<internal::Packet> result =
+      internal::Packet::FromBuffer(std::span(packet_buffer_.data(), size));
+  PW_ASSERT(result.ok());
+
+  last_status_ = result.value().status();
+
+  switch (result.value().type()) {
+    case internal::PacketType::RESPONSE: {
+      // If we run out of space, the back message is always the most recent.
+      buffers_.emplace_back();
+      buffers_.back() = {};
+      auto response = result.value().payload();
+      std::memcpy(&buffers_.back(), response.data(), response.size());
+      responses_.emplace_back();
+      responses_.back() = {buffers_.back().data(), response.size()};
+      total_responses_ += 1;
+      break;
+    }
+    case internal::PacketType::SERVER_STREAM_END:
+      stream_ended_ = true;
+      break;
+    default:
+      PW_CRASH("Unhandled PacketType");
+  }
+  return Status::Ok();
+}
+
+}  // namespace internal::test::raw
+
+template <auto method,
+          uint32_t method_id,
+          size_t max_responses,
+          size_t output_size_bytes>
+class RawTestMethodContext
+    : public internal::test::raw::
+          Context<method, method_id, max_responses, output_size_bytes> {
+ public:
+  // Forwards constructor arguments to the service class.
+  template <typename... ServiceArgs>
+  RawTestMethodContext(ServiceArgs&&... service_args)
+      : internal::test::raw::
+            Context<method, method_id, max_responses, output_size_bytes>(
+                std::forward<ServiceArgs>(service_args)...) {}
+};
+
+}  // namespace pw::rpc
diff --git a/pw_rpc/raw/raw_method.cc b/pw_rpc/raw/raw_method.cc
new file mode 100644
index 0000000..b23ff5b
--- /dev/null
+++ b/pw_rpc/raw/raw_method.cc
@@ -0,0 +1,74 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_rpc/internal/raw_method.h"
+
+#include <cstring>
+
+#include "pw_log/log.h"
+#include "pw_rpc/internal/packet.h"
+
+namespace pw::rpc {
+
+RawServerWriter::~RawServerWriter() {
+  if (!buffer().empty()) {
+    ReleasePayloadBuffer();
+  }
+}
+
+Status RawServerWriter::Write(ConstByteSpan response) {
+  if (buffer().Contains(response)) {
+    return ReleasePayloadBuffer(response);
+  }
+
+  std::span<std::byte> buffer = AcquirePayloadBuffer();
+
+  if (response.size() > buffer.size()) {
+    ReleasePayloadBuffer();
+    return Status::OutOfRange();
+  }
+
+  std::memcpy(buffer.data(), response.data(), response.size());
+  return ReleasePayloadBuffer(buffer.first(response.size()));
+}
+
+namespace internal {
+
+void RawMethod::CallUnary(ServerCall& call, const Packet& request) const {
+  Channel::OutputBuffer response_buffer = call.channel().AcquireBuffer();
+  std::span payload_buffer = response_buffer.payload(request);
+
+  StatusWithSize sws = function_.unary(call, request.payload(), payload_buffer);
+  Packet response = Packet::Response(request);
+
+  response.set_payload(payload_buffer.first(sws.size()));
+  response.set_status(sws.status());
+  if (call.channel().Send(response_buffer, response).ok()) {
+    return;
+  }
+
+  PW_LOG_WARN("Failed to send response packet for channel %u",
+              unsigned(call.channel().id()));
+  call.channel().Send(response_buffer,
+                      Packet::ServerError(request, Status::Internal()));
+}
+
+void RawMethod::CallServerStreaming(ServerCall& call,
+                                    const Packet& request) const {
+  internal::BaseServerWriter server_writer(call);
+  function_.server_streaming(call, request.payload(), server_writer);
+}
+
+}  // namespace internal
+}  // namespace pw::rpc
diff --git a/pw_rpc/raw/raw_method_test.cc b/pw_rpc/raw/raw_method_test.cc
new file mode 100644
index 0000000..9b03402
--- /dev/null
+++ b/pw_rpc/raw/raw_method_test.cc
@@ -0,0 +1,197 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_rpc/internal/raw_method.h"
+
+#include <array>
+
+#include "gtest/gtest.h"
+#include "pw_bytes/array.h"
+#include "pw_protobuf/decoder.h"
+#include "pw_protobuf/encoder.h"
+#include "pw_rpc/internal/raw_method_union.h"
+#include "pw_rpc/server_context.h"
+#include "pw_rpc/service.h"
+#include "pw_rpc_private/internal_test_utils.h"
+#include "pw_rpc_test_protos/test.pwpb.h"
+
+namespace pw::rpc::internal {
+namespace {
+
+struct {
+  int64_t integer;
+  uint32_t status_code;
+} last_request;
+RawServerWriter last_writer;
+
+void DecodeRawTestRequest(ConstByteSpan request) {
+  protobuf::Decoder decoder(request);
+
+  while (decoder.Next().ok()) {
+    test::TestRequest::Fields field =
+        static_cast<test::TestRequest::Fields>(decoder.FieldNumber());
+
+    switch (field) {
+      case test::TestRequest::Fields::INTEGER:
+        decoder.ReadInt64(&last_request.integer);
+        break;
+      case test::TestRequest::Fields::STATUS_CODE:
+        decoder.ReadUint32(&last_request.status_code);
+        break;
+    }
+  }
+}
+
+StatusWithSize AddFive(ServerCall&, ConstByteSpan request, ByteSpan response) {
+  DecodeRawTestRequest(request);
+
+  protobuf::NestedEncoder encoder(response);
+  test::TestResponse::Encoder test_response(&encoder);
+  test_response.WriteValue(last_request.integer + 5);
+  ConstByteSpan payload;
+  encoder.Encode(&payload);
+
+  return StatusWithSize::Unauthenticated(payload.size());
+}
+
+void StartStream(ServerCall&, ConstByteSpan request, RawServerWriter& writer) {
+  DecodeRawTestRequest(request);
+  last_writer = std::move(writer);
+}
+
+class FakeService : public Service {
+ public:
+  FakeService(uint32_t id) : Service(id, kMethods) {}
+
+  static constexpr std::array<RawMethodUnion, 2> kMethods = {
+      RawMethod::Unary<AddFive>(10u),
+      RawMethod::ServerStreaming<StartStream>(11u),
+  };
+};
+
+TEST(RawMethod, UnaryRpc_SendsResponse) {
+  std::byte buffer[16];
+  protobuf::NestedEncoder encoder(buffer);
+  test::TestRequest::Encoder test_request(&encoder);
+  test_request.WriteInteger(456);
+  test_request.WriteStatusCode(7);
+
+  const RawMethod& method = std::get<0>(FakeService::kMethods).raw_method();
+  ServerContextForTest<FakeService> context(method);
+  method.Invoke(context.get(), context.packet(encoder.Encode().value()));
+
+  EXPECT_EQ(last_request.integer, 456);
+  EXPECT_EQ(last_request.status_code, 7u);
+
+  const Packet& response = context.output().sent_packet();
+  EXPECT_EQ(response.status(), Status::Unauthenticated());
+
+  protobuf::Decoder decoder(response.payload());
+  ASSERT_TRUE(decoder.Next().ok());
+  int64_t value;
+  EXPECT_EQ(decoder.ReadInt64(&value), Status::Ok());
+  EXPECT_EQ(value, 461);
+}
+
+TEST(RawMethod, ServerStreamingRpc_SendsNothingWhenInitiallyCalled) {
+  std::byte buffer[16];
+  protobuf::NestedEncoder encoder(buffer);
+  test::TestRequest::Encoder test_request(&encoder);
+  test_request.WriteInteger(777);
+  test_request.WriteStatusCode(2);
+
+  const RawMethod& method = std::get<1>(FakeService::kMethods).raw_method();
+  ServerContextForTest<FakeService> context(method);
+
+  method.Invoke(context.get(), context.packet(encoder.Encode().value()));
+
+  EXPECT_EQ(0u, context.output().packet_count());
+  EXPECT_EQ(777, last_request.integer);
+  EXPECT_EQ(2u, last_request.status_code);
+  EXPECT_TRUE(last_writer.open());
+  last_writer.Finish();
+}
+
+TEST(RawServerWriter, Write_SendsPreviouslyAcquiredBuffer) {
+  const RawMethod& method = std::get<1>(FakeService::kMethods).raw_method();
+  ServerContextForTest<FakeService> context(method);
+
+  method.Invoke(context.get(), context.packet({}));
+
+  auto buffer = last_writer.PayloadBuffer();
+
+  constexpr auto data = bytes::Array<0x0d, 0x06, 0xf0, 0x0d>();
+  std::memcpy(buffer.data(), data.data(), data.size());
+
+  EXPECT_EQ(last_writer.Write(buffer.first(data.size())), Status::Ok());
+
+  const internal::Packet& packet = context.output().sent_packet();
+  EXPECT_EQ(packet.type(), internal::PacketType::RESPONSE);
+  EXPECT_EQ(packet.channel_id(), context.kChannelId);
+  EXPECT_EQ(packet.service_id(), context.kServiceId);
+  EXPECT_EQ(packet.method_id(), context.get().method().id());
+  EXPECT_EQ(std::memcmp(packet.payload().data(), data.data(), data.size()), 0);
+  EXPECT_EQ(packet.status(), Status::Ok());
+}
+
+TEST(RawServerWriter, Write_SendsExternalBuffer) {
+  const RawMethod& method = std::get<1>(FakeService::kMethods).raw_method();
+  ServerContextForTest<FakeService> context(method);
+
+  method.Invoke(context.get(), context.packet({}));
+
+  constexpr auto data = bytes::Array<0x0d, 0x06, 0xf0, 0x0d>();
+  EXPECT_EQ(last_writer.Write(data), Status::Ok());
+
+  const internal::Packet& packet = context.output().sent_packet();
+  EXPECT_EQ(packet.type(), internal::PacketType::RESPONSE);
+  EXPECT_EQ(packet.channel_id(), context.kChannelId);
+  EXPECT_EQ(packet.service_id(), context.kServiceId);
+  EXPECT_EQ(packet.method_id(), context.get().method().id());
+  EXPECT_EQ(std::memcmp(packet.payload().data(), data.data(), data.size()), 0);
+  EXPECT_EQ(packet.status(), Status::Ok());
+}
+
+TEST(RawServerWriter, Write_BufferTooSmall_ReturnsOutOfRange) {
+  const RawMethod& method = std::get<1>(FakeService::kMethods).raw_method();
+  ServerContextForTest<FakeService, 16> context(method);
+
+  method.Invoke(context.get(), context.packet({}));
+
+  constexpr auto data =
+      bytes::Array<0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16>();
+  EXPECT_EQ(last_writer.Write(data), Status::OutOfRange());
+}
+
+TEST(RawServerWriter,
+     Destructor_ReleasesAcquiredBufferWithoutSendingAndCloses) {
+  const RawMethod& method = std::get<1>(FakeService::kMethods).raw_method();
+  ServerContextForTest<FakeService> context(method);
+
+  method.Invoke(context.get(), context.packet({}));
+
+  {
+    RawServerWriter writer = std::move(last_writer);
+    auto buffer = writer.PayloadBuffer();
+    buffer[0] = std::byte{'!'};
+    // Don't release the buffer.
+  }
+
+  auto output = context.output();
+  EXPECT_EQ(output.packet_count(), 1u);
+  EXPECT_EQ(output.sent_packet().type(), PacketType::SERVER_STREAM_END);
+}
+
+}  // namespace
+}  // namespace pw::rpc::internal
diff --git a/pw_rpc/raw/raw_method_union_test.cc b/pw_rpc/raw/raw_method_union_test.cc
new file mode 100644
index 0000000..599601e
--- /dev/null
+++ b/pw_rpc/raw/raw_method_union_test.cc
@@ -0,0 +1,145 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_rpc/internal/raw_method_union.h"
+
+#include <array>
+
+#include "gtest/gtest.h"
+#include "pw_bytes/array.h"
+#include "pw_protobuf/decoder.h"
+#include "pw_protobuf/encoder.h"
+#include "pw_rpc/server_context.h"
+#include "pw_rpc/service.h"
+#include "pw_rpc_private/internal_test_utils.h"
+#include "pw_rpc_test_protos/test.pwpb.h"
+
+namespace pw::rpc::internal {
+namespace {
+
+template <typename Implementation>
+class FakeGeneratedService : public Service {
+ public:
+  constexpr FakeGeneratedService(uint32_t id) : Service(id, kMethods) {}
+
+  static constexpr std::array<RawMethodUnion, 3> kMethods = {
+      GetRawMethodFor<&Implementation::DoNothing>(10u),
+      GetRawMethodFor<&Implementation::AddFive>(11u),
+      GetRawMethodFor<&Implementation::StartStream>(12u),
+  };
+};
+
+struct {
+  int64_t integer;
+  uint32_t status_code;
+} last_request;
+RawServerWriter last_writer;
+
+class FakeGeneratedServiceImpl
+    : public FakeGeneratedService<FakeGeneratedServiceImpl> {
+ public:
+  FakeGeneratedServiceImpl(uint32_t id) : FakeGeneratedService(id) {}
+
+  StatusWithSize DoNothing(ServerContext&, ConstByteSpan, ByteSpan) {
+    return StatusWithSize::Unknown();
+  }
+
+  StatusWithSize AddFive(ServerContext&,
+                         ConstByteSpan request,
+                         ByteSpan response) {
+    DecodeRawTestRequest(request);
+
+    protobuf::NestedEncoder encoder(response);
+    test::TestResponse::Encoder test_response(&encoder);
+    test_response.WriteValue(last_request.integer + 5);
+    ConstByteSpan payload;
+    encoder.Encode(&payload);
+
+    return StatusWithSize::Unauthenticated(payload.size());
+  }
+
+  void StartStream(ServerContext&,
+                   ConstByteSpan request,
+                   RawServerWriter& writer) {
+    DecodeRawTestRequest(request);
+    last_writer = std::move(writer);
+  }
+
+ private:
+  void DecodeRawTestRequest(ConstByteSpan request) {
+    protobuf::Decoder decoder(request);
+
+    while (decoder.Next().ok()) {
+      test::TestRequest::Fields field =
+          static_cast<test::TestRequest::Fields>(decoder.FieldNumber());
+
+      switch (field) {
+        case test::TestRequest::Fields::INTEGER:
+          decoder.ReadInt64(&last_request.integer);
+          break;
+        case test::TestRequest::Fields::STATUS_CODE:
+          decoder.ReadUint32(&last_request.status_code);
+          break;
+      }
+    }
+  }
+};
+
+TEST(RawMethodUnion, InvokesUnary) {
+  std::byte buffer[16];
+  protobuf::NestedEncoder encoder(buffer);
+  test::TestRequest::Encoder test_request(&encoder);
+  test_request.WriteInteger(456);
+  test_request.WriteStatusCode(7);
+
+  const Method& method =
+      std::get<1>(FakeGeneratedServiceImpl::kMethods).method();
+  ServerContextForTest<FakeGeneratedServiceImpl> context(method);
+  method.Invoke(context.get(), context.packet(encoder.Encode().value()));
+
+  EXPECT_EQ(last_request.integer, 456);
+  EXPECT_EQ(last_request.status_code, 7u);
+
+  const Packet& response = context.output().sent_packet();
+  EXPECT_EQ(response.status(), Status::Unauthenticated());
+
+  protobuf::Decoder decoder(response.payload());
+  ASSERT_TRUE(decoder.Next().ok());
+  int64_t value;
+  EXPECT_EQ(decoder.ReadInt64(&value), Status::Ok());
+  EXPECT_EQ(value, 461);
+}
+
+TEST(RawMethodUnion, InvokesServerStreaming) {
+  std::byte buffer[16];
+  protobuf::NestedEncoder encoder(buffer);
+  test::TestRequest::Encoder test_request(&encoder);
+  test_request.WriteInteger(777);
+  test_request.WriteStatusCode(2);
+
+  const Method& method =
+      std::get<2>(FakeGeneratedServiceImpl::kMethods).method();
+  ServerContextForTest<FakeGeneratedServiceImpl> context(method);
+
+  method.Invoke(context.get(), context.packet(encoder.Encode().value()));
+
+  EXPECT_EQ(0u, context.output().packet_count());
+  EXPECT_EQ(777, last_request.integer);
+  EXPECT_EQ(2u, last_request.status_code);
+  EXPECT_TRUE(last_writer.open());
+  last_writer.Finish();
+}
+
+}  // namespace
+}  // namespace pw::rpc::internal
diff --git a/pw_rpc/server.cc b/pw_rpc/server.cc
index 891dd2a..845a64f 100644
--- a/pw_rpc/server.cc
+++ b/pw_rpc/server.cc
@@ -17,7 +17,6 @@
 #include <algorithm>
 
 #include "pw_log/log.h"
-#include "pw_rpc/internal/method.h"
 #include "pw_rpc/internal/packet.h"
 #include "pw_rpc/internal/server.h"
 #include "pw_rpc/server_context.h"
@@ -33,11 +32,14 @@
 bool DecodePacket(ChannelOutput& interface,
                   std::span<const byte> data,
                   Packet& packet) {
-  if (Status status = Packet::FromBuffer(data, packet); !status.ok()) {
+  Result<Packet> result = Packet::FromBuffer(data);
+  if (!result.ok()) {
     PW_LOG_WARN("Failed to decode packet on interface %s", interface.name());
     return false;
   }
 
+  packet = result.value();
+
   // If the packet is malformed, don't try to process it.
   if (packet.channel_id() == Channel::kUnassignedChannelId ||
       packet.service_id() == 0 || packet.method_id() == 0) {
@@ -46,7 +48,7 @@
     // Only send an ERROR response if a valid channel ID was provided.
     if (packet.channel_id() != Channel::kUnassignedChannelId) {
       internal::Channel temp_channel(packet.channel_id(), &interface);
-      temp_channel.Send(Packet::Error(packet, Status::DATA_LOSS));
+      temp_channel.Send(Packet::ServerError(packet, Status::DataLoss()));
     }
     return false;
   }
@@ -64,12 +66,15 @@
   }
 }
 
-void Server::ProcessPacket(std::span<const byte> data,
-                           ChannelOutput& interface) {
-  // TODO(hepler): Update the packet parsing code to report when decoding fails.
+Status Server::ProcessPacket(std::span<const byte> data,
+                             ChannelOutput& interface) {
   Packet packet;
   if (!DecodePacket(interface, data, packet)) {
-    return;
+    return Status::DataLoss();
+  }
+
+  if (packet.destination() != Packet::kServer) {
+    return Status::InvalidArgument();
   }
 
   internal::Channel* channel = FindChannel(packet.channel_id());
@@ -79,46 +84,55 @@
     if (channel == nullptr) {
       // If a channel can't be assigned, send a RESOURCE_EXHAUSTED error.
       internal::Channel temp_channel(packet.channel_id(), &interface);
-      temp_channel.Send(Packet::Error(packet, Status::RESOURCE_EXHAUSTED));
-      return;
+      temp_channel.Send(
+          Packet::ServerError(packet, Status::ResourceExhausted()));
+      return Status::Ok();  // OK since the packet was handled
     }
   }
 
+  const auto [service, method] = FindMethod(packet);
+
+  if (method == nullptr) {
+    channel->Send(Packet::ServerError(packet, Status::NotFound()));
+    return Status::Ok();
+  }
+
+  switch (packet.type()) {
+    case PacketType::REQUEST: {
+      internal::ServerCall call(
+          static_cast<internal::Server&>(*this), *channel, *service, *method);
+      method->Invoke(call, packet);
+      break;
+    }
+    case PacketType::CLIENT_STREAM_END:
+      // TODO(hepler): Support client streaming RPCs.
+      break;
+    case PacketType::CLIENT_ERROR:
+      HandleClientError(packet);
+      break;
+    case PacketType::CANCEL_SERVER_STREAM:
+      HandleCancelPacket(packet, *channel);
+      break;
+    default:
+      channel->Send(Packet::ServerError(packet, Status::Unimplemented()));
+      PW_LOG_WARN("Unable to handle packet of type %u",
+                  unsigned(packet.type()));
+  }
+  return Status::Ok();
+}
+
+std::tuple<Service*, const internal::Method*> Server::FindMethod(
+    const internal::Packet& packet) {
   // Packets always include service and method IDs.
   auto service = std::find_if(services_.begin(), services_.end(), [&](auto& s) {
     return s.id() == packet.service_id();
   });
 
   if (service == services_.end()) {
-    channel->Send(Packet::Error(packet, Status::NOT_FOUND));
-    return;
+    return {};
   }
 
-  const internal::Method* method = service->FindMethod(packet.method_id());
-
-  if (method == nullptr) {
-    channel->Send(Packet::Error(packet, Status::NOT_FOUND));
-    return;
-  }
-
-  switch (packet.type()) {
-    case PacketType::RPC: {
-      internal::ServerCall call(
-          static_cast<internal::Server&>(*this), *channel, *service, *method);
-      method->Invoke(call, packet);
-      return;
-    }
-    case PacketType::STREAM_END:
-      // TODO(hepler): Support client streaming RPCs.
-      break;
-    case PacketType::CANCEL:
-      HandleCancelPacket(packet, *channel);
-      return;
-    case PacketType::ERROR:
-      break;
-  }
-  channel->Send(Packet::Error(packet, Status::UNIMPLEMENTED));
-  PW_LOG_WARN("Unable to handle packet of type %u", unsigned(packet.type()));
+  return {&(*service), service->FindMethod(packet.method_id())};
 }
 
 void Server::HandleCancelPacket(const Packet& packet,
@@ -130,10 +144,25 @@
   });
 
   if (writer == writers_.end()) {
-    channel.Send(Packet::Error(packet, Status::FAILED_PRECONDITION));
+    channel.Send(Packet::ServerError(packet, Status::FailedPrecondition()));
     PW_LOG_WARN("Received CANCEL packet for method that is not pending");
   } else {
-    writer->Finish(Status::CANCELLED);
+    writer->Finish(Status::Cancelled());
+  }
+}
+
+void Server::HandleClientError(const Packet& packet) {
+  // A client error indicates that the client received a packet that it did not
+  // expect. If the packet belongs to a streaming RPC, cancel the stream without
+  // sending a final SERVER_STREAM_END packet.
+  auto writer = std::find_if(writers_.begin(), writers_.end(), [&](auto& w) {
+    return w.channel_id() == packet.channel_id() &&
+           w.service_id() == packet.service_id() &&
+           w.method_id() == packet.method_id();
+  });
+
+  if (writer != writers_.end()) {
+    writer->Close();
   }
 }
 
@@ -157,8 +186,4 @@
   return channel;
 }
 
-static_assert(std::is_base_of<internal::BaseMethod, internal::Method>(),
-              "The Method implementation must be derived from "
-              "pw::rpc::internal::BaseMethod");
-
 }  // namespace pw::rpc
diff --git a/pw_rpc/server_test.cc b/pw_rpc/server_test.cc
index e2042ac..e109edf 100644
--- a/pw_rpc/server_test.cc
+++ b/pw_rpc/server_test.cc
@@ -19,7 +19,9 @@
 
 #include "gtest/gtest.h"
 #include "pw_assert/assert.h"
+#include "pw_rpc/internal/method.h"
 #include "pw_rpc/internal/packet.h"
+#include "pw_rpc/internal/test_method.h"
 #include "pw_rpc/service.h"
 #include "pw_rpc_private/internal_test_utils.h"
 
@@ -28,23 +30,24 @@
 
 using std::byte;
 
-using internal::Method;
 using internal::Packet;
 using internal::PacketType;
+using internal::TestMethod;
+using internal::TestMethodUnion;
 
 class TestService : public Service {
  public:
   TestService(uint32_t service_id)
       : Service(service_id, methods_),
         methods_{
-            Method(100),
-            Method(200),
+            TestMethod(100),
+            TestMethod(200),
         } {}
 
-  Method& method(uint32_t id) {
-    for (Method& method : methods_) {
-      if (method.id() == id) {
-        return method;
+  const TestMethod& method(uint32_t id) {
+    for (TestMethodUnion& method : methods_) {
+      if (method.method().id() == id) {
+        return method.test_method();
       }
     }
 
@@ -52,7 +55,7 @@
   }
 
  private:
-  std::array<Method, 2> methods_;
+  std::array<TestMethodUnion, 2> methods_;
 };
 
 class BasicServer : public ::testing::Test {
@@ -77,10 +80,10 @@
       uint32_t service_id,
       uint32_t method_id,
       std::span<const byte> payload = kDefaultPayload) {
-    auto sws = Packet(type, channel_id, service_id, method_id, payload)
-                   .Encode(request_buffer_);
-    EXPECT_EQ(Status::OK, sws.status());
-    return std::span(request_buffer_, sws.size());
+    auto result = Packet(type, channel_id, service_id, method_id, payload)
+                      .Encode(request_buffer_);
+    EXPECT_EQ(Status::Ok(), result.status());
+    return result.value_or(ConstByteSpan());
   }
 
   TestOutput<128> output_;
@@ -93,9 +96,11 @@
 };
 
 TEST_F(BasicServer, ProcessPacket_ValidMethod_InvokesMethod) {
-  server_.ProcessPacket(EncodeRequest(PacketType::RPC, 1, 42, 100), output_);
+  EXPECT_EQ(Status::Ok(),
+            server_.ProcessPacket(
+                EncodeRequest(PacketType::REQUEST, 1, 42, 100), output_));
 
-  const Method& method = service_.method(100);
+  const TestMethod& method = service_.method(100);
   EXPECT_EQ(1u, method.last_channel_id());
   ASSERT_EQ(sizeof(kDefaultPayload), method.last_request().payload().size());
   EXPECT_EQ(std::memcmp(kDefaultPayload,
@@ -105,68 +110,87 @@
 }
 
 TEST_F(BasicServer, ProcessPacket_IncompletePacket_NothingIsInvoked) {
-  server_.ProcessPacket(EncodeRequest(PacketType::RPC, 0, 42, 101), output_);
-  server_.ProcessPacket(EncodeRequest(PacketType::RPC, 1, 0, 101), output_);
-  server_.ProcessPacket(EncodeRequest(PacketType::RPC, 1, 42, 0), output_);
+  EXPECT_EQ(Status::DataLoss(),
+            server_.ProcessPacket(
+                EncodeRequest(PacketType::REQUEST, 0, 42, 101), output_));
+  EXPECT_EQ(Status::DataLoss(),
+            server_.ProcessPacket(EncodeRequest(PacketType::REQUEST, 1, 0, 101),
+                                  output_));
+  EXPECT_EQ(Status::DataLoss(),
+            server_.ProcessPacket(EncodeRequest(PacketType::REQUEST, 1, 42, 0),
+                                  output_));
 
   EXPECT_EQ(0u, service_.method(100).last_channel_id());
   EXPECT_EQ(0u, service_.method(200).last_channel_id());
 }
 
 TEST_F(BasicServer, ProcessPacket_NoChannel_SendsNothing) {
-  server_.ProcessPacket(EncodeRequest(PacketType::RPC, 0, 42, 101), output_);
+  EXPECT_EQ(Status::DataLoss(),
+            server_.ProcessPacket(
+                EncodeRequest(PacketType::REQUEST, 0, 42, 101), output_));
 
   EXPECT_EQ(output_.packet_count(), 0u);
 }
 
 TEST_F(BasicServer, ProcessPacket_NoService_SendsDataLoss) {
-  server_.ProcessPacket(EncodeRequest(PacketType::RPC, 1, 0, 101), output_);
+  EXPECT_EQ(Status::DataLoss(),
+            server_.ProcessPacket(EncodeRequest(PacketType::REQUEST, 1, 0, 101),
+                                  output_));
 
-  EXPECT_EQ(output_.sent_packet().type(), PacketType::ERROR);
-  EXPECT_EQ(output_.sent_packet().status(), Status::DATA_LOSS);
+  EXPECT_EQ(output_.sent_packet().type(), PacketType::SERVER_ERROR);
+  EXPECT_EQ(output_.sent_packet().status(), Status::DataLoss());
 }
 
 TEST_F(BasicServer, ProcessPacket_NoMethod_SendsDataLoss) {
-  server_.ProcessPacket(EncodeRequest(PacketType::RPC, 1, 42, 0), output_);
+  EXPECT_EQ(Status::DataLoss(),
+            server_.ProcessPacket(EncodeRequest(PacketType::REQUEST, 1, 42, 0),
+                                  output_));
 
-  EXPECT_EQ(output_.sent_packet().type(), PacketType::ERROR);
-  EXPECT_EQ(output_.sent_packet().status(), Status::DATA_LOSS);
+  EXPECT_EQ(output_.sent_packet().type(), PacketType::SERVER_ERROR);
+  EXPECT_EQ(output_.sent_packet().status(), Status::DataLoss());
 }
 
 TEST_F(BasicServer, ProcessPacket_InvalidMethod_NothingIsInvoked) {
-  server_.ProcessPacket(EncodeRequest(PacketType::RPC, 1, 42, 101), output_);
+  EXPECT_EQ(Status::Ok(),
+            server_.ProcessPacket(
+                EncodeRequest(PacketType::REQUEST, 1, 42, 101), output_));
 
   EXPECT_EQ(0u, service_.method(100).last_channel_id());
   EXPECT_EQ(0u, service_.method(200).last_channel_id());
 }
 
 TEST_F(BasicServer, ProcessPacket_InvalidMethod_SendsError) {
-  server_.ProcessPacket(EncodeRequest(PacketType::RPC, 1, 42, 27), output_);
+  EXPECT_EQ(Status::Ok(),
+            server_.ProcessPacket(EncodeRequest(PacketType::REQUEST, 1, 42, 27),
+                                  output_));
 
   const Packet& packet = output_.sent_packet();
-  EXPECT_EQ(packet.type(), PacketType::ERROR);
+  EXPECT_EQ(packet.type(), PacketType::SERVER_ERROR);
   EXPECT_EQ(packet.channel_id(), 1u);
   EXPECT_EQ(packet.service_id(), 42u);
   EXPECT_EQ(packet.method_id(), 27u);  // No method ID 27
-  EXPECT_EQ(packet.status(), Status::NOT_FOUND);
+  EXPECT_EQ(packet.status(), Status::NotFound());
 }
 
 TEST_F(BasicServer, ProcessPacket_InvalidService_SendsError) {
-  server_.ProcessPacket(EncodeRequest(PacketType::RPC, 1, 43, 27), output_);
+  EXPECT_EQ(Status::Ok(),
+            server_.ProcessPacket(EncodeRequest(PacketType::REQUEST, 1, 43, 27),
+                                  output_));
 
   const Packet& packet = output_.sent_packet();
-  EXPECT_EQ(packet.type(), PacketType::ERROR);
+  EXPECT_EQ(packet.type(), PacketType::SERVER_ERROR);
   EXPECT_EQ(packet.channel_id(), 1u);
   EXPECT_EQ(packet.service_id(), 43u);  // No service ID 43
   EXPECT_EQ(packet.method_id(), 27u);
-  EXPECT_EQ(packet.status(), Status::NOT_FOUND);
+  EXPECT_EQ(packet.status(), Status::NotFound());
 }
 
 TEST_F(BasicServer, ProcessPacket_UnassignedChannel_AssignsToAvailableSlot) {
   TestOutput<128> unassigned_output;
-  server_.ProcessPacket(
-      EncodeRequest(PacketType::RPC, /*channel_id=*/99, 42, 100),
-      unassigned_output);
+  EXPECT_EQ(Status::Ok(),
+            server_.ProcessPacket(
+                EncodeRequest(PacketType::REQUEST, /*channel_id=*/99, 42, 100),
+                unassigned_output));
   EXPECT_EQ(channels_[2].id(), 99u);
 }
 
@@ -174,11 +198,13 @@
        ProcessPacket_UnassignedChannel_SendsResourceExhaustedIfCannotAssign) {
   channels_[2] = Channel::Create<3>(&output_);  // Occupy only available channel
 
-  server_.ProcessPacket(
-      EncodeRequest(PacketType::RPC, /*channel_id=*/99, 42, 27), output_);
+  EXPECT_EQ(Status::Ok(),
+            server_.ProcessPacket(
+                EncodeRequest(PacketType::REQUEST, /*channel_id=*/99, 42, 27),
+                output_));
 
   const Packet& packet = output_.sent_packet();
-  EXPECT_EQ(packet.status(), Status::RESOURCE_EXHAUSTED);
+  EXPECT_EQ(packet.status(), Status::ResourceExhausted());
   EXPECT_EQ(packet.channel_id(), 99u);
   EXPECT_EQ(packet.service_id(), 42u);
   EXPECT_EQ(packet.method_id(), 27u);
@@ -186,14 +212,17 @@
 
 TEST_F(BasicServer, ProcessPacket_Cancel_MethodNotActive_SendsError) {
   // Set up a fake ServerWriter representing an ongoing RPC.
-  server_.ProcessPacket(EncodeRequest(PacketType::CANCEL, 1, 42, 100), output_);
+  EXPECT_EQ(Status::Ok(),
+            server_.ProcessPacket(
+                EncodeRequest(PacketType::CANCEL_SERVER_STREAM, 1, 42, 100),
+                output_));
 
   const Packet& packet = output_.sent_packet();
-  EXPECT_EQ(packet.type(), PacketType::ERROR);
+  EXPECT_EQ(packet.type(), PacketType::SERVER_ERROR);
   EXPECT_EQ(packet.channel_id(), 1u);
   EXPECT_EQ(packet.service_id(), 42u);
   EXPECT_EQ(packet.method_id(), 100u);
-  EXPECT_EQ(packet.status(), Status::FAILED_PRECONDITION);
+  EXPECT_EQ(packet.status(), Status::FailedPrecondition());
 }
 
 class MethodPending : public BasicServer {
@@ -212,45 +241,70 @@
 };
 
 TEST_F(MethodPending, ProcessPacket_Cancel_ClosesServerWriter) {
-  server_.ProcessPacket(EncodeRequest(PacketType::CANCEL, 1, 42, 100), output_);
+  EXPECT_EQ(Status::Ok(),
+            server_.ProcessPacket(
+                EncodeRequest(PacketType::CANCEL_SERVER_STREAM, 1, 42, 100),
+                output_));
 
   EXPECT_FALSE(writer_.open());
 }
 
 TEST_F(MethodPending, ProcessPacket_Cancel_SendsStreamEndPacket) {
-  server_.ProcessPacket(EncodeRequest(PacketType::CANCEL, 1, 42, 100), output_);
+  EXPECT_EQ(Status::Ok(),
+            server_.ProcessPacket(
+                EncodeRequest(PacketType::CANCEL_SERVER_STREAM, 1, 42, 100),
+                output_));
 
   const Packet& packet = output_.sent_packet();
-  EXPECT_EQ(packet.type(), PacketType::STREAM_END);
+  EXPECT_EQ(packet.type(), PacketType::SERVER_STREAM_END);
   EXPECT_EQ(packet.channel_id(), 1u);
   EXPECT_EQ(packet.service_id(), 42u);
   EXPECT_EQ(packet.method_id(), 100u);
   EXPECT_TRUE(packet.payload().empty());
-  EXPECT_EQ(packet.status(), Status::CANCELLED);
+  EXPECT_EQ(packet.status(), Status::Cancelled());
+}
+
+TEST_F(MethodPending,
+       ProcessPacket_ClientError_ClosesServerWriterWithoutStreamEnd) {
+  EXPECT_EQ(Status::Ok(),
+            server_.ProcessPacket(
+                EncodeRequest(PacketType::CLIENT_ERROR, 1, 42, 100), output_));
+
+  EXPECT_FALSE(writer_.open());
+  EXPECT_EQ(output_.packet_count(), 0u);
 }
 
 TEST_F(MethodPending, ProcessPacket_Cancel_IncorrectChannel) {
-  server_.ProcessPacket(EncodeRequest(PacketType::CANCEL, 2, 42, 100), output_);
+  EXPECT_EQ(Status::Ok(),
+            server_.ProcessPacket(
+                EncodeRequest(PacketType::CANCEL_SERVER_STREAM, 2, 42, 100),
+                output_));
 
-  EXPECT_EQ(output_.sent_packet().type(), PacketType::ERROR);
-  EXPECT_EQ(output_.sent_packet().status(), Status::FAILED_PRECONDITION);
+  EXPECT_EQ(output_.sent_packet().type(), PacketType::SERVER_ERROR);
+  EXPECT_EQ(output_.sent_packet().status(), Status::FailedPrecondition());
   EXPECT_TRUE(writer_.open());
 }
 
 TEST_F(MethodPending, ProcessPacket_Cancel_IncorrectService) {
-  server_.ProcessPacket(EncodeRequest(PacketType::CANCEL, 1, 43, 100), output_);
+  EXPECT_EQ(Status::Ok(),
+            server_.ProcessPacket(
+                EncodeRequest(PacketType::CANCEL_SERVER_STREAM, 1, 43, 100),
+                output_));
 
-  EXPECT_EQ(output_.sent_packet().type(), PacketType::ERROR);
-  EXPECT_EQ(output_.sent_packet().status(), Status::NOT_FOUND);
+  EXPECT_EQ(output_.sent_packet().type(), PacketType::SERVER_ERROR);
+  EXPECT_EQ(output_.sent_packet().status(), Status::NotFound());
   EXPECT_EQ(output_.sent_packet().service_id(), 43u);
   EXPECT_EQ(output_.sent_packet().method_id(), 100u);
   EXPECT_TRUE(writer_.open());
 }
 
 TEST_F(MethodPending, ProcessPacket_CancelIncorrectMethod) {
-  server_.ProcessPacket(EncodeRequest(PacketType::CANCEL, 1, 42, 101), output_);
-  EXPECT_EQ(output_.sent_packet().type(), PacketType::ERROR);
-  EXPECT_EQ(output_.sent_packet().status(), Status::NOT_FOUND);
+  EXPECT_EQ(Status::Ok(),
+            server_.ProcessPacket(
+                EncodeRequest(PacketType::CANCEL_SERVER_STREAM, 1, 42, 101),
+                output_));
+  EXPECT_EQ(output_.sent_packet().type(), PacketType::SERVER_ERROR);
+  EXPECT_EQ(output_.sent_packet().status(), Status::NotFound());
   EXPECT_TRUE(writer_.open());
 }
 
diff --git a/pw_rpc/service.cc b/pw_rpc/service.cc
index 50e0447..8e44e07 100644
--- a/pw_rpc/service.cc
+++ b/pw_rpc/service.cc
@@ -14,17 +14,23 @@
 
 #include "pw_rpc/service.h"
 
+#include <cstddef>
 #include <type_traits>
 
-#include "pw_rpc/internal/method.h"
-
 namespace pw::rpc {
 
 const internal::Method* Service::FindMethod(uint32_t method_id) const {
-  for (const internal::Method& method : methods_) {
-    if (method.id() == method_id) {
-      return &method;
+  const internal::MethodUnion* method_impl = methods_;
+
+  for (size_t i = 0; i < method_count_; ++i) {
+    const internal::Method* method = &method_impl->method();
+    if (method->id() == method_id) {
+      return method;
     }
+
+    const auto raw = reinterpret_cast<const std::byte*>(method_impl);
+    method_impl =
+        reinterpret_cast<const internal::MethodUnion*>(raw + method_size_);
   }
 
   return nullptr;
diff --git a/pw_rpc/service_test.cc b/pw_rpc/service_test.cc
new file mode 100644
index 0000000..959323f
--- /dev/null
+++ b/pw_rpc/service_test.cc
@@ -0,0 +1,99 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_rpc/service.h"
+
+#include "gtest/gtest.h"
+#include "pw_rpc/internal/method.h"
+
+namespace pw::rpc {
+
+class ServiceTestHelper {
+ public:
+  static const internal::Method* FindMethod(Service& service, uint32_t id) {
+    return service.FindMethod(id);
+  }
+};
+
+namespace {
+
+void InvokeIt(const internal::Method&,
+              internal::ServerCall&,
+              const internal::Packet&) {}
+
+class ServiceTestMethod : public internal::Method {
+ public:
+  constexpr ServiceTestMethod(uint32_t id, char the_value)
+      : internal::Method(id, InvokeIt), value(the_value) {}
+
+  char value;  // Add a member so the class is larger than the base Method.
+};
+
+class ServiceTestMethodUnion : public internal::MethodUnion {
+ public:
+  constexpr ServiceTestMethodUnion(ServiceTestMethod&& method)
+      : impl_({.service_test = method}) {}
+
+  constexpr const internal::Method& method() const { return impl_.method; }
+
+ private:
+  union {
+    internal::Method method;
+    ServiceTestMethod service_test;
+  } impl_;
+};
+
+class TestService : public Service {
+ public:
+  constexpr TestService() : Service(0xabcd, kMethods) {}
+
+  static constexpr std::array<ServiceTestMethodUnion, 3> kMethods = {
+      ServiceTestMethod(123, 'a'),
+      ServiceTestMethod(456, 'b'),
+      ServiceTestMethod(789, 'c'),
+  };
+};
+
+TEST(Service, MultipleMethods_FindMethod_Present) {
+  TestService service;
+  EXPECT_EQ(ServiceTestHelper::FindMethod(service, 123),
+            &TestService::kMethods[0].method());
+  EXPECT_EQ(ServiceTestHelper::FindMethod(service, 456),
+            &TestService::kMethods[1].method());
+  EXPECT_EQ(ServiceTestHelper::FindMethod(service, 789),
+            &TestService::kMethods[2].method());
+}
+
+TEST(Service, MultipleMethods_FindMethod_NotPresent) {
+  TestService service;
+  EXPECT_EQ(ServiceTestHelper::FindMethod(service, 0), nullptr);
+  EXPECT_EQ(ServiceTestHelper::FindMethod(service, 457), nullptr);
+  EXPECT_EQ(ServiceTestHelper::FindMethod(service, 999), nullptr);
+}
+
+class EmptyTestService : public Service {
+ public:
+  constexpr EmptyTestService() : Service(0xabcd, kMethods) {}
+  static constexpr std::array<ServiceTestMethodUnion, 0> kMethods = {};
+};
+
+TEST(Service, NoMethods_FindMethod_NotPresent) {
+  EmptyTestService service;
+  EXPECT_EQ(ServiceTestHelper::FindMethod(service, 123), nullptr);
+  EXPECT_EQ(ServiceTestHelper::FindMethod(service, 456), nullptr);
+  EXPECT_EQ(ServiceTestHelper::FindMethod(service, 789), nullptr);
+}
+
+}  // namespace
+}  // namespace pw::rpc
diff --git a/pw_rpc/size_report/BUILD b/pw_rpc/size_report/BUILD
new file mode 100644
index 0000000..e4e46ca
--- /dev/null
+++ b/pw_rpc/size_report/BUILD
@@ -0,0 +1,55 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+load(
+    "//pw_build:pigweed.bzl",
+    "pw_cc_binary",
+)
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])  # Apache License 2.0
+
+pw_cc_binary(
+    name = "base",
+    srcs = ["base.cc"],
+    deps = [
+        "//pw_assert",
+        "//pw_bloat:bloat_this_binary",
+        "//pw_log",
+        "//pw_rpc:server",
+        "//pw_sys_io",
+    ],
+)
+
+pw_cc_binary(
+    name = "server_only",
+    srcs = ["server_only.cc"],
+    deps = [
+        "//pw_assert",
+        "//pw_bloat:bloat_this_binary",
+        "//pw_log",
+        "//pw_rpc:server",
+        "//pw_sys_io",
+    ],
+)
+
+# TODO(frolv): Figure out how to add third-party nanopb to Bazel.
+filegroup(
+    name = "nanopb_reports",
+    srcs = [
+        "base_with_nanopb.cc",
+        "server_with_echo_service.cc",
+    ],
+)
diff --git a/pw_rpc/size_report/BUILD.gn b/pw_rpc/size_report/BUILD.gn
new file mode 100644
index 0000000..07758af
--- /dev/null
+++ b/pw_rpc/size_report/BUILD.gn
@@ -0,0 +1,45 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+_deps = [
+  "$dir_pw_bloat:bloat_this_binary",
+  "..:server",
+  dir_pw_assert,
+  dir_pw_log,
+  dir_pw_sys_io,
+]
+
+pw_executable("base") {
+  sources = [ "base.cc" ]
+  deps = _deps
+}
+
+pw_executable("base_with_nanopb") {
+  sources = [ "base_with_nanopb.cc" ]
+  deps = _deps + [ "$dir_pw_third_party/nanopb" ]
+}
+
+pw_executable("server_only") {
+  sources = [ "server_only.cc" ]
+  deps = _deps
+}
+
+pw_executable("server_with_echo_service") {
+  sources = [ "server_with_echo_service.cc" ]
+  deps = _deps + [ "../nanopb:echo_service" ]
+}
diff --git a/pw_rpc/size_report/base.cc b/pw_rpc/size_report/base.cc
new file mode 100644
index 0000000..031afee
--- /dev/null
+++ b/pw_rpc/size_report/base.cc
@@ -0,0 +1,34 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_assert/assert.h"
+#include "pw_bloat/bloat_this_binary.h"
+#include "pw_log/log.h"
+#include "pw_sys_io/sys_io.h"
+
+int volatile* unoptimizable;
+
+int main() {
+  pw::bloat::BloatThisBinary();
+
+  // Ensure we are paying the cost for log and assert.
+  PW_CHECK_INT_GE(*unoptimizable, 0, "Ensure this CHECK logic stays");
+  PW_LOG_INFO("We care about optimizing: %d", *unoptimizable);
+
+  std::byte packet_buffer[128];
+  pw::sys_io::ReadBytes(packet_buffer);
+  pw::sys_io::WriteBytes(packet_buffer);
+
+  return static_cast<int>(packet_buffer[92]);
+}
diff --git a/pw_rpc/size_report/base_with_nanopb.cc b/pw_rpc/size_report/base_with_nanopb.cc
new file mode 100644
index 0000000..a5333db
--- /dev/null
+++ b/pw_rpc/size_report/base_with_nanopb.cc
@@ -0,0 +1,66 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pb_decode.h"
+#include "pb_encode.h"
+#include "pw_assert/assert.h"
+#include "pw_bloat/bloat_this_binary.h"
+#include "pw_log/log.h"
+#include "pw_sys_io/sys_io.h"
+
+int volatile* unoptimizable;
+
+namespace my_product {
+
+template <typename DecodeFunction>
+struct NanopbTraits;
+
+template <typename FieldsType>
+struct NanopbTraits<bool(pb_istream_t*, FieldsType, void*)> {
+  using Fields = FieldsType;
+};
+
+using Fields = typename NanopbTraits<decltype(pb_decode)>::Fields;
+
+// Performs the core nanopb encode and decode operations so that those functions
+// are included in the binary.
+void DoNanopbStuff() {
+  std::byte buffer[128];
+  void* fields = &buffer;
+
+  auto output = pb_ostream_from_buffer(reinterpret_cast<pb_byte_t*>(buffer),
+                                       sizeof(buffer));
+  pb_encode(&output, static_cast<Fields>(fields), buffer);
+
+  auto input = pb_istream_from_buffer(
+      reinterpret_cast<const pb_byte_t*>(buffer), sizeof(buffer));
+  pb_decode(&input, static_cast<Fields>(fields), buffer);
+}
+
+}  // namespace my_product
+
+int main() {
+  pw::bloat::BloatThisBinary();
+  my_product::DoNanopbStuff();
+
+  // Ensure we are paying the cost for log and assert.
+  PW_CHECK_INT_GE(*unoptimizable, 0, "Ensure this CHECK logic stays");
+  PW_LOG_INFO("We care about optimizing: %d", *unoptimizable);
+
+  std::byte packet_buffer[128];
+  pw::sys_io::ReadBytes(packet_buffer);
+  pw::sys_io::WriteBytes(packet_buffer);
+
+  return static_cast<int>(packet_buffer[92]);
+}
diff --git a/pw_rpc/size_report/server_only.cc b/pw_rpc/size_report/server_only.cc
new file mode 100644
index 0000000..a8f8ff2
--- /dev/null
+++ b/pw_rpc/size_report/server_only.cc
@@ -0,0 +1,59 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_assert/assert.h"
+#include "pw_bloat/bloat_this_binary.h"
+#include "pw_log/log.h"
+#include "pw_rpc/server.h"
+#include "pw_sys_io/sys_io.h"
+
+int volatile* unoptimizable;
+
+class Output : public pw::rpc::ChannelOutput {
+ public:
+  Output() : ChannelOutput("output") {}
+
+  std::span<std::byte> AcquireBuffer() override { return buffer_; }
+
+  pw::Status SendAndReleaseBuffer(size_t size) override {
+    return pw::sys_io::WriteBytes(std::span(buffer_, size)).status();
+  }
+
+ private:
+  std::byte buffer_[128];
+};
+
+namespace my_product {
+
+Output output;
+pw::rpc::Channel channels[] = {pw::rpc::Channel::Create<1>(&output)};
+pw::rpc::Server server(channels);
+
+}  // namespace my_product
+
+int main() {
+  pw::bloat::BloatThisBinary();
+
+  // Ensure we are paying the cost for log and assert.
+  PW_CHECK_INT_GE(*unoptimizable, 0, "Ensure this CHECK logic stays");
+  PW_LOG_INFO("We care about optimizing: %d", *unoptimizable);
+
+  std::byte packet_buffer[128];
+  pw::sys_io::ReadBytes(packet_buffer);
+  pw::sys_io::WriteBytes(packet_buffer);
+
+  my_product::server.ProcessPacket(packet_buffer, my_product::output);
+
+  return static_cast<int>(packet_buffer[92]);
+}
diff --git a/pw_rpc/size_report/server_with_echo_service.cc b/pw_rpc/size_report/server_with_echo_service.cc
new file mode 100644
index 0000000..860ee33
--- /dev/null
+++ b/pw_rpc/size_report/server_with_echo_service.cc
@@ -0,0 +1,90 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pb_decode.h"
+#include "pb_encode.h"
+#include "pw_assert/assert.h"
+#include "pw_bloat/bloat_this_binary.h"
+#include "pw_log/log.h"
+#include "pw_rpc/echo_service_nanopb.h"
+#include "pw_rpc/server.h"
+#include "pw_sys_io/sys_io.h"
+
+int volatile* unoptimizable;
+
+class Output : public pw::rpc::ChannelOutput {
+ public:
+  Output() : ChannelOutput("output") {}
+
+  std::span<std::byte> AcquireBuffer() override { return buffer_; }
+
+  pw::Status SendAndReleaseBuffer(size_t size) override {
+    return pw::sys_io::WriteBytes(std::span(buffer_, size)).status();
+  }
+
+ private:
+  std::byte buffer_[128];
+};
+
+namespace my_product {
+
+template <typename DecodeFunction>
+struct NanopbTraits;
+
+template <typename FieldsType>
+struct NanopbTraits<bool(pb_istream_t*, FieldsType, void*)> {
+  using Fields = FieldsType;
+};
+
+using Fields = typename NanopbTraits<decltype(pb_decode)>::Fields;
+
+// Performs the core nanopb encode and decode operations so that those functions
+// are included in the binary.
+void DoNanopbStuff() {
+  std::byte buffer[128];
+  void* fields = &buffer;
+
+  auto output = pb_ostream_from_buffer(reinterpret_cast<pb_byte_t*>(buffer),
+                                       sizeof(buffer));
+  pb_encode(&output, static_cast<Fields>(fields), buffer);
+
+  auto input = pb_istream_from_buffer(
+      reinterpret_cast<const pb_byte_t*>(buffer), sizeof(buffer));
+  pb_decode(&input, static_cast<Fields>(fields), buffer);
+}
+
+Output output;
+pw::rpc::Channel channels[] = {pw::rpc::Channel::Create<1>(&output)};
+pw::rpc::Server server(channels);
+pw::rpc::EchoService echo_service;
+
+}  // namespace my_product
+
+int main() {
+  pw::bloat::BloatThisBinary();
+  my_product::DoNanopbStuff();
+
+  // Ensure we are paying the cost for log and assert.
+  PW_CHECK_INT_GE(*unoptimizable, 0, "Ensure this CHECK logic stays");
+  PW_LOG_INFO("We care about optimizing: %d", *unoptimizable);
+
+  std::byte packet_buffer[128];
+  pw::sys_io::ReadBytes(packet_buffer);
+  pw::sys_io::WriteBytes(packet_buffer);
+
+  my_product::server.RegisterService(my_product::echo_service);
+  my_product::server.ProcessPacket(packet_buffer, my_product::output);
+
+  return static_cast<int>(packet_buffer[92]);
+}
diff --git a/pw_span/BUILD.gn b/pw_span/BUILD.gn
index 11fd53f..4a1f9ff 100644
--- a/pw_span/BUILD.gn
+++ b/pw_span/BUILD.gn
@@ -12,12 +12,12 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/target_types.gni")
 import("$dir_pw_docgen/docs.gni")
 import("$dir_pw_unit_test/test.gni")
+
 config("default_config") {
   include_dirs = [
     "public",
@@ -38,6 +38,7 @@
 
 pw_test("test") {
   deps = [ ":pw_span" ]
+  remove_configs = [ "$dir_pw_build:extra_strict_warnings" ]
   sources = [ "span_test.cc" ]
 }
 
diff --git a/pw_span/CMakeLists.txt b/pw_span/CMakeLists.txt
index 65644a1..9b22950 100644
--- a/pw_span/CMakeLists.txt
+++ b/pw_span/CMakeLists.txt
@@ -12,5 +12,7 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+
 pw_auto_add_simple_module(pw_span PUBLIC_DEPS pw_polyfill)
 target_include_directories(pw_span PUBLIC public_overrides)
diff --git a/pw_span/docs.rst b/pw_span/docs.rst
index 64dad43..2987ce4 100644
--- a/pw_span/docs.rst
+++ b/pw_span/docs.rst
@@ -1,8 +1,4 @@
-.. _chapter-pw-span:
-
-.. default-domain:: cpp
-
-.. highlight:: sh
+.. _module-pw_span:
 
 -------
 pw_span
diff --git a/pw_span/public/pw_span/internal/span.h b/pw_span/public/pw_span/internal/span.h
index a39859b..d6b6a41 100644
--- a/pw_span/public/pw_span/internal/span.h
+++ b/pw_span/public/pw_span/internal/span.h
@@ -35,6 +35,9 @@
 //
 #pragma once
 
+#ifndef __cpp_lib_span
+#define __cpp_lib_span 202002L
+
 #include <algorithm>
 #include <array>
 #include <cstddef>
@@ -43,7 +46,7 @@
 #include <type_traits>
 #include <utility>
 
-#include "pw_polyfill/language_features.h"
+#include "pw_polyfill/language_feature_macros.h"
 #include "pw_polyfill/standard_library/namespace.h"
 
 // Pigweed: Disable the asserts from Chromium for now.
@@ -470,3 +473,4 @@
 _PW_POLYFILL_END_NAMESPACE_STD
 
 #undef _PW_SPAN_ASSERT
+#endif  // __cpp_lib_span
diff --git a/pw_span/public_overrides/span b/pw_span/public_overrides/span
index 7991e33..1ac4288 100644
--- a/pw_span/public_overrides/span
+++ b/pw_span/public_overrides/span
@@ -13,16 +13,8 @@
 // the License.
 #pragma once
 
-#if __has_include(<version>)
-#include <version>
-#endif  // __has_include(<version>)
-
-#ifdef __cpp_lib_span  // C++ library feature test macro, provided by <version>.
-
+#if __has_include_next(<span>)
 #include_next <span>
-
-#else
+#endif  // __has_include_next(<span>)
 
 #include "pw_span/internal/span.h"
-
-#endif  // __cpp_lib_span
diff --git a/pw_status/BUILD b/pw_status/BUILD
index fb5a645..6d28e38 100644
--- a/pw_status/BUILD
+++ b/pw_status/BUILD
@@ -28,6 +28,7 @@
     hdrs = [
         "public/pw_status/status.h",
         "public/pw_status/status_with_size.h",
+        "public/pw_status/try.h",
     ],
     includes = ["public"],
 )
@@ -52,3 +53,12 @@
         "//pw_unit_test",
     ],
 )
+
+pw_cc_test(
+    name = "try_test",
+    srcs = ["try_test.cc"],
+    deps = [
+        ":pw_status",
+        "//pw_unit_test",
+    ],
+)
diff --git a/pw_status/BUILD.gn b/pw_status/BUILD.gn
index 0522ad2..c1d7086 100644
--- a/pw_status/BUILD.gn
+++ b/pw_status/BUILD.gn
@@ -12,12 +12,12 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/target_types.gni")
 import("$dir_pw_docgen/docs.gni")
 import("$dir_pw_unit_test/test.gni")
+
 config("default_config") {
   include_dirs = [ "public" ]
 }
@@ -27,14 +27,16 @@
   public = [
     "public/pw_status/status.h",
     "public/pw_status/status_with_size.h",
+    "public/pw_status/try.h",
   ]
-  sources = [ "status.cc" ] + public
+  sources = [ "status.cc" ]
 }
 
 pw_test_group("tests") {
   tests = [
     ":status_test",
     ":status_with_size_test",
+    ":try_test",
   ]
 }
 
@@ -51,6 +53,11 @@
   sources = [ "status_with_size_test.cc" ]
 }
 
+pw_test("try_test") {
+  deps = [ ":pw_status" ]
+  sources = [ "try_test.cc" ]
+}
+
 pw_doc_group("docs") {
   sources = [ "docs.rst" ]
 }
diff --git a/pw_status/CMakeLists.txt b/pw_status/CMakeLists.txt
index 132c41e..3c5a0f0 100644
--- a/pw_status/CMakeLists.txt
+++ b/pw_status/CMakeLists.txt
@@ -12,4 +12,6 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+
 pw_auto_add_simple_module(pw_status)
diff --git a/pw_status/docs.rst b/pw_status/docs.rst
index faee13b..a2de75f 100644
--- a/pw_status/docs.rst
+++ b/pw_status/docs.rst
@@ -1,20 +1,250 @@
-.. _chapter-pw-status:
-
-.. default-domain:: cpp
-
-.. highlight:: sh
+.. _module-pw_status:
 
 ---------
 pw_status
 ---------
+``pw_status`` provides features for communicating the result of an operation.
+The classes in ``pw_status`` are used extensively throughout Pigweed.
+
+pw::Status
+==========
+The primary feature of ``pw_status`` is the ``pw::Status`` class.
 ``pw::Status`` (``pw_status/status.h``) is a simple, zero-overhead status
-object. It uses Google's standard status codes, which are also used by projects
-such as `gRPC <https://github.com/grpc/grpc/blob/master/doc/statuscodes.md>`_.
+object that wraps a status code.
 
+``pw::Status`` uses Google's standard status codes (see the `Google APIs
+repository <https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto>`_).
+These codes are used extensively in Google projects including `Abseil
+<https://abseil.io>`_ (`status/status.h
+<https://cs.opensource.google/abseil/abseil-cpp/+/master:absl/status/status.h>`_
+) and `gRPC <https://grpc.io>`_ (`doc/statuscodes.md
+<https://github.com/grpc/grpc/blob/master/doc/statuscodes.md>`_).
+
+A ``Status`` is created with a ``static constexpr`` member function
+corresponding to the code.
+
+.. code-block:: cpp
+
+  // Ok (gRPC code "OK") does not indicate an error; this value is returned on
+  // success. It is typical to check for this value before proceeding on any
+  // given call across an API or RPC boundary. To check this value, use the
+  // `Status::ok()` member function rather than inspecting the raw code.
+  Status::Ok()
+
+  // Cancelled (gRPC code "CANCELLED") indicates the operation was cancelled,
+  // typically by the caller.
+  Status::Cancelled()
+
+  // Unknown (gRPC code "UNKNOWN") indicates an unknown error occurred. In
+  // general, more specific errors should be raised, if possible. Errors raised
+  // by APIs that do not return enough error information may be converted to
+  // this error.
+  Status::Unknown()
+
+  // InvalidArgument (gRPC code "INVALID_ARGUMENT") indicates the caller
+  // specified an invalid argument, such a malformed filename. Note that such
+  // errors should be narrowly limited to indicate to the invalid nature of the
+  // arguments themselves. Errors with validly formed arguments that may cause
+  // errors with the state of the receiving system should be denoted with
+  // `FailedPrecondition` instead.
+  Status::InvalidArgument()
+
+  // DeadlineExceeded (gRPC code "DEADLINE_EXCEEDED") indicates a deadline
+  // expired before the operation could complete. For operations that may change
+  // state within a system, this error may be returned even if the operation has
+  // completed successfully. For example, a successful response from a server
+  // could have been delayed long enough for the deadline to expire.
+  Status::DeadlineExceeded()
+
+  // NotFound (gRPC code "NOT_FOUND") indicates some requested entity (such as
+  // a file or directory) was not found.
+  //
+  // `NotFound` is useful if a request should be denied for an entire class of
+  // users, such as during a gradual feature rollout or undocumented allow list.
+  // If, instead, a request should be denied for specific sets of users, such as
+  // through user-based access control, use `PermissionDenied` instead.
+  Status::NotFound()
+
+  // AlreadyExists (gRPC code "ALREADY_EXISTS") indicates the entity that a
+  // caller attempted to create (such as file or directory) is already present.
+  Status::AlreadyExists()
+
+  // PermissionDenied (gRPC code "PERMISSION_DENIED") indicates that the caller
+  // does not have permission to execute the specified operation. Note that this
+  // error is different than an error due to an *un*authenticated user. This
+  // error code does not imply the request is valid or the requested entity
+  // exists or satisfies any other pre-conditions.
+  //
+  // `PermissionDenied` must not be used for rejections caused by exhausting
+  // some resource. Instead, use `ResourceExhausted` for those errors.
+  // `PermissionDenied` must not be used if the caller cannot be identified.
+  // Instead, use `Unauthenticated` for those errors.
+  Status::PermissionDenied()
+
+  // ResourceExhausted (gRPC code "RESOURCE_EXHAUSTED") indicates some resource
+  // has been exhausted, perhaps a per-user quota, or perhaps the entire file
+  // system is out of space.
+  Status::ResourceExhausted()
+
+  // FailedPrecondition (gRPC code "FAILED_PRECONDITION") indicates that the
+  // operation was rejected because the system is not in a state required for
+  // the operation's execution. For example, a directory to be deleted may be
+  // non-empty, an "rmdir" operation is applied to a non-directory, etc.
+  //
+  // Some guidelines that may help a service implementer in deciding between
+  // `FailedPrecondition`, `Aborted`, and `Unavailable`:
+  //
+  //  (a) Use `Unavailable` if the client can retry just the failing call.
+  //  (b) Use `Aborted` if the client should retry at a higher transaction
+  //      level (such as when a client-specified test-and-set fails, indicating
+  //      the client should restart a read-modify-write sequence).
+  //  (c) Use `FailedPrecondition` if the client should not retry until
+  //      the system state has been explicitly fixed. For example, if an "rmdir"
+  //      fails because the directory is non-empty, `FailedPrecondition`
+  //      should be returned since the client should not retry unless
+  //      the files are deleted from the directory.
+  Status::FailedPrecondition()
+
+  // Aborted (gRPC code "ABORTED") indicates the operation was aborted,
+  // typically due to a concurrency issue such as a sequencer check failure or a
+  // failed transaction.
+  //
+  // See the guidelines above for deciding between `FailedPrecondition`,
+  // `Aborted`, and `Unavailable`.
+  Status::Aborted()
+
+  // OutOfRange (gRPC code "OUT_OF_RANGE") indicates the operation was
+  // attempted past the valid range, such as seeking or reading past an
+  // end-of-file.
+  //
+  // Unlike `InvalidArgument`, this error indicates a problem that may
+  // be fixed if the system state changes. For example, a 32-bit file
+  // system will generate `InvalidArgument` if asked to read at an
+  // offset that is not in the range [0,2^32-1], but it will generate
+  // `OutOfRange` if asked to read from an offset past the current
+  // file size.
+  //
+  // There is a fair bit of overlap between `FailedPrecondition` and
+  // `OutOfRange`.  We recommend using `OutOfRange` (the more specific
+  // error) when it applies so that callers who are iterating through
+  // a space can easily look for an `OutOfRange` error to detect when
+  // they are done.
+  Status::OutOfRange()
+
+  // Unimplemented (gRPC code "UNIMPLEMENTED") indicates the operation is not
+  // implemented or supported in this service. In this case, the operation
+  // should not be re-attempted.
+  Status::Unimplemented()
+
+  // Internal (gRPC code "INTERNAL") indicates an internal error has occurred
+  // and some invariants expected by the underlying system have not been
+  // satisfied. This error code is reserved for serious errors.
+  Status::Internal()
+
+  // Unavailable (gRPC code "UNAVAILABLE") indicates the service is currently
+  // unavailable and that this is most likely a transient condition. An error
+  // such as this can be corrected by retrying with a backoff scheme. Note that
+  // it is not always safe to retry non-idempotent operations.
+  //
+  // See the guidelines above for deciding between `FailedPrecondition`,
+  // `Aborted`, and `Unavailable`.
+  Status::Unavailable()
+
+  // DataLoss (gRPC code "DATA_LOSS") indicates that unrecoverable data loss or
+  // corruption has occurred. As this error is serious, proper alerting should
+  // be attached to errors such as this.
+  Status::DataLoss()
+
+  // Unauthenticated (gRPC code "UNAUTHENTICATED") indicates that the request
+  // does not have valid authentication credentials for the operation. Correct
+  // the authentication and try again.
+  Status::Unauthenticated()
+
+.. attention::
+
+  Some code may use all-caps status values such as ``Status::UNKNOWN`` instead
+  of ``Status::Unknown()``. These all-caps status codes are deprecated and will
+  be removed in the future. Do not use them; use the functions above instead.
+
+  The all-caps status aliases were deprecated because they do not comply with
+  the style guide and potentially conflict with macro definitions. For example,
+  projects might define an ``INTERNAL`` macro, which would prevent ``status.h``
+  or code that uses ``Status::INTERNAL`` from compiling.
+
+  The Python tool ``pw_status/update_style.py`` may be used to migrate code in a
+  Git repo to the new status style.
+
+C compatibility
+---------------
+``pw_status`` provides the C-compatible ``pw_Status`` enum for the status codes.
+For ease of use, ``pw::Status`` implicitly converts to and from ``pw_Status``.
+However, the ``pw_Status`` enum should never be used in C++; instead use the
+``Status`` class.
+
+The values of the ``pw_Status`` enum are all-caps and prefixed with
+``PW_STATUS_``. For example, ``PW_STATUS_DATA_LOSS`` corresponds with the C++
+``Status::DataLoss()``.
+
+StatusWithSize
+==============
 ``pw::StatusWithSize`` (``pw_status/status_with_size.h``) is a convenient,
-efficent class for reporting a status along with an unsigned integer value.
+efficient class for reporting a status along with an unsigned integer value.
+It is similar to the ``pw::Result<T>`` class, but it stores both a size and a
+status, regardless of the status value, and only supports a limited range (27
+bits).
 
-The classes in pw_status are used extensively by other Pigweed modules.
+``pw::StatusWithSize`` values may be created with functions similar to
+``pw::Status``. For example,
+
+  .. code-block:: cpp
+
+    // An OK StatusWithSize with a size of 123.
+    StatusWithSize::Ok(123)
+
+    // A NOT_FOUND StatusWithSize with a size of 0.
+    StatusWithSize::NotFound()
+
+    // A RESOURCE_EXHAUSTED StatusWithSize with a size of 10.
+    StatusWithSize::ResourceExhausted(10)
+
+PW_TRY
+======
+``PW_TRY`` (``pw_status/try.h``) is a convenient set of macros for working
+with Status and StatusWithSize objects in functions that return Status or
+StatusWithSize. The PW_TRY and PW_TRY_WITH_SIZE macros call a function and
+do an early return if the function's return status is not ok.
+
+Example:
+
+.. code-block:: cpp
+
+  Status PwTryExample() {
+    PW_TRY(FunctionThatReturnsStatus());
+    PW_TRY(FunctionThatReturnsStatusWithSize());
+
+    // Do something, only executed if both functions above return OK.
+  }
+
+  StatusWithSize PwTryWithSizeExample() {
+    PW_TRY_WITH_SIZE(FunctionThatReturnsStatus());
+    PW_TRY_WITH_SIZE(FunctionThatReturnsStatusWithSize());
+
+    // Do something, only executed if both functions above return OK.
+  }
+
+PW_TRY_ASSIGN is for working with StatusWithSize objects in in functions
+that return Status. It is similar to PW_TRY with the addition of assigning
+the size from the StatusWithSize on ok.
+
+.. code-block:: cpp
+
+  Status PwTryAssignExample() {
+    size_t size_value
+    PW_TRY_ASSIGN(size_value, FunctionThatReturnsStatusWithSize());
+
+    // Do something that uses size_value. size_value is only assigned and this
+    // following code executed if the PW_TRY_ASSIGN function above returns OK.
+  }
 
 Compatibility
 =============
diff --git a/pw_status/public/pw_status/status.h b/pw_status/public/pw_status/status.h
index fc1063d..2842797 100644
--- a/pw_status/public/pw_status/status.h
+++ b/pw_status/public/pw_status/status.h
@@ -24,131 +24,152 @@
 // Status implicitly convert to one another and can be passed cleanly between C
 // and C++ APIs.
 //
-// pw_Status uses the canonical Google error codes. The following was copied
-// from Tensorflow and prefixed with PW_STATUS_.
+// pw_Status uses the canonical Google error codes. The following enum was based
+// on Abseil's status/status.h. The values are all-caps and prefixed with
+// PW_STATUS_ instead of using C++ constant style.
 typedef enum {
-  PW_STATUS_OK = 0,  // Use Status::OK in C++
+  // Ok (gRPC code "OK") does not indicate an error; this value is returned on
+  // success. It is typical to check for this value before proceeding on any
+  // given call across an API or RPC boundary. To check this value, use the
+  // `Status::ok()` member function rather than inspecting the raw code.
+  PW_STATUS_OK = 0,  // Use Status::Ok() in C++
 
-  // The operation was cancelled (typically by the caller).
-  PW_STATUS_CANCELLED = 1,  // Use Status::CANCELLED in C++
+  // Cancelled (gRPC code "CANCELLED") indicates the operation was cancelled,
+  // typically by the caller.
+  PW_STATUS_CANCELLED = 1,  // Use Status::Cancelled() in C++
 
-  // Unknown error.  An example of where this error may be returned is
-  // if a Status value received from another address space belongs to
-  // an error-space that is not known in this address space.  Also,
-  // errors raised by APIs that do not return enough error information
-  // may be converted to this error.
-  PW_STATUS_UNKNOWN = 2,  // Use Status::UNKNOWN in C++
+  // Unknown (gRPC code "UNKNOWN") indicates an unknown error occurred. In
+  // general, more specific errors should be raised, if possible. Errors raised
+  // by APIs that do not return enough error information may be converted to
+  // this error.
+  PW_STATUS_UNKNOWN = 2,  // Use Status::Unknown() in C++
 
-  // Client specified an invalid argument.  Note that this differs
-  // from FAILED_PRECONDITION.  INVALID_ARGUMENT indicates arguments
-  // that are problematic regardless of the state of the system
-  // (e.g. a malformed file name).
-  PW_STATUS_INVALID_ARGUMENT = 3,  // Use Status::INVALID_ARGUMENT in C++
+  // InvalidArgument (gRPC code "INVALID_ARGUMENT") indicates the caller
+  // specified an invalid argument, such as a malformed filename. Note that
+  // such errors should be narrowly limited to indicate the invalid nature of the
+  // arguments themselves. Errors with validly formed arguments that may cause
+  // errors with the state of the receiving system should be denoted with
+  // `FailedPrecondition` instead.
+  PW_STATUS_INVALID_ARGUMENT = 3,  // Use Status::InvalidArgument() in C++
 
-  // Deadline expired before operation could complete.  For operations
-  // that change the state of the system, this error may be returned
-  // even if the operation has completed successfully.  For example, a
-  // successful response from a server could have been delayed long
-  // enough for the deadline to expire.
-  PW_STATUS_DEADLINE_EXCEEDED = 4,  // Use Status::DEADLINE_EXCEEDED in C++
+  // DeadlineExceeded (gRPC code "DEADLINE_EXCEEDED") indicates a deadline
+  // expired before the operation could complete. For operations that may change
+  // state within a system, this error may be returned even if the operation has
+  // completed successfully. For example, a successful response from a server
+  // could have been delayed long enough for the deadline to expire.
+  PW_STATUS_DEADLINE_EXCEEDED = 4,  // Use Status::DeadlineExceeded() in C++
 
-  // Some requested entity (e.g. file or directory) was not found.
-  // For privacy reasons, this code *may* be returned when the client
-  // does not have the access right to the entity.
-  PW_STATUS_NOT_FOUND = 5,  // Use Status::NOT_FOUND in C++
-
-  // Some entity that we attempted to create (e.g. file or directory)
-  // already exists.
-  PW_STATUS_ALREADY_EXISTS = 6,  // Use Status::ALREADY_EXISTS in C++
-
-  // The caller does not have permission to execute the specified
-  // operation.  PERMISSION_DENIED must not be used for rejections
-  // caused by exhausting some resource (use RESOURCE_EXHAUSTED
-  // instead for those errors).  PERMISSION_DENIED must not be
-  // used if the caller cannot be identified (use UNAUTHENTICATED
-  // instead for those errors).
-  PW_STATUS_PERMISSION_DENIED = 7,  // Use Status::PERMISSION_DENIED in C++
-
-  // The request does not have valid authentication credentials for the
-  // operation.
-  PW_STATUS_UNAUTHENTICATED = 16,  // Use Status::UNAUTHENTICATED in C++
-
-  // Some resource has been exhausted, perhaps a per-user quota, or
-  // perhaps the entire filesystem is out of space.
-  PW_STATUS_RESOURCE_EXHAUSTED = 8,  // Use Status::RESOURCE_EXHAUSTED in C++
-
-  // Operation was rejected because the system is not in a state
-  // required for the operation's execution.  For example, directory
-  // to be deleted may be non-empty, an rmdir operation is applied to
-  // a non-directory, etc.
+  // NotFound (gRPC code "NOT_FOUND") indicates some requested entity (such as
+  // a file or directory) was not found.
   //
-  // A litmus test that may help a service implementer in deciding
-  // between FAILED_PRECONDITION, ABORTED, and UNAVAILABLE:
-  //  (a) Use UNAVAILABLE if the client can retry just the failing call.
-  //  (b) Use ABORTED if the client should retry at a higher-level
-  //      (e.g. restarting a read-modify-write sequence).
-  //  (c) Use FAILED_PRECONDITION if the client should not retry until
-  //      the system state has been explicitly fixed.  E.g. if an "rmdir"
-  //      fails because the directory is non-empty, FAILED_PRECONDITION
+  // `NotFound` is useful if a request should be denied for an entire class of
+  // users, such as during a gradual feature rollout or undocumented allow list.
+  // If, instead, a request should be denied for specific sets of users, such as
+  // through user-based access control, use `PermissionDenied` instead.
+  PW_STATUS_NOT_FOUND = 5,  // Use Status::NotFound() in C++
+
+  // AlreadyExists (gRPC code "ALREADY_EXISTS") indicates the entity that a
+  // caller attempted to create (such as file or directory) is already present.
+  PW_STATUS_ALREADY_EXISTS = 6,  // Use Status::AlreadyExists() in C++
+
+  // PermissionDenied (gRPC code "PERMISSION_DENIED") indicates that the caller
+  // does not have permission to execute the specified operation. Note that this
+  // error is different than an error due to an *un*authenticated user. This
+  // error code does not imply the request is valid or the requested entity
+  // exists or satisfies any other pre-conditions.
+  //
+  // `PermissionDenied` must not be used for rejections caused by exhausting
+  // some resource. Instead, use `ResourceExhausted` for those errors.
+  // `PermissionDenied` must not be used if the caller cannot be identified.
+  // Instead, use `Unauthenticated` for those errors.
+  PW_STATUS_PERMISSION_DENIED = 7,  // Use Status::PermissionDenied() in C++
+
+  // ResourceExhausted (gRPC code "RESOURCE_EXHAUSTED") indicates some resource
+  // has been exhausted, perhaps a per-user quota, or perhaps the entire file
+  // system is out of space.
+  PW_STATUS_RESOURCE_EXHAUSTED = 8,  // Use Status::ResourceExhausted() in C++
+
+  // FailedPrecondition (gRPC code "FAILED_PRECONDITION") indicates that the
+  // operation was rejected because the system is not in a state required for
+  // the operation's execution. For example, a directory to be deleted may be
+  // non-empty, an "rmdir" operation is applied to a non-directory, etc.
+  //
+  // Some guidelines that may help a service implementer in deciding between
+  // `FailedPrecondition`, `Aborted`, and `Unavailable`:
+  //
+  //  (a) Use `Unavailable` if the client can retry just the failing call.
+  //  (b) Use `Aborted` if the client should retry at a higher transaction
+  //      level (such as when a client-specified test-and-set fails, indicating
+  //      the client should restart a read-modify-write sequence).
+  //  (c) Use `FailedPrecondition` if the client should not retry until
+  //      the system state has been explicitly fixed. For example, if an "rmdir"
+  //      fails because the directory is non-empty, `FailedPrecondition`
   //      should be returned since the client should not retry unless
-  //      they have first fixed up the directory by deleting files from it.
-  //  (d) Use FAILED_PRECONDITION if the client performs conditional
-  //      REST Get/Update/Delete on a resource and the resource on the
-  //      server does not match the condition. E.g. conflicting
-  //      read-modify-write on the same resource.
-  PW_STATUS_FAILED_PRECONDITION = 9,  // Use Status::FAILED_PRECONDITION in C++
+  //      the files are deleted from the directory.
+  PW_STATUS_FAILED_PRECONDITION = 9,  // Use Status::FailedPrecondition() in C++
 
-  // The operation was aborted, typically due to a concurrency issue
-  // like sequencer check failures, transaction aborts, etc.
+  // Aborted (gRPC code "ABORTED") indicates the operation was aborted,
+  // typically due to a concurrency issue such as a sequencer check failure or a
+  // failed transaction.
   //
-  // See litmus test above for deciding between FAILED_PRECONDITION,
-  // ABORTED, and UNAVAILABLE.
-  PW_STATUS_ABORTED = 10,  // Use Status::ABORTED in C++
+  // See the guidelines above for deciding between `FailedPrecondition`,
+  // `Aborted`, and `Unavailable`.
+  PW_STATUS_ABORTED = 10,  // Use Status::Aborted() in C++
 
-  // Operation tried to iterate past the valid input range.  E.g. seeking or
-  // reading past end of file.
+  // OutOfRange (gRPC code "OUT_OF_RANGE") indicates the operation was
+  // attempted past the valid range, such as seeking or reading past an
+  // end-of-file.
   //
-  // Unlike INVALID_ARGUMENT, this error indicates a problem that may
+  // Unlike `InvalidArgument`, this error indicates a problem that may
   // be fixed if the system state changes. For example, a 32-bit file
-  // system will generate INVALID_ARGUMENT if asked to read at an
+  // system will generate `InvalidArgument` if asked to read at an
   // offset that is not in the range [0,2^32-1], but it will generate
-  // OUT_OF_RANGE if asked to read from an offset past the current
+  // `OutOfRange` if asked to read from an offset past the current
   // file size.
   //
-  // There is a fair bit of overlap between FAILED_PRECONDITION and
-  // OUT_OF_RANGE.  We recommend using OUT_OF_RANGE (the more specific
+  // There is a fair bit of overlap between `FailedPrecondition` and
+  // `OutOfRange`.  We recommend using `OutOfRange` (the more specific
   // error) when it applies so that callers who are iterating through
-  // a space can easily look for an OUT_OF_RANGE error to detect when
+  // a space can easily look for an `OutOfRange` error to detect when
   // they are done.
-  PW_STATUS_OUT_OF_RANGE = 11,  // Use Status::OUT_OF_RANGE in C++
+  PW_STATUS_OUT_OF_RANGE = 11,  // Use Status::OutOfRange() in C++
 
-  // Operation is not implemented or not supported/enabled in this service.
-  PW_STATUS_UNIMPLEMENTED = 12,  // Use Status::UNIMPLEMENTED in C++
+  // Unimplemented (gRPC code "UNIMPLEMENTED") indicates the operation is not
+  // implemented or supported in this service. In this case, the operation
+  // should not be re-attempted.
+  PW_STATUS_UNIMPLEMENTED = 12,  // Use Status::Unimplemented() in C++
 
-  // Internal errors.  Means some invariants expected by underlying
-  // system has been broken.  If you see one of these errors,
-  // something is very broken.
-  PW_STATUS_INTERNAL = 13,  // Use Status::INTERNAL in C++
+  // Internal (gRPC code "INTERNAL") indicates an internal error has occurred
+  // and some invariants expected by the underlying system have not been
+  // satisfied. This error code is reserved for serious errors.
+  PW_STATUS_INTERNAL = 13,  // Use Status::Internal() in C++
 
-  // The service is currently unavailable.  This is a most likely a
-  // transient condition and may be corrected by retrying with
-  // a backoff.
+  // Unavailable (gRPC code "UNAVAILABLE") indicates the service is currently
+  // unavailable and that this is most likely a transient condition. An error
+  // such as this can be corrected by retrying with a backoff scheme. Note that
+  // it is not always safe to retry non-idempotent operations.
   //
-  // See litmus test above for deciding between FAILED_PRECONDITION,
-  // ABORTED, and UNAVAILABLE.
-  PW_STATUS_UNAVAILABLE = 14,  // Use Status::UNAVAILABLE in C++
+  // See the guidelines above for deciding between `FailedPrecondition`,
+  // `Aborted`, and `Unavailable`.
+  PW_STATUS_UNAVAILABLE = 14,  // Use Status::Unavailable() in C++
 
-  // Unrecoverable data loss or corruption.
-  PW_STATUS_DATA_LOSS = 15,  // Use Status::DATA_LOSS in C++
+  // DataLoss (gRPC code "DATA_LOSS") indicates that unrecoverable data loss or
+  // corruption has occurred. As this error is serious, proper alerting should
+  // be attached to errors such as this.
+  PW_STATUS_DATA_LOSS = 15,  // Use Status::DataLoss() in C++
 
-  // An extra enum entry to prevent people from writing code that
-  // fails to compile when a new code is added.
+  // Unauthenticated (gRPC code "UNAUTHENTICATED") indicates that the request
+  // does not have valid authentication credentials for the operation. Correct
+  // the authentication and try again.
+  PW_STATUS_UNAUTHENTICATED = 16,  // Use Status::Unauthenticated() in C++
+
+  // NOTE: this error code entry should not be used and you should not rely on
+  // its value, which may change.
   //
-  // Nobody should ever reference this enumeration entry. In particular,
-  // if you write C++ code that switches on this enumeration, add a default:
-  // case instead of a case that mentions this enumeration entry.
-  //
-  // Nobody should rely on the value listed here. It may change in the future.
+  // The purpose of this enumerated value is to force people who handle status
+  // codes with `switch()` statements to *not* simply enumerate all possible
+  // values, but instead provide a "default:" case. Providing such a default
+  // case ensures that code will compile when new codes are added.
   PW_STATUS_DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_,
 } pw_Status;  // Use pw::Status in C++
 
@@ -159,17 +180,54 @@
 
 }  // extern "C"
 
+// This header violates the Pigweed style guide! It declares constants that use
+// macro naming style, rather than constant naming style (kConstant). This is
+// done for readability and for consistency with Google's standard status codes
+// (e.g. as in gRPC).
+//
+// The problem is that the status code names might overlap with macro
+// definitions. To work around this, this header undefines any macros with these
+// names.
+//
+// If your project relies on a macro with one of these names (e.g. INTERNAL),
+// make sure it is included after status.h so that the macro is defined.
+//
+// TODO(pwbug/268): Remove these #undefs after removing the names that violate
+//     the style guide.
+#undef OK
+#undef CANCELLED
+#undef UNKNOWN
+#undef INVALID_ARGUMENT
+#undef DEADLINE_EXCEEDED
+#undef NOT_FOUND
+#undef ALREADY_EXISTS
+#undef PERMISSION_DENIED
+#undef UNAUTHENTICATED
+#undef RESOURCE_EXHAUSTED
+#undef FAILED_PRECONDITION
+#undef ABORTED
+#undef OUT_OF_RANGE
+#undef UNIMPLEMENTED
+#undef INTERNAL
+#undef UNAVAILABLE
+#undef DATA_LOSS
+
 namespace pw {
 
 // The Status class is a thin, zero-cost abstraction around the pw_Status enum.
-// It initializes to Status::OK by default and adds ok() and str() methods.
+// It initializes to Status::Ok() by default and adds ok() and str() methods.
 // Implicit conversions are permitted between pw_Status and pw::Status.
 class Status {
  public:
   using Code = pw_Status;
 
   // All of the pw_Status codes are available in the Status class as, e.g.
-  // pw::Status::OK or pw::Status::OUT_OF_RANGE.
+  // pw::Status::Ok() or pw::Status::OutOfRange().
+  //
+  // These aliases are DEPRECATED -- prefer using the helper functions below.
+  // For example, change Status::CANCELLED to Status::Cancelled().
+  //
+  // TODO(pwbug/268): Migrate to the helper functions and remove these aliases.
   static constexpr Code OK = PW_STATUS_OK;
   static constexpr Code CANCELLED = PW_STATUS_CANCELLED;
   static constexpr Code UNKNOWN = PW_STATUS_UNKNOWN;
@@ -188,25 +246,144 @@
   static constexpr Code UNAVAILABLE = PW_STATUS_UNAVAILABLE;
   static constexpr Code DATA_LOSS = PW_STATUS_DATA_LOSS;
 
+  // Functions that create a Status with the specified code.
+  // clang-format off
+  [[nodiscard]] static constexpr Status Ok() {
+    return PW_STATUS_OK;
+  }
+  [[nodiscard]] static constexpr Status Cancelled() {
+    return PW_STATUS_CANCELLED;
+  }
+  [[nodiscard]] static constexpr Status Unknown() {
+    return PW_STATUS_UNKNOWN;
+  }
+  [[nodiscard]] static constexpr Status InvalidArgument() {
+    return PW_STATUS_INVALID_ARGUMENT;
+  }
+  [[nodiscard]] static constexpr Status DeadlineExceeded() {
+    return PW_STATUS_DEADLINE_EXCEEDED;
+  }
+  [[nodiscard]] static constexpr Status NotFound() {
+    return PW_STATUS_NOT_FOUND;
+  }
+  [[nodiscard]] static constexpr Status AlreadyExists() {
+    return PW_STATUS_ALREADY_EXISTS;
+  }
+  [[nodiscard]] static constexpr Status PermissionDenied() {
+    return PW_STATUS_PERMISSION_DENIED;
+  }
+  [[nodiscard]] static constexpr Status ResourceExhausted() {
+    return PW_STATUS_RESOURCE_EXHAUSTED;
+  }
+  [[nodiscard]] static constexpr Status FailedPrecondition() {
+    return PW_STATUS_FAILED_PRECONDITION;
+  }
+  [[nodiscard]] static constexpr Status Aborted() {
+    return PW_STATUS_ABORTED;
+  }
+  [[nodiscard]] static constexpr Status OutOfRange() {
+    return PW_STATUS_OUT_OF_RANGE;
+  }
+  [[nodiscard]] static constexpr Status Unimplemented() {
+    return PW_STATUS_UNIMPLEMENTED;
+  }
+  [[nodiscard]] static constexpr Status Internal() {
+    return PW_STATUS_INTERNAL;
+  }
+  [[nodiscard]] static constexpr Status Unavailable() {
+    return PW_STATUS_UNAVAILABLE;
+  }
+  [[nodiscard]] static constexpr Status DataLoss() {
+    return PW_STATUS_DATA_LOSS;
+  }
+  [[nodiscard]] static constexpr Status Unauthenticated() {
+    return PW_STATUS_UNAUTHENTICATED;
+  }
+  // clang-format on
+
   // Statuses are created with a Status::Code.
-  constexpr Status(Code code = OK) : code_(code) {}
+  constexpr Status(Code code = PW_STATUS_OK) : code_(code) {}
 
   constexpr Status(const Status&) = default;
   constexpr Status& operator=(const Status&) = default;
 
-  // Status implicitly converts to a Status::Code.
-  constexpr operator Code() const { return code_; }
+  // Returns the Status::Code (pw_Status) for this Status.
+  constexpr Code code() const { return code_; }
 
-  // True if the status is Status::OK.
-  constexpr bool ok() const { return code_ == OK; }
+  // True if the status is Status::Ok().
+  [[nodiscard]] constexpr bool ok() const { return code_ == PW_STATUS_OK; }
+
+  // Functions for checking which status this is.
+  [[nodiscard]] constexpr bool IsCancelled() const {
+    return code_ == PW_STATUS_CANCELLED;
+  }
+  [[nodiscard]] constexpr bool IsUnknown() const {
+    return code_ == PW_STATUS_UNKNOWN;
+  }
+  [[nodiscard]] constexpr bool IsInvalidArgument() const {
+    return code_ == PW_STATUS_INVALID_ARGUMENT;
+  }
+  [[nodiscard]] constexpr bool IsDeadlineExceeded() const {
+    return code_ == PW_STATUS_DEADLINE_EXCEEDED;
+  }
+  [[nodiscard]] constexpr bool IsNotFound() const {
+    return code_ == PW_STATUS_NOT_FOUND;
+  }
+  [[nodiscard]] constexpr bool IsAlreadyExists() const {
+    return code_ == PW_STATUS_ALREADY_EXISTS;
+  }
+  [[nodiscard]] constexpr bool IsPermissionDenied() const {
+    return code_ == PW_STATUS_PERMISSION_DENIED;
+  }
+  [[nodiscard]] constexpr bool IsResourceExhausted() const {
+    return code_ == PW_STATUS_RESOURCE_EXHAUSTED;
+  }
+  [[nodiscard]] constexpr bool IsFailedPrecondition() const {
+    return code_ == PW_STATUS_FAILED_PRECONDITION;
+  }
+  [[nodiscard]] constexpr bool IsAborted() const {
+    return code_ == PW_STATUS_ABORTED;
+  }
+  [[nodiscard]] constexpr bool IsOutOfRange() const {
+    return code_ == PW_STATUS_OUT_OF_RANGE;
+  }
+  [[nodiscard]] constexpr bool IsUnimplemented() const {
+    return code_ == PW_STATUS_UNIMPLEMENTED;
+  }
+  [[nodiscard]] constexpr bool IsInternal() const {
+    return code_ == PW_STATUS_INTERNAL;
+  }
+  [[nodiscard]] constexpr bool IsUnavailable() const {
+    return code_ == PW_STATUS_UNAVAILABLE;
+  }
+  [[nodiscard]] constexpr bool IsDataLoss() const {
+    return code_ == PW_STATUS_DATA_LOSS;
+  }
+  [[nodiscard]] constexpr bool IsUnauthenticated() const {
+    return code_ == PW_STATUS_UNAUTHENTICATED;
+  }
 
   // Returns a null-terminated string representation of the Status.
-  const char* str() const { return pw_StatusString(code_); }
+  [[nodiscard]] const char* str() const { return pw_StatusString(code_); }
 
  private:
   Code code_;
 };
 
+constexpr bool operator==(const Status& lhs, const Status& rhs) {
+  return lhs.code() == rhs.code();
+}
+
+constexpr bool operator!=(const Status& lhs, const Status& rhs) {
+  return lhs.code() != rhs.code();
+}
+
 }  // namespace pw
 
+// Create a C++ overload of pw_StatusString so that it supports pw::Status in
+// addition to pw_Status.
+inline const char* pw_StatusString(pw::Status status) {
+  return pw_StatusString(status.code());
+}
+
 #endif  // __cplusplus
diff --git a/pw_status/public/pw_status/status_with_size.h b/pw_status/public/pw_status/status_with_size.h
index 607b8e1..751ace8 100644
--- a/pw_status/public/pw_status/status_with_size.h
+++ b/pw_status/public/pw_status/status_with_size.h
@@ -24,13 +24,14 @@
 
 namespace internal {
 
+// TODO(pwbug/268): Remove this class after migrating to the helper functions.
 template <int kStatusShift>
 class StatusWithSizeConstant {
  private:
   friend class ::pw::StatusWithSize;
 
-  explicit constexpr StatusWithSizeConstant(Status::Code value)
-      : value_(static_cast<size_t>(value) << kStatusShift) {}
+  explicit constexpr StatusWithSizeConstant(Status value)
+      : value_(static_cast<size_t>(value.code()) << kStatusShift) {}
 
   const size_t value_;
 };
@@ -74,37 +75,96 @@
  public:
   // Non-OK StatusWithSizes can be constructed from these constants, such as:
   //
-  //   StatusWithSize result = StatusWithSize::NOT_FOUND;
+  //   StatusWithSize result = StatusWithSize::NotFound();
   //
-  static constexpr Constant CANCELLED{Status::CANCELLED};
-  static constexpr Constant UNKNOWN{Status::UNKNOWN};
-  static constexpr Constant INVALID_ARGUMENT{Status::INVALID_ARGUMENT};
-  static constexpr Constant DEADLINE_EXCEEDED{Status::DEADLINE_EXCEEDED};
-  static constexpr Constant NOT_FOUND{Status::NOT_FOUND};
-  static constexpr Constant ALREADY_EXISTS{Status::ALREADY_EXISTS};
-  static constexpr Constant PERMISSION_DENIED{Status::PERMISSION_DENIED};
-  static constexpr Constant RESOURCE_EXHAUSTED{Status::RESOURCE_EXHAUSTED};
-  static constexpr Constant FAILED_PRECONDITION{Status::FAILED_PRECONDITION};
-  static constexpr Constant ABORTED{Status::ABORTED};
-  static constexpr Constant OUT_OF_RANGE{Status::OUT_OF_RANGE};
-  static constexpr Constant UNIMPLEMENTED{Status::UNIMPLEMENTED};
-  static constexpr Constant INTERNAL{Status::INTERNAL};
-  static constexpr Constant UNAVAILABLE{Status::UNAVAILABLE};
-  static constexpr Constant DATA_LOSS{Status::DATA_LOSS};
-  static constexpr Constant UNAUTHENTICATED{Status::UNAUTHENTICATED};
+  // These constants are DEPRECATED! Use the helper functions below instead. For
+  // example, change StatusWithSize::NOT_FOUND to StatusWithSize::NotFound().
+  //
+  // TODO(pwbug/268): Migrate to the functions and remove these constants.
+  static constexpr Constant CANCELLED{Status::Cancelled()};
+  static constexpr Constant UNKNOWN{Status::Unknown()};
+  static constexpr Constant INVALID_ARGUMENT{Status::InvalidArgument()};
+  static constexpr Constant DEADLINE_EXCEEDED{Status::DeadlineExceeded()};
+  static constexpr Constant NOT_FOUND{Status::NotFound()};
+  static constexpr Constant ALREADY_EXISTS{Status::AlreadyExists()};
+  static constexpr Constant PERMISSION_DENIED{Status::PermissionDenied()};
+  static constexpr Constant RESOURCE_EXHAUSTED{Status::ResourceExhausted()};
+  static constexpr Constant FAILED_PRECONDITION{Status::FailedPrecondition()};
+  static constexpr Constant ABORTED{Status::Aborted()};
+  static constexpr Constant OUT_OF_RANGE{Status::OutOfRange()};
+  static constexpr Constant UNIMPLEMENTED{Status::Unimplemented()};
+  static constexpr Constant INTERNAL{Status::Internal()};
+  static constexpr Constant UNAVAILABLE{Status::Unavailable()};
+  static constexpr Constant DATA_LOSS{Status::DataLoss()};
+  static constexpr Constant UNAUTHENTICATED{Status::Unauthenticated()};
 
-  // Creates a StatusWithSize with Status::OK and a size of 0.
+  // Functions that create a StatusWithSize with the specified status code. For
+  // codes other than OK, the size defaults to 0.
+  static constexpr StatusWithSize Ok(size_t size) {
+    return StatusWithSize(size);
+  }
+  static constexpr StatusWithSize Cancelled(size_t size = 0) {
+    return StatusWithSize(Status::Cancelled(), size);
+  }
+  static constexpr StatusWithSize Unknown(size_t size = 0) {
+    return StatusWithSize(Status::Unknown(), size);
+  }
+  static constexpr StatusWithSize InvalidArgument(size_t size = 0) {
+    return StatusWithSize(Status::InvalidArgument(), size);
+  }
+  static constexpr StatusWithSize DeadlineExceeded(size_t size = 0) {
+    return StatusWithSize(Status::DeadlineExceeded(), size);
+  }
+  static constexpr StatusWithSize NotFound(size_t size = 0) {
+    return StatusWithSize(Status::NotFound(), size);
+  }
+  static constexpr StatusWithSize AlreadyExists(size_t size = 0) {
+    return StatusWithSize(Status::AlreadyExists(), size);
+  }
+  static constexpr StatusWithSize PermissionDenied(size_t size = 0) {
+    return StatusWithSize(Status::PermissionDenied(), size);
+  }
+  static constexpr StatusWithSize Unauthenticated(size_t size = 0) {
+    return StatusWithSize(Status::Unauthenticated(), size);
+  }
+  static constexpr StatusWithSize ResourceExhausted(size_t size = 0) {
+    return StatusWithSize(Status::ResourceExhausted(), size);
+  }
+  static constexpr StatusWithSize FailedPrecondition(size_t size = 0) {
+    return StatusWithSize(Status::FailedPrecondition(), size);
+  }
+  static constexpr StatusWithSize Aborted(size_t size = 0) {
+    return StatusWithSize(Status::Aborted(), size);
+  }
+  static constexpr StatusWithSize OutOfRange(size_t size = 0) {
+    return StatusWithSize(Status::OutOfRange(), size);
+  }
+  static constexpr StatusWithSize Unimplemented(size_t size = 0) {
+    return StatusWithSize(Status::Unimplemented(), size);
+  }
+  static constexpr StatusWithSize Internal(size_t size = 0) {
+    return StatusWithSize(Status::Internal(), size);
+  }
+  static constexpr StatusWithSize Unavailable(size_t size = 0) {
+    return StatusWithSize(Status::Unavailable(), size);
+  }
+  static constexpr StatusWithSize DataLoss(size_t size = 0) {
+    return StatusWithSize(Status::DataLoss(), size);
+  }
+
+  // Creates a StatusWithSize with Status::Ok() and a size of 0.
   explicit constexpr StatusWithSize() : size_(0) {}
 
-  // Creates a StatusWithSize with Status::OK and the provided size.
+  // Creates a StatusWithSize with Status::Ok() and the provided size.
   // std::enable_if is used to prevent enum types (e.g. Status) from being used.
   // TODO(hepler): Add debug-only assert that size <= max_size().
   template <typename T, typename = std::enable_if_t<std::is_integral_v<T>>>
   explicit constexpr StatusWithSize(T size) : size_(size) {}
 
   // Creates a StatusWithSize with the provided status and size.
-  constexpr StatusWithSize(Status::Code status, size_t size)
-      : StatusWithSize((static_cast<size_t>(status) << kStatusShift) | size) {}
+  explicit constexpr StatusWithSize(Status status, size_t size)
+      : StatusWithSize((static_cast<size_t>(status.code()) << kStatusShift) |
+                       size) {}
 
   // Allow implicit conversions from the StatusWithSize constants.
   constexpr StatusWithSize(Constant constant) : size_(constant.value_) {}
@@ -118,13 +178,63 @@
   // The maximum valid value for size.
   static constexpr size_t max_size() { return kSizeMask; }
 
-  // True if status() == Status::OK.
+  // True if status() == Status::Ok().
   constexpr bool ok() const { return (size_ & kStatusMask) == 0u; }
 
   constexpr Status status() const {
     return static_cast<Status::Code>((size_ & kStatusMask) >> kStatusShift);
   }
 
+  // Functions for checking which status the StatusWithSize contains.
+  [[nodiscard]] constexpr bool IsCancelled() const {
+    return status().IsCancelled();
+  }
+  [[nodiscard]] constexpr bool IsUnknown() const {
+    return status().IsUnknown();
+  }
+  [[nodiscard]] constexpr bool IsInvalidArgument() const {
+    return status().IsInvalidArgument();
+  }
+  [[nodiscard]] constexpr bool IsDeadlineExceeded() const {
+    return status().IsDeadlineExceeded();
+  }
+  [[nodiscard]] constexpr bool IsNotFound() const {
+    return status().IsNotFound();
+  }
+  [[nodiscard]] constexpr bool IsAlreadyExists() const {
+    return status().IsAlreadyExists();
+  }
+  [[nodiscard]] constexpr bool IsPermissionDenied() const {
+    return status().IsPermissionDenied();
+  }
+  [[nodiscard]] constexpr bool IsResourceExhausted() const {
+    return status().IsResourceExhausted();
+  }
+  [[nodiscard]] constexpr bool IsFailedPrecondition() const {
+    return status().IsFailedPrecondition();
+  }
+  [[nodiscard]] constexpr bool IsAborted() const {
+    return status().IsAborted();
+  }
+  [[nodiscard]] constexpr bool IsOutOfRange() const {
+    return status().IsOutOfRange();
+  }
+  [[nodiscard]] constexpr bool IsUnimplemented() const {
+    return status().IsUnimplemented();
+  }
+  [[nodiscard]] constexpr bool IsInternal() const {
+    return status().IsInternal();
+  }
+  [[nodiscard]] constexpr bool IsUnavailable() const {
+    return status().IsUnavailable();
+  }
+  [[nodiscard]] constexpr bool IsDataLoss() const {
+    return status().IsDataLoss();
+  }
+  [[nodiscard]] constexpr bool IsUnauthenticated() const {
+    return status().IsUnauthenticated();
+  }
+
  private:
   size_t size_;
 };
diff --git a/pw_kvs/pw_kvs_private/macros.h b/pw_status/public/pw_status/try.h
similarity index 86%
rename from pw_kvs/pw_kvs_private/macros.h
rename to pw_status/public/pw_status/try.h
index cf4e673..76e7a19 100644
--- a/pw_kvs/pw_kvs_private/macros.h
+++ b/pw_status/public/pw_status/try.h
@@ -51,23 +51,20 @@
 #define _PW_TRY_UNIQUE(line) _PW_TRY_UNIQUE_EXPANDED(line)
 #define _PW_TRY_UNIQUE_EXPANDED(line) _pw_try_unique_name_##line
 
-#define TRY PW_TRY
-#define TRY_WITH_SIZE PW_TRY_WITH_SIZE
-#define TRY_ASSIGN PW_TRY_ASSIGN
-
 namespace pw::internal {
 
-inline Status ConvertToStatus(Status status) { return status; }
+constexpr Status ConvertToStatus(Status status) { return status; }
 
-inline Status ConvertToStatus(StatusWithSize status_with_size) {
+constexpr Status ConvertToStatus(StatusWithSize status_with_size) {
   return status_with_size.status();
 }
 
-inline StatusWithSize ConvertToStatusWithSize(Status status) {
+constexpr StatusWithSize ConvertToStatusWithSize(Status status) {
   return StatusWithSize(status, 0);
 }
 
-inline StatusWithSize ConvertToStatusWithSize(StatusWithSize status_with_size) {
+constexpr StatusWithSize ConvertToStatusWithSize(
+    StatusWithSize status_with_size) {
   return status_with_size;
 }
 
diff --git a/pw_protobuf_compiler/nanopb.gni b/pw_status/py/BUILD.gn
similarity index 73%
copy from pw_protobuf_compiler/nanopb.gni
copy to pw_status/py/BUILD.gn
index 3c3be32..d6d12c9 100644
--- a/pw_protobuf_compiler/nanopb.gni
+++ b/pw_status/py/BUILD.gn
@@ -12,8 +12,14 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-declare_args() {
-  # If compiling protos for nanopb, this variable is set to the path to the
-  # nanopb installation.
-  dir_pw_third_party_nanopb = ""
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/python.gni")
+
+pw_python_package("py") {
+  setup = [ "setup.py" ]
+  sources = [
+    "pw_status/__init__.py",
+    "pw_status/update_style.py",
+  ]
 }
diff --git a/pw_status/py/pw_status/__init__.py b/pw_status/py/pw_status/__init__.py
index 060ac51..6d22bda 100644
--- a/pw_status/py/pw_status/__init__.py
+++ b/pw_status/py/pw_status/__init__.py
@@ -34,3 +34,6 @@
     INTERNAL = 13
     UNAVAILABLE = 14
     DATA_LOSS = 15
+
+    def ok(self) -> bool:
+        return self is self.OK
diff --git a/pw_status/py/pw_status/py.typed b/pw_status/py/pw_status/py.typed
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pw_status/py/pw_status/py.typed
diff --git a/pw_status/py/pw_status/update_style.py b/pw_status/py/pw_status/update_style.py
new file mode 100755
index 0000000..4e67941
--- /dev/null
+++ b/pw_status/py/pw_status/update_style.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python3
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Updates pw::Status usages from Status::CODE to Status::Code() style.
+
+Also updates StatusWithSize.
+"""
+
+import argparse
+from pathlib import Path
+import re
+import sys
+from typing import Iterable
+
+from pw_presubmit import git_repo
+
+_REMAP = {
+    'OK': 'Ok',
+    'CANCELLED': 'Cancelled',
+    'UNKNOWN': 'Unknown',
+    'INVALID_ARGUMENT': 'InvalidArgument',
+    'DEADLINE_EXCEEDED': 'DeadlineExceeded',
+    'NOT_FOUND': 'NotFound',
+    'ALREADY_EXISTS': 'AlreadyExists',
+    'PERMISSION_DENIED': 'PermissionDenied',
+    'UNAUTHENTICATED': 'Unauthenticated',
+    'RESOURCE_EXHAUSTED': 'ResourceExhausted',
+    'FAILED_PRECONDITION': 'FailedPrecondition',
+    'ABORTED': 'Aborted',
+    'OUT_OF_RANGE': 'OutOfRange',
+    'UNIMPLEMENTED': 'Unimplemented',
+    'INTERNAL': 'Internal',
+    'UNAVAILABLE': 'Unavailable',
+    'DATA_LOSS': 'DataLoss',
+}
+
+_CODES = '|'.join(_REMAP.keys())
+_FUNCTIONS = '|'.join(_REMAP.values())
+
+_STATUS_WITH_SIZE_CTOR = re.compile(
+    fr'\bStatusWithSize\(Status::({_CODES}),\s*'.encode())
+_STATUS = re.compile(fr'\b(Status|StatusWithSize)::({_CODES})(?!")\b'.encode())
+_STATUS_EQUALITY = re.compile(
+    fr'Status::(?P<l_func>{_FUNCTIONS})\(\)\s+==\s+(?P<value>[a-zA-Z0-9_.()]+)|'
+    fr'\s+==\s+(?:pw::)?Status::(?P<r_func>{_FUNCTIONS})\(\)'.encode())
+
+
+def _remap_status_with_size(match) -> bytes:
+    return f'StatusWithSize::{_REMAP[match.group(1).decode()]}('.encode()
+
+
+def _remap_codes(match) -> bytes:
+    status, code = (g.decode() for g in match.groups())
+    return f'{status}::{_REMAP[code]}()'.encode()
+
+
+def _remap_equality(match) -> bytes:
+    l_func, status, r_func = (g.decode() for g in match.groups(b''))
+    func = l_func or r_func
+    return (f'{status}.ok()'
+            if func == 'Ok' else f'{status}.Is{func}()').encode()
+
+
+def _parse_args():
+    """Parses and return command line arguments."""
+
+    parser = argparse.ArgumentParser(description=__doc__)
+    parser.add_argument('paths',
+                        nargs='*',
+                        type=Path,
+                        help='Paths to repositories')
+    return parser.parse_args()
+
+
+def update_status(paths: Iterable[Path]) -> None:
+    if not paths:
+        paths = [Path.cwd()]
+
+    for path in paths:
+        if git_repo.has_uncommitted_changes(path):
+            raise RuntimeError('There are pending changes in the Git repo!')
+
+        updated = 0
+
+        for file in git_repo.list_files(pathspecs=('*.h', '*.cc', '*.cpp'),
+                                        repo_path=path):
+            orig = file.read_bytes()
+
+            # Replace StatusWithSize constructor
+            text = _STATUS_WITH_SIZE_CTOR.sub(_remap_status_with_size, orig)
+
+            # Replace Status and StatusWithSize
+            text = _STATUS.sub(_remap_codes, text)
+
+            text = _STATUS_EQUALITY.sub(_remap_equality, text)
+
+            if orig != text:
+                updated += 1
+                file.write_bytes(text)
+
+    print('Updated', updated, 'files.')
+    print('Manually inspect the changes! This script is not perfect.')
+
+
+def main():
+    return update_status(**vars(_parse_args()))
+
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/pw_status/py/setup.py b/pw_status/py/setup.py
index 45387d7..13faaaf 100644
--- a/pw_status/py/setup.py
+++ b/pw_status/py/setup.py
@@ -13,7 +13,7 @@
 # the License.
 """pw_status"""
 
-import setuptools
+import setuptools  # type: ignore
 
 setuptools.setup(
     name='pw_status',
@@ -22,4 +22,6 @@
     author_email='pigweed-developers@googlegroups.com',
     description='Pigweed Status object',
     packages=setuptools.find_packages(),
+    package_data={'pw_status': ['py.typed']},
+    zip_safe=False,
 )
diff --git a/pw_status/status.cc b/pw_status/status.cc
index 5047734..9adbeab 100644
--- a/pw_status/status.cc
+++ b/pw_status/status.cc
@@ -19,7 +19,6 @@
     return #value
 
 extern "C" const char* pw_StatusString(pw_Status status) {
-  // Status codes are ordered by assigned number (UNAUTHENTICATED is last).
   switch (status) {
     PW_CASE_RETURN_ENUM_STRING(OK);
     PW_CASE_RETURN_ENUM_STRING(CANCELLED);
diff --git a/pw_status/status_test.cc b/pw_status/status_test.cc
index 98afa56..4812b9d 100644
--- a/pw_status/status_test.cc
+++ b/pw_status/status_test.cc
@@ -12,6 +12,26 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+// Make sure status works if these macros are defined.
+// TODO(pwbug/268): Remove these macros after migrating from these aliases.
+#define OK Uh oh, this macro is defined !
+#define CANCELLED Uh oh, this macro is defined !
+#define UNKNOWN Uh oh, this macro is defined !
+#define INVALID_ARGUMENT Uh oh, this macro is defined !
+#define DEADLINE_EXCEEDED Uh oh, this macro is defined !
+#define NOT_FOUND Uh oh, this macro is defined !
+#define ALREADY_EXISTS Uh oh, this macro is defined !
+#define PERMISSION_DENIED Uh oh, this macro is defined !
+#define UNAUTHENTICATED Uh oh, this macro is defined !
+#define RESOURCE_EXHAUSTED Uh oh, this macro is defined !
+#define FAILED_PRECONDITION Uh oh, this macro is defined !
+#define ABORTED Uh oh, this macro is defined !
+#define OUT_OF_RANGE Uh oh, this macro is defined !
+#define UNIMPLEMENTED Uh oh, this macro is defined !
+#define INTERNAL Uh oh, this macro is defined !
+#define UNAVAILABLE Uh oh, this macro is defined !
+#define DATA_LOSS Uh oh, this macro is defined !
+
 #include "pw_status/status.h"
 
 #include "gtest/gtest.h"
@@ -22,59 +42,159 @@
 constexpr Status::Code kInvalidCode = static_cast<Status::Code>(30);
 
 TEST(Status, Default) {
-  Status status;
-  EXPECT_TRUE(status.ok());
-  EXPECT_EQ(Status(), status);
+  constexpr Status status;
+  static_assert(status.ok());
+  static_assert(Status() == status);
 }
 
 TEST(Status, ConstructWithStatusCode) {
-  Status status(Status::ABORTED);
-  EXPECT_EQ(Status::ABORTED, status);
+  constexpr Status status(PW_STATUS_ABORTED);
+  static_assert(Status::Aborted() == status);
 }
 
 TEST(Status, AssignFromStatusCode) {
   Status status;
-  status = Status::INTERNAL;
-  EXPECT_EQ(Status::INTERNAL, status);
-}
-
-TEST(Status, CompareToStatusCode) {
-  EXPECT_EQ(Status(), Status::OK);
-  EXPECT_EQ(Status::ABORTED, Status(Status::ABORTED));
-  EXPECT_NE(Status(), Status::ABORTED);
+  status = PW_STATUS_INTERNAL;
+  EXPECT_EQ(Status::Internal(), status);
 }
 
 TEST(Status, Ok_OkIsTrue) {
-  EXPECT_TRUE(Status().ok());
-  EXPECT_TRUE(Status(Status::OK).ok());
+  static_assert(Status().ok());
+  static_assert(Status(PW_STATUS_OK).ok());
+  static_assert(Status::Ok().ok());
 }
 
 TEST(Status, NotOk_OkIsFalse) {
-  EXPECT_FALSE(Status(Status::DATA_LOSS).ok());
-  EXPECT_FALSE(Status(kInvalidCode).ok());
+  static_assert(!Status::DataLoss().ok());
+  static_assert(!Status(kInvalidCode).ok());
 }
 
-TEST(Status, KnownString) {
-  EXPECT_STREQ("OK", Status(Status::OK).str());
-  EXPECT_STREQ("CANCELLED", Status(Status::CANCELLED).str());
-  EXPECT_STREQ("DEADLINE_EXCEEDED", Status(Status::DEADLINE_EXCEEDED).str());
-  EXPECT_STREQ("NOT_FOUND", Status(Status::NOT_FOUND).str());
-  EXPECT_STREQ("ALREADY_EXISTS", Status(Status::ALREADY_EXISTS).str());
-  EXPECT_STREQ("PERMISSION_DENIED", Status(Status::PERMISSION_DENIED).str());
-  EXPECT_STREQ("UNAUTHENTICATED", Status(Status::UNAUTHENTICATED).str());
-  EXPECT_STREQ("RESOURCE_EXHAUSTED", Status(Status::RESOURCE_EXHAUSTED).str());
-  EXPECT_STREQ("FAILED_PRECONDITION",
-               Status(Status::FAILED_PRECONDITION).str());
-  EXPECT_STREQ("ABORTED", Status(Status::ABORTED).str());
-  EXPECT_STREQ("OUT_OF_RANGE", Status(Status::OUT_OF_RANGE).str());
-  EXPECT_STREQ("UNIMPLEMENTED", Status(Status::UNIMPLEMENTED).str());
-  EXPECT_STREQ("INTERNAL", Status(Status::INTERNAL).str());
-  EXPECT_STREQ("UNAVAILABLE", Status(Status::UNAVAILABLE).str());
-  EXPECT_STREQ("DATA_LOSS", Status(Status::DATA_LOSS).str());
+TEST(Status, Code) {
+  // clang-format off
+  static_assert(PW_STATUS_OK == Status().code());
+  static_assert(PW_STATUS_OK == Status::Ok().code());
+  static_assert(PW_STATUS_CANCELLED == Status::Cancelled().code());
+  static_assert(PW_STATUS_UNKNOWN == Status::Unknown().code());
+  static_assert(PW_STATUS_INVALID_ARGUMENT == Status::InvalidArgument().code());
+  static_assert(PW_STATUS_DEADLINE_EXCEEDED == Status::DeadlineExceeded().code());
+  static_assert(PW_STATUS_NOT_FOUND == Status::NotFound().code());
+  static_assert(PW_STATUS_ALREADY_EXISTS == Status::AlreadyExists().code());
+  static_assert(PW_STATUS_PERMISSION_DENIED == Status::PermissionDenied().code());
+  static_assert(PW_STATUS_RESOURCE_EXHAUSTED == Status::ResourceExhausted().code());
+  static_assert(PW_STATUS_FAILED_PRECONDITION == Status::FailedPrecondition().code());
+  static_assert(PW_STATUS_ABORTED == Status::Aborted().code());
+  static_assert(PW_STATUS_OUT_OF_RANGE == Status::OutOfRange().code());
+  static_assert(PW_STATUS_UNIMPLEMENTED == Status::Unimplemented().code());
+  static_assert(PW_STATUS_INTERNAL == Status::Internal().code());
+  static_assert(PW_STATUS_UNAVAILABLE == Status::Unavailable().code());
+  static_assert(PW_STATUS_DATA_LOSS == Status::DataLoss().code());
+  static_assert(PW_STATUS_UNAUTHENTICATED == Status::Unauthenticated().code());
+  // clang-format on
+}
+
+TEST(Status, EqualCodes) {
+  static_assert(PW_STATUS_OK == Status());
+  static_assert(PW_STATUS_OK == Status::Ok());
+  static_assert(PW_STATUS_CANCELLED == Status::Cancelled());
+  static_assert(PW_STATUS_UNKNOWN == Status::Unknown());
+  static_assert(PW_STATUS_INVALID_ARGUMENT == Status::InvalidArgument());
+  static_assert(PW_STATUS_DEADLINE_EXCEEDED == Status::DeadlineExceeded());
+  static_assert(PW_STATUS_NOT_FOUND == Status::NotFound());
+  static_assert(PW_STATUS_ALREADY_EXISTS == Status::AlreadyExists());
+  static_assert(PW_STATUS_PERMISSION_DENIED == Status::PermissionDenied());
+  static_assert(PW_STATUS_RESOURCE_EXHAUSTED == Status::ResourceExhausted());
+  static_assert(PW_STATUS_FAILED_PRECONDITION == Status::FailedPrecondition());
+  static_assert(PW_STATUS_ABORTED == Status::Aborted());
+  static_assert(PW_STATUS_OUT_OF_RANGE == Status::OutOfRange());
+  static_assert(PW_STATUS_UNIMPLEMENTED == Status::Unimplemented());
+  static_assert(PW_STATUS_INTERNAL == Status::Internal());
+  static_assert(PW_STATUS_UNAVAILABLE == Status::Unavailable());
+  static_assert(PW_STATUS_DATA_LOSS == Status::DataLoss());
+  static_assert(PW_STATUS_UNAUTHENTICATED == Status::Unauthenticated());
+}
+
+TEST(Status, IsError) {
+  static_assert(Status::Cancelled().IsCancelled());
+  static_assert(Status::Unknown().IsUnknown());
+  static_assert(Status::InvalidArgument().IsInvalidArgument());
+  static_assert(Status::DeadlineExceeded().IsDeadlineExceeded());
+  static_assert(Status::NotFound().IsNotFound());
+  static_assert(Status::AlreadyExists().IsAlreadyExists());
+  static_assert(Status::PermissionDenied().IsPermissionDenied());
+  static_assert(Status::ResourceExhausted().IsResourceExhausted());
+  static_assert(Status::FailedPrecondition().IsFailedPrecondition());
+  static_assert(Status::Aborted().IsAborted());
+  static_assert(Status::OutOfRange().IsOutOfRange());
+  static_assert(Status::Unimplemented().IsUnimplemented());
+  static_assert(Status::Internal().IsInternal());
+  static_assert(Status::Unavailable().IsUnavailable());
+  static_assert(Status::DataLoss().IsDataLoss());
+  static_assert(Status::Unauthenticated().IsUnauthenticated());
+}
+
+TEST(Status, IsNotError) {
+  static_assert(!Status::Ok().IsCancelled());
+  static_assert(!Status::Ok().IsUnknown());
+  static_assert(!Status::Ok().IsInvalidArgument());
+  static_assert(!Status::Ok().IsDeadlineExceeded());
+  static_assert(!Status::Ok().IsNotFound());
+  static_assert(!Status::Ok().IsAlreadyExists());
+  static_assert(!Status::Ok().IsPermissionDenied());
+  static_assert(!Status::Ok().IsUnauthenticated());
+  static_assert(!Status::Ok().IsResourceExhausted());
+  static_assert(!Status::Ok().IsFailedPrecondition());
+  static_assert(!Status::Ok().IsAborted());
+  static_assert(!Status::Ok().IsOutOfRange());
+  static_assert(!Status::Ok().IsUnimplemented());
+  static_assert(!Status::Ok().IsInternal());
+  static_assert(!Status::Ok().IsUnavailable());
+  static_assert(!Status::Ok().IsDataLoss());
+}
+
+TEST(Status, Strings) {
+  EXPECT_STREQ("OK", Status().str());
+  EXPECT_STREQ("OK", Status::Ok().str());
+  EXPECT_STREQ("CANCELLED", Status::Cancelled().str());
+  EXPECT_STREQ("UNKNOWN", Status::Unknown().str());
+  EXPECT_STREQ("INVALID_ARGUMENT", Status::InvalidArgument().str());
+  EXPECT_STREQ("DEADLINE_EXCEEDED", Status::DeadlineExceeded().str());
+  EXPECT_STREQ("NOT_FOUND", Status::NotFound().str());
+  EXPECT_STREQ("ALREADY_EXISTS", Status::AlreadyExists().str());
+  EXPECT_STREQ("PERMISSION_DENIED", Status::PermissionDenied().str());
+  EXPECT_STREQ("RESOURCE_EXHAUSTED", Status::ResourceExhausted().str());
+  EXPECT_STREQ("FAILED_PRECONDITION", Status::FailedPrecondition().str());
+  EXPECT_STREQ("ABORTED", Status::Aborted().str());
+  EXPECT_STREQ("OUT_OF_RANGE", Status::OutOfRange().str());
+  EXPECT_STREQ("UNIMPLEMENTED", Status::Unimplemented().str());
+  EXPECT_STREQ("INTERNAL", Status::Internal().str());
+  EXPECT_STREQ("UNAVAILABLE", Status::Unavailable().str());
+  EXPECT_STREQ("DATA_LOSS", Status::DataLoss().str());
+  EXPECT_STREQ("UNAUTHENTICATED", Status::Unauthenticated().str());
 }
 
 TEST(Status, UnknownString) {
-  EXPECT_STREQ("INVALID STATUS", Status(static_cast<Status::Code>(30)).str());
+  EXPECT_STREQ("INVALID STATUS", Status(kInvalidCode).str());
+}
+
+TEST(Status, DeprecatedAliases) {
+  // TODO(pwbug/268): Remove this test after migrating from these aliases.
+  static_assert(PW_STATUS_OK == Status::OK);
+  static_assert(PW_STATUS_CANCELLED == Status::CANCELLED);
+  static_assert(PW_STATUS_UNKNOWN == Status::UNKNOWN);
+  static_assert(PW_STATUS_INVALID_ARGUMENT == Status::INVALID_ARGUMENT);
+  static_assert(PW_STATUS_DEADLINE_EXCEEDED == Status::DEADLINE_EXCEEDED);
+  static_assert(PW_STATUS_NOT_FOUND == Status::NOT_FOUND);
+  static_assert(PW_STATUS_ALREADY_EXISTS == Status::ALREADY_EXISTS);
+  static_assert(PW_STATUS_PERMISSION_DENIED == Status::PERMISSION_DENIED);
+  static_assert(PW_STATUS_RESOURCE_EXHAUSTED == Status::RESOURCE_EXHAUSTED);
+  static_assert(PW_STATUS_FAILED_PRECONDITION == Status::FAILED_PRECONDITION);
+  static_assert(PW_STATUS_ABORTED == Status::ABORTED);
+  static_assert(PW_STATUS_OUT_OF_RANGE == Status::OUT_OF_RANGE);
+  static_assert(PW_STATUS_UNIMPLEMENTED == Status::UNIMPLEMENTED);
+  static_assert(PW_STATUS_INTERNAL == Status::INTERNAL);
+  static_assert(PW_STATUS_UNAVAILABLE == Status::UNAVAILABLE);
+  static_assert(PW_STATUS_DATA_LOSS == Status::DATA_LOSS);
+  static_assert(PW_STATUS_UNAUTHENTICATED == Status::UNAUTHENTICATED);
 }
 
 // Functions for executing the C pw_Status tests.
@@ -82,7 +202,7 @@
 
 Status::Code PassStatusFromC(Status status);
 
-Status::Code PassStatusFromCpp(Status status) { return status; }
+Status::Code PassStatusFromCpp(Status status) { return status.code(); }
 
 int TestStatusFromC(void);
 
@@ -91,11 +211,11 @@
 }  // extern "C"
 
 TEST(StatusCLinkage, CallCFunctionWithStatus) {
-  EXPECT_EQ(Status::ABORTED, PassStatusFromC(Status::ABORTED));
-  EXPECT_EQ(Status::UNKNOWN, PassStatusFromC(Status(Status::UNKNOWN)));
+  EXPECT_EQ(Status::Aborted(), PassStatusFromC(PW_STATUS_ABORTED));
+  EXPECT_EQ(Status::Unknown(), PassStatusFromC(Status::Unknown()));
 
-  EXPECT_EQ(Status(Status::NOT_FOUND), PassStatusFromC(Status::NOT_FOUND));
-  EXPECT_EQ(Status(Status::OK), PassStatusFromC(Status(Status::OK)));
+  EXPECT_EQ(Status::NotFound(), PassStatusFromC(PW_STATUS_NOT_FOUND));
+  EXPECT_EQ(Status::Ok(), PassStatusFromC(Status::Ok()));
 }
 
 TEST(StatusCLinkage, TestStatusFromC) { EXPECT_EQ(0, TestStatusFromC()); }
diff --git a/pw_status/status_with_size_test.cc b/pw_status/status_with_size_test.cc
index 473384a..ef0f1cf 100644
--- a/pw_status/status_with_size_test.cc
+++ b/pw_status/status_with_size_test.cc
@@ -26,40 +26,40 @@
 TEST(StatusWithSize, Default) {
   StatusWithSize result;
   EXPECT_TRUE(result.ok());
-  EXPECT_EQ(Status::OK, result.status());
+  EXPECT_EQ(Status::Ok(), result.status());
   EXPECT_EQ(0u, result.size());
 }
 
 TEST(StatusWithSize, ConstructWithSize) {
   StatusWithSize result = StatusWithSize(456);
   EXPECT_TRUE(result.ok());
-  EXPECT_EQ(Status::OK, result.status());
+  EXPECT_EQ(Status::Ok(), result.status());
   EXPECT_EQ(456u, result.size());
 }
 
 TEST(StatusWithSize, ConstructWithError) {
-  StatusWithSize result(Status::RESOURCE_EXHAUSTED, 123);
+  StatusWithSize result(Status::ResourceExhausted(), 123);
   EXPECT_FALSE(result.ok());
-  EXPECT_EQ(Status::RESOURCE_EXHAUSTED, result.status());
+  EXPECT_EQ(Status::ResourceExhausted(), result.status());
   EXPECT_EQ(123u, result.size());
 }
 
 TEST(StatusWithSize, ConstructWithOkAndSize) {
-  StatusWithSize result(Status::OK, 99);
+  StatusWithSize result(Status::Ok(), 99);
   EXPECT_TRUE(result.ok());
-  EXPECT_EQ(Status::OK, result.status());
+  EXPECT_EQ(Status::Ok(), result.status());
   EXPECT_EQ(99u, result.size());
 }
 
 TEST(StatusWithSize, ConstructFromConstant) {
-  StatusWithSize result(StatusWithSize::ALREADY_EXISTS);
+  StatusWithSize result(StatusWithSize::AlreadyExists());
 
-  EXPECT_EQ(Status::ALREADY_EXISTS, result.status());
+  EXPECT_EQ(Status::AlreadyExists(), result.status());
   EXPECT_EQ(0u, result.size());
 
-  result = StatusWithSize::NOT_FOUND;
+  result = StatusWithSize::NotFound();
 
-  EXPECT_EQ(Status::NOT_FOUND, result.status());
+  EXPECT_EQ(Status::NotFound(), result.status());
   EXPECT_EQ(0u, result.size());
 }
 
@@ -67,7 +67,7 @@
   for (int i = 0; i < 32; ++i) {
     StatusWithSize result(static_cast<Status::Code>(i), 0);
     EXPECT_EQ(result.ok(), i == 0);
-    EXPECT_EQ(i, static_cast<int>(result.status()));
+    EXPECT_EQ(i, static_cast<int>(result.status().code()));
     EXPECT_EQ(0u, result.size());
   }
 }
@@ -76,7 +76,7 @@
   for (int i = 0; i < 32; ++i) {
     StatusWithSize result(static_cast<Status::Code>(i), i);
     EXPECT_EQ(result.ok(), i == 0);
-    EXPECT_EQ(i, static_cast<int>(result.status()));
+    EXPECT_EQ(i, static_cast<int>(result.status().code()));
     EXPECT_EQ(static_cast<size_t>(i), result.size());
   }
 }
@@ -86,29 +86,145 @@
     StatusWithSize result(static_cast<Status::Code>(i),
                           StatusWithSize::max_size());
     EXPECT_EQ(result.ok(), i == 0);
-    EXPECT_EQ(i, static_cast<int>(result.status()));
+    EXPECT_EQ(i, static_cast<int>(result.status().code()));
     EXPECT_EQ(result.max_size(), result.size());
   }
 }
 
 TEST(StatusWithSize, Assignment) {
-  StatusWithSize result = StatusWithSize(Status::INTERNAL, 0x123);
+  StatusWithSize result = StatusWithSize(Status::Internal(), 0x123);
   EXPECT_FALSE(result.ok());
-  EXPECT_EQ(Status::INTERNAL, result.status());
+  EXPECT_EQ(Status::Internal(), result.status());
   EXPECT_EQ(0x123u, result.size());
 
   result = StatusWithSize(300);
   EXPECT_TRUE(result.ok());
-  EXPECT_EQ(Status::OK, result.status());
+  EXPECT_EQ(Status::Ok(), result.status());
   EXPECT_EQ(300u, result.size());
 }
 
 TEST(StatusWithSize, Constexpr) {
-  constexpr StatusWithSize result(Status::CANCELLED, 1234);
-  static_assert(Status::CANCELLED == result.status());
+  constexpr StatusWithSize result(Status::Cancelled(), 1234);
+  static_assert(Status::Cancelled() == result.status());
   static_assert(!result.ok());
   static_assert(1234u == result.size());
 }
 
+TEST(StatusWithSize, Functions_Status) {
+  // clang-format off
+  static_assert(StatusWithSize::Ok(0).status() == Status::Ok());
+  static_assert(StatusWithSize::Cancelled().status() == Status::Cancelled());
+  static_assert(StatusWithSize::Unknown().status() == Status::Unknown());
+  static_assert(StatusWithSize::InvalidArgument().status() == Status::InvalidArgument());
+  static_assert(StatusWithSize::DeadlineExceeded().status() == Status::DeadlineExceeded());
+  static_assert(StatusWithSize::NotFound().status() == Status::NotFound());
+  static_assert(StatusWithSize::AlreadyExists().status() == Status::AlreadyExists());
+  static_assert(StatusWithSize::PermissionDenied().status() == Status::PermissionDenied());
+  static_assert(StatusWithSize::Unauthenticated().status() == Status::Unauthenticated());
+  static_assert(StatusWithSize::ResourceExhausted().status() == Status::ResourceExhausted());
+  static_assert(StatusWithSize::FailedPrecondition().status() == Status::FailedPrecondition());
+  static_assert(StatusWithSize::Aborted().status() == Status::Aborted());
+  static_assert(StatusWithSize::OutOfRange().status() == Status::OutOfRange());
+  static_assert(StatusWithSize::Unimplemented().status() == Status::Unimplemented());
+  static_assert(StatusWithSize::Internal().status() == Status::Internal());
+  static_assert(StatusWithSize::Unavailable().status() == Status::Unavailable());
+  static_assert(StatusWithSize::DataLoss().status() == Status::DataLoss());
+
+  static_assert(StatusWithSize::Ok(123).status() == Status::Ok());
+  static_assert(StatusWithSize::Cancelled(123).status() == Status::Cancelled());
+  static_assert(StatusWithSize::Unknown(123).status() == Status::Unknown());
+  static_assert(StatusWithSize::InvalidArgument(123).status() == Status::InvalidArgument());
+  static_assert(StatusWithSize::DeadlineExceeded(123).status() == Status::DeadlineExceeded());
+  static_assert(StatusWithSize::NotFound(123).status() == Status::NotFound());
+  static_assert(StatusWithSize::AlreadyExists(123).status() == Status::AlreadyExists());
+  static_assert(StatusWithSize::PermissionDenied(123).status() == Status::PermissionDenied());
+  static_assert(StatusWithSize::Unauthenticated(123).status() == Status::Unauthenticated());
+  static_assert(StatusWithSize::ResourceExhausted(123).status() == Status::ResourceExhausted());
+  static_assert(StatusWithSize::FailedPrecondition(123).status() == Status::FailedPrecondition());
+  static_assert(StatusWithSize::Aborted(123).status() == Status::Aborted());
+  static_assert(StatusWithSize::OutOfRange(123).status() == Status::OutOfRange());
+  static_assert(StatusWithSize::Unimplemented(123).status() == Status::Unimplemented());
+  static_assert(StatusWithSize::Internal(123).status() == Status::Internal());
+  static_assert(StatusWithSize::Unavailable(123).status() == Status::Unavailable());
+  static_assert(StatusWithSize::DataLoss(123).status() == Status::DataLoss());
+  // clang-format on
+}
+
+TEST(StatusWithSize, Functions_DefaultSize) {
+  static_assert(StatusWithSize::Cancelled().size() == 0u);
+  static_assert(StatusWithSize::Unknown().size() == 0u);
+  static_assert(StatusWithSize::InvalidArgument().size() == 0u);
+  static_assert(StatusWithSize::DeadlineExceeded().size() == 0u);
+  static_assert(StatusWithSize::NotFound().size() == 0u);
+  static_assert(StatusWithSize::AlreadyExists().size() == 0u);
+  static_assert(StatusWithSize::PermissionDenied().size() == 0u);
+  static_assert(StatusWithSize::Unauthenticated().size() == 0u);
+  static_assert(StatusWithSize::ResourceExhausted().size() == 0u);
+  static_assert(StatusWithSize::FailedPrecondition().size() == 0u);
+  static_assert(StatusWithSize::Aborted().size() == 0u);
+  static_assert(StatusWithSize::OutOfRange().size() == 0u);
+  static_assert(StatusWithSize::Unimplemented().size() == 0u);
+  static_assert(StatusWithSize::Internal().size() == 0u);
+  static_assert(StatusWithSize::Unavailable().size() == 0u);
+  static_assert(StatusWithSize::DataLoss().size() == 0u);
+}
+
+TEST(StatusWithSize, Functions_SpecifiedSize) {
+  static_assert(StatusWithSize::Ok(123).size() == 123u);
+  static_assert(StatusWithSize::Cancelled(123).size() == 123u);
+  static_assert(StatusWithSize::Unknown(123).size() == 123u);
+  static_assert(StatusWithSize::InvalidArgument(123).size() == 123u);
+  static_assert(StatusWithSize::DeadlineExceeded(123).size() == 123u);
+  static_assert(StatusWithSize::NotFound(123).size() == 123u);
+  static_assert(StatusWithSize::AlreadyExists(123).size() == 123u);
+  static_assert(StatusWithSize::PermissionDenied(123).size() == 123u);
+  static_assert(StatusWithSize::Unauthenticated(123).size() == 123u);
+  static_assert(StatusWithSize::ResourceExhausted(123).size() == 123u);
+  static_assert(StatusWithSize::FailedPrecondition(123).size() == 123u);
+  static_assert(StatusWithSize::Aborted(123).size() == 123u);
+  static_assert(StatusWithSize::OutOfRange(123).size() == 123u);
+  static_assert(StatusWithSize::Unimplemented(123).size() == 123u);
+  static_assert(StatusWithSize::Internal(123).size() == 123u);
+  static_assert(StatusWithSize::Unavailable(123).size() == 123u);
+  static_assert(StatusWithSize::DataLoss(123).size() == 123u);
+}
+
+TEST(StatusWithSize, IsError) {
+  static_assert(StatusWithSize::Cancelled().IsCancelled());
+  static_assert(StatusWithSize::Unknown().IsUnknown());
+  static_assert(StatusWithSize::InvalidArgument().IsInvalidArgument());
+  static_assert(StatusWithSize::DeadlineExceeded().IsDeadlineExceeded());
+  static_assert(StatusWithSize::NotFound().IsNotFound());
+  static_assert(StatusWithSize::AlreadyExists().IsAlreadyExists());
+  static_assert(StatusWithSize::PermissionDenied().IsPermissionDenied());
+  static_assert(StatusWithSize::ResourceExhausted().IsResourceExhausted());
+  static_assert(StatusWithSize::FailedPrecondition().IsFailedPrecondition());
+  static_assert(StatusWithSize::Aborted().IsAborted());
+  static_assert(StatusWithSize::OutOfRange().IsOutOfRange());
+  static_assert(StatusWithSize::Unimplemented().IsUnimplemented());
+  static_assert(StatusWithSize::Internal().IsInternal());
+  static_assert(StatusWithSize::Unavailable().IsUnavailable());
+  static_assert(StatusWithSize::DataLoss().IsDataLoss());
+  static_assert(StatusWithSize::Unauthenticated().IsUnauthenticated());
+}
+
+TEST(StatusWithSize, IsNotError) {
+  static_assert(!StatusWithSize::Ok(0).IsCancelled());
+  static_assert(!StatusWithSize::Ok(0).IsUnknown());
+  static_assert(!StatusWithSize::Ok(0).IsInvalidArgument());
+  static_assert(!StatusWithSize::Ok(0).IsDeadlineExceeded());
+  static_assert(!StatusWithSize::Ok(0).IsNotFound());
+  static_assert(!StatusWithSize::Ok(0).IsAlreadyExists());
+  static_assert(!StatusWithSize::Ok(0).IsPermissionDenied());
+  static_assert(!StatusWithSize::Ok(0).IsUnauthenticated());
+  static_assert(!StatusWithSize::Ok(0).IsResourceExhausted());
+  static_assert(!StatusWithSize::Ok(0).IsFailedPrecondition());
+  static_assert(!StatusWithSize::Ok(0).IsAborted());
+  static_assert(!StatusWithSize::Ok(0).IsOutOfRange());
+  static_assert(!StatusWithSize::Ok(0).IsUnimplemented());
+  static_assert(!StatusWithSize::Ok(0).IsInternal());
+  static_assert(!StatusWithSize::Ok(0).IsUnavailable());
+  static_assert(!StatusWithSize::Ok(0).IsDataLoss());
+}
 }  // namespace
 }  // namespace pw
diff --git a/pw_status/try_test.cc b/pw_status/try_test.cc
new file mode 100644
index 0000000..b84258b
--- /dev/null
+++ b/pw_status/try_test.cc
@@ -0,0 +1,163 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_status/try.h"
+
+#include "gtest/gtest.h"
+
+namespace pw {
+namespace {
+
+Status ReturnStatus(Status status) { return status; }
+StatusWithSize ReturnStatusWithSize(StatusWithSize status) { return status; }
+
+Status TryStatus(Status status) {
+  PW_TRY(ReturnStatus(status));
+
+  // Any status other than OK should have already returned.
+  EXPECT_EQ(status, Status::Ok());
+  return status;
+}
+
+Status TryStatus(StatusWithSize status) {
+  PW_TRY(ReturnStatusWithSize(status));
+
+  // Any status other than OK should have already returned.
+  EXPECT_EQ(status.status(), Status::Ok());
+  return status.status();
+}
+
+TEST(Status, Try_Status) {
+  EXPECT_EQ(TryStatus(Status::Ok()), Status::Ok());
+
+  // Don't need all the status types, just pick a few not-ok ones.
+  EXPECT_EQ(TryStatus(Status::Cancelled()), Status::Cancelled());
+  EXPECT_EQ(TryStatus(Status::DataLoss()), Status::DataLoss());
+  EXPECT_EQ(TryStatus(Status::Unimplemented()), Status::Unimplemented());
+}
+
+TEST(Status, Try_StatusWithSizeOk) {
+  for (size_t i = 0; i < 32; ++i) {
+    StatusWithSize val(Status::Ok(), 0);
+    EXPECT_EQ(TryStatus(val), Status::Ok());
+  }
+}
+
+TEST(Status, Try_StatusWithSizeError) {
+  for (size_t i = 0; i < 32; ++i) {
+    StatusWithSize val(Status::DataLoss(), i);
+    EXPECT_EQ(TryStatus(val), Status::DataLoss());
+  }
+}
+
+TEST(Status, Try_StatusWithSizeFromConstant) {
+  // Don't need all the status types, just pick a few not-ok ones.
+  EXPECT_EQ(TryStatus(StatusWithSize::Cancelled()), Status::Cancelled());
+  EXPECT_EQ(TryStatus(StatusWithSize::DataLoss()), Status::DataLoss());
+  EXPECT_EQ(TryStatus(StatusWithSize::Unimplemented()),
+            Status::Unimplemented());
+}
+
+Status TryStatusAssign(size_t& size_val, StatusWithSize status) {
+  PW_TRY_ASSIGN(size_val, ReturnStatusWithSize(status));
+
+  // Any status other than OK should have already returned.
+  EXPECT_EQ(status.status(), Status::Ok());
+  EXPECT_EQ(size_val, status.size());
+  return status.status();
+}
+
+TEST(Status, TryAssignOk) {
+  size_t size_val = 0;
+
+  for (size_t i = 1; i < 32; ++i) {
+    StatusWithSize val(Status::Ok(), i);
+    EXPECT_EQ(TryStatusAssign(size_val, val), Status::Ok());
+    EXPECT_EQ(size_val, i);
+  }
+}
+
+TEST(Status, TryAssignError) {
+  size_t size_val = 0u;
+
+  for (size_t i = 1; i < 32; ++i) {
+    StatusWithSize val(Status::OutOfRange(), i);
+    EXPECT_EQ(TryStatusAssign(size_val, val), Status::OutOfRange());
+    EXPECT_EQ(size_val, 0u);
+  }
+}
+
+StatusWithSize TryStatusWithSize(StatusWithSize status) {
+  PW_TRY_WITH_SIZE(ReturnStatusWithSize(status));
+
+  // Any status other than OK should have already returned.
+  EXPECT_TRUE(status.ok());
+  return status;
+}
+
+StatusWithSize TryStatusWithSize(Status status) {
+  PW_TRY_WITH_SIZE(ReturnStatus(status));
+
+  // Any status other than OK should have already returned.
+  EXPECT_EQ(status, Status::Ok());
+
+  StatusWithSize return_val(status, 0u);
+  return return_val;
+}
+
+TEST(Status, TryWithSize_StatusOk) {
+  StatusWithSize result = TryStatusWithSize(Status::Ok());
+  EXPECT_EQ(result.status(), Status::Ok());
+  EXPECT_EQ(result.size(), 0u);
+}
+
+TEST(Status, TryWithSize_StatusError) {
+  StatusWithSize result = TryStatusWithSize(Status::PermissionDenied());
+  EXPECT_EQ(result.status(), Status::PermissionDenied());
+  EXPECT_EQ(result.size(), 0u);
+}
+
+TEST(Status, TryWithSize_StatusWithSizeOk) {
+  for (size_t i = 0; i < 32; ++i) {
+    StatusWithSize val(Status::Ok(), i);
+    EXPECT_EQ(TryStatusWithSize(val).status(), Status::Ok());
+    EXPECT_EQ(TryStatusWithSize(val).size(), i);
+  }
+}
+
+TEST(Status, TryWithSize_StatusWithSizeError) {
+  for (size_t i = 0; i < 32; ++i) {
+    StatusWithSize val(Status::DataLoss(), i);
+    StatusWithSize result = TryStatusWithSize(val);
+    EXPECT_EQ(result.status(), Status::DataLoss());
+    EXPECT_EQ(result.size(), i);
+  }
+}
+
+TEST(Status, TryWithSize_StatusWithSizeConst) {
+  StatusWithSize result = TryStatusWithSize(StatusWithSize::DataLoss());
+  EXPECT_EQ(result.status(), Status::DataLoss());
+  EXPECT_EQ(result.size(), 0u);
+
+  result = TryStatusWithSize(StatusWithSize::NotFound());
+  EXPECT_EQ(result.status(), Status::NotFound());
+  EXPECT_EQ(result.size(), 0u);
+
+  result = TryStatusWithSize(StatusWithSize::Unimplemented());
+  EXPECT_EQ(result.status(), Status::Unimplemented());
+  EXPECT_EQ(result.size(), 0u);
+}
+
+}  // namespace
+}  // namespace pw
diff --git a/pw_stream/BUILD b/pw_stream/BUILD
index 87e639d..a28ebae 100644
--- a/pw_stream/BUILD
+++ b/pw_stream/BUILD
@@ -27,6 +27,7 @@
     hdrs = [
       "public/pw_stream/buffered_stream.h",
       "public/pw_stream/memory_stream.h",
+      "public/pw_stream/null_stream.h",
       "public/pw_stream/stream.h",
     ],
     srcs = [
diff --git a/pw_stream/BUILD.gn b/pw_stream/BUILD.gn
index d5d0ac6..cfd3583 100644
--- a/pw_stream/BUILD.gn
+++ b/pw_stream/BUILD.gn
@@ -12,12 +12,12 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/target_types.gni")
 import("$dir_pw_docgen/docs.gni")
 import("$dir_pw_unit_test/test.gni")
+
 config("default_config") {
   include_dirs = [ "public" ]
 }
@@ -26,6 +26,7 @@
   public_configs = [ ":default_config" ]
   public = [
     "public/pw_stream/memory_stream.h",
+    "public/pw_stream/null_stream.h",
     "public/pw_stream/stream.h",
   ]
   sources = [ "memory_stream.cc" ]
diff --git a/pw_protobuf_compiler/nanopb.gni b/pw_stream/CMakeLists.txt
similarity index 71%
copy from pw_protobuf_compiler/nanopb.gni
copy to pw_stream/CMakeLists.txt
index 3c3be32..27e58f6 100644
--- a/pw_protobuf_compiler/nanopb.gni
+++ b/pw_stream/CMakeLists.txt
@@ -12,8 +12,17 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-declare_args() {
-  # If compiling protos for nanopb, this variable is set to the path to the
-  # nanopb installation.
-  dir_pw_third_party_nanopb = ""
-}
+include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+
+pw_auto_add_simple_module(pw_stream
+  PUBLIC_DEPS
+    pw_bytes
+    pw_containers
+    pw_log
+    pw_result
+    pw_span
+    pw_status
+  PRIVATE_DEPS
+    pw_assert
+    pw_string
+)
diff --git a/pw_stream/docs.rst b/pw_stream/docs.rst
index 9b3da1c..2fcd644 100644
--- a/pw_stream/docs.rst
+++ b/pw_stream/docs.rst
@@ -1,8 +1,4 @@
-.. _chapter-stream:
-
-.. default-domain:: cpp
-
-.. highlight:: sh
+.. _module-pw_stream:
 
 ---------
 pw_stream
@@ -68,6 +64,11 @@
 The ``MemoryReader`` class implements the ``Reader`` interface by backing the
 data source with an **externally-provided** memory buffer.
 
+pw::stream::NullWriter
+------------------------
+The ``NullWriter`` class implements the ``Writer`` interface by dropping all
+requested data writes, similar to ``/dev/null``.
+
 Why use pw_stream?
 ==================
 
diff --git a/pw_stream/memory_stream.cc b/pw_stream/memory_stream.cc
index b0a9c36..8f7e29e 100644
--- a/pw_stream/memory_stream.cc
+++ b/pw_stream/memory_stream.cc
@@ -23,22 +23,22 @@
 
 Status MemoryWriter::DoWrite(ConstByteSpan data) {
   if (ConservativeWriteLimit() == 0) {
-    return Status::OUT_OF_RANGE;
+    return Status::OutOfRange();
   }
   if (ConservativeWriteLimit() < data.size_bytes()) {
-    return Status::RESOURCE_EXHAUSTED;
+    return Status::ResourceExhausted();
   }
 
   size_t bytes_to_write = data.size_bytes();
   std::memcpy(dest_.data() + bytes_written_, data.data(), bytes_to_write);
   bytes_written_ += bytes_to_write;
 
-  return Status::OK;
+  return Status::Ok();
 }
 
 StatusWithSize MemoryReader::DoRead(ByteSpan dest) {
   if (source_.size_bytes() == bytes_read_) {
-    return StatusWithSize::OUT_OF_RANGE;
+    return StatusWithSize::OutOfRange();
   }
 
   size_t bytes_to_read =
diff --git a/pw_stream/memory_stream_test.cc b/pw_stream/memory_stream_test.cc
index e08a299..51c9a38 100644
--- a/pw_stream/memory_stream_test.cc
+++ b/pw_stream/memory_stream_test.cc
@@ -38,7 +38,7 @@
   EXPECT_EQ(memory_writer.bytes_written(), 0u);
   Status status =
       memory_writer.Write(&kExpectedStruct, sizeof(kExpectedStruct));
-  EXPECT_EQ(status, Status::OK);
+  EXPECT_EQ(status, Status::Ok());
   EXPECT_EQ(memory_writer.bytes_written(), sizeof(kExpectedStruct));
 }  // namespace
 
@@ -68,13 +68,14 @@
     for (size_t i = 0; i < sizeof(buffer); ++i) {
       buffer[i] = std::byte(counter++);
     }
-    EXPECT_EQ(memory_writer.Write(std::span(buffer)), Status::OK);
+    EXPECT_EQ(memory_writer.Write(std::span(buffer)), Status::Ok());
   }
 
   EXPECT_GT(memory_writer.ConservativeWriteLimit(), 0u);
   EXPECT_LT(memory_writer.ConservativeWriteLimit(), kTempBufferSize);
 
-  EXPECT_EQ(memory_writer.Write(std::span(buffer)), Status::RESOURCE_EXHAUSTED);
+  EXPECT_EQ(memory_writer.Write(std::span(buffer)),
+            Status::ResourceExhausted());
   EXPECT_EQ(memory_writer.bytes_written(), counter);
 
   counter = 0;
@@ -98,12 +99,12 @@
     size_t bytes_to_write =
         std::min(sizeof(buffer), memory_writer.ConservativeWriteLimit());
     EXPECT_EQ(memory_writer.Write(std::span(buffer, bytes_to_write)),
-              Status::OK);
+              Status::Ok());
   }
 
   EXPECT_EQ(memory_writer.ConservativeWriteLimit(), 0u);
 
-  EXPECT_EQ(memory_writer.Write(std::span(buffer)), Status::OUT_OF_RANGE);
+  EXPECT_EQ(memory_writer.Write(std::span(buffer)), Status::OutOfRange());
   EXPECT_EQ(memory_writer.bytes_written(), memory_buffer.size());
 
   for (const std::byte& value : memory_writer.WrittenData()) {
@@ -115,7 +116,7 @@
   std::byte buffer[5] = {};
 
   MemoryWriter memory_writer(memory_buffer);
-  EXPECT_EQ(memory_writer.Write(buffer, 0), Status::OK);
+  EXPECT_EQ(memory_writer.Write(buffer, 0), Status::Ok());
   EXPECT_EQ(memory_writer.bytes_written(), 0u);
 }
 
@@ -130,7 +131,8 @@
   EXPECT_EQ(memory_writer.data()[1], std::byte{0x7E});
 }
 
-#if CHECK_TEST_CRASHES
+#define TESTING_CHECK_FAILURES_IS_SUPPORTED 0
+#if TESTING_CHECK_FAILURES_IS_SUPPORTED
 
 // TODO(amontanez): Ensure that this test triggers an assert.
 TEST(MemoryWriter, NullPointer) {
@@ -151,7 +153,7 @@
   memory_reader.Read(nullptr, 21);
 }
 
-#endif  // CHECK_TEST_CRASHES
+#endif  // TESTING_CHECK_FAILURES_IS_SUPPORTED
 
 TEST(MemoryReader, SingleFullRead) {
   constexpr size_t kTempBufferSize = 32;
@@ -169,7 +171,7 @@
   // Read exactly the available bytes.
   EXPECT_EQ(memory_reader.ConservativeReadLimit(), dest.size());
   Result<ByteSpan> result = memory_reader.Read(dest);
-  EXPECT_EQ(result.status(), Status::OK);
+  EXPECT_EQ(result.status(), Status::Ok());
   EXPECT_EQ(result.value().size_bytes(), dest.size());
 
   ASSERT_EQ(source.size(), result.value().size_bytes());
@@ -180,7 +182,7 @@
   // Shoud be no byte remaining.
   EXPECT_EQ(memory_reader.ConservativeReadLimit(), 0u);
   result = memory_reader.Read(dest);
-  EXPECT_EQ(result.status(), Status::OUT_OF_RANGE);
+  EXPECT_EQ(result.status(), Status::OutOfRange());
 }
 
 TEST(MemoryReader, EmptySpanRead) {
@@ -195,7 +197,7 @@
 
   // Read exactly the available bytes.
   Result<ByteSpan> result = memory_reader.Read(dest);
-  EXPECT_EQ(result.status(), Status::OK);
+  EXPECT_EQ(result.status(), Status::Ok());
   EXPECT_EQ(result.value().size_bytes(), 0u);
   EXPECT_EQ(result.value().data(), dest.data());
 
@@ -218,7 +220,7 @@
   // Try and read double the bytes available. Use the pointer/size version of
   // the API.
   Result<ByteSpan> result = memory_reader.Read(dest.data(), dest.size());
-  EXPECT_EQ(result.status(), Status::OK);
+  EXPECT_EQ(result.status(), Status::Ok());
   EXPECT_EQ(result.value().size_bytes(), source.size());
 
   ASSERT_EQ(source.size(), result.value().size_bytes());
@@ -229,7 +231,7 @@
   // Shoud be no byte remaining.
   EXPECT_EQ(memory_reader.ConservativeReadLimit(), 0u);
   result = memory_reader.Read(dest);
-  EXPECT_EQ(result.status(), Status::OUT_OF_RANGE);
+  EXPECT_EQ(result.status(), Status::OutOfRange());
 }
 
 TEST(MemoryReader, MultipleReads) {
@@ -253,7 +255,7 @@
 
     // Try and read a chunk of bytes.
     Result<ByteSpan> result = memory_reader.Read(dest);
-    EXPECT_EQ(result.status(), Status::OK);
+    EXPECT_EQ(result.status(), Status::Ok());
     EXPECT_EQ(result.value().size_bytes(), dest.size());
     EXPECT_EQ(memory_reader.ConservativeReadLimit(),
               read_limit - result.value().size_bytes());
diff --git a/pw_stream/public/pw_stream/memory_stream.h b/pw_stream/public/pw_stream/memory_stream.h
index c8e0460..299fb3a 100644
--- a/pw_stream/public/pw_stream/memory_stream.h
+++ b/pw_stream/public/pw_stream/memory_stream.h
@@ -41,7 +41,7 @@
   // Implementation for writing data to this stream.
   //
   // If the in-memory buffer is exhausted in the middle of a write, this will
-  // perform a partial write and Status::RESOURCE_EXHAUSTED will be returned.
+  // perform a partial write and Status::ResourceExhausted() will be returned.
   Status DoWrite(ConstByteSpan data) override;
 
   ByteSpan dest_;
@@ -49,7 +49,7 @@
 };
 
 template <size_t size_bytes>
-class MemoryWriterBuffer : public MemoryWriter {
+class MemoryWriterBuffer final : public MemoryWriter {
  public:
   MemoryWriterBuffer() : MemoryWriter(buffer_) {}
 
@@ -57,7 +57,7 @@
   std::array<std::byte, size_bytes> buffer_;
 };
 
-class MemoryReader : public Reader {
+class MemoryReader final : public Reader {
  public:
   MemoryReader(ConstByteSpan source) : source_(source), bytes_read_(0) {}
 
diff --git a/pw_stream/public/pw_stream/null_stream.h b/pw_stream/public/pw_stream/null_stream.h
new file mode 100644
index 0000000..70048dc
--- /dev/null
+++ b/pw_stream/public/pw_stream/null_stream.h
@@ -0,0 +1,39 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <cstddef>
+#include <span>
+
+#include "pw_bytes/span.h"
+#include "pw_status/status.h"
+#include "pw_status/status_with_size.h"
+#include "pw_stream/stream.h"
+
+namespace pw::stream {
+
+// Stream writer which quietly drops all of the data, similar to /dev/null.
+class NullWriter final : public Writer {
+ public:
+  size_t ConservativeWriteLimit() const override {
+    // In theory this can sink as much as is addressable, however this way it is
+    // compliant with pw::StatusWithSize.
+    return StatusWithSize::max_size();
+  }
+
+ private:
+  Status DoWrite(ConstByteSpan data) override { return Status::Ok(); }
+};
+
+}  // namespace pw::stream
diff --git a/pw_stream/public/pw_stream/stream.h b/pw_stream/public/pw_stream/stream.h
index 66a5955..e52bb7c 100644
--- a/pw_stream/public/pw_stream/stream.h
+++ b/pw_stream/public/pw_stream/stream.h
@@ -17,7 +17,7 @@
 #include <cstddef>
 #include <span>
 
-#include "pw_assert/assert.h"
+#include "pw_assert/light.h"
 #include "pw_bytes/span.h"
 #include "pw_result/result.h"
 #include "pw_status/status.h"
@@ -61,7 +61,7 @@
   // OUT_OF_RANGE - Writer has been exhausted, similar to EOF. No data written,
   //     no more will be written.
   Status Write(ConstByteSpan data) {
-    PW_DCHECK(data.empty() || data.data() != nullptr);
+    PW_DASSERT(data.empty() || data.data() != nullptr);
     return DoWrite(data);
   }
   Status Write(const void* data, size_t size_bytes) {
@@ -73,7 +73,8 @@
   // written. This number is advisory and not guaranteed to write without a
   // RESOURCE_EXHAUSTED or OUT_OF_RANGE. As Writer processes/handles enqueued of
   // other contexts write data this number can go up or down for some Writers.
-  // Returns zero if, in the current state, Write() would not return Status::OK.
+  // Returns zero if, in the current state, Write() would not return
+  // Status::Ok().
   virtual size_t ConservativeWriteLimit() const = 0;
 
  private:
@@ -107,7 +108,7 @@
   // OUT_OF_RANGE - Reader has been exhausted, similar to EOF. No bytes read, no
   //     more will be read.
   Result<ByteSpan> Read(ByteSpan dest) {
-    PW_DCHECK(dest.empty() || dest.data() != nullptr);
+    PW_DASSERT(dest.empty() || dest.data() != nullptr);
     StatusWithSize result = DoRead(dest);
 
     if (result.ok()) {
@@ -125,7 +126,8 @@
   // requested bytes or without a RESOURCE_EXHAUSTED or OUT_OF_RANGE. As Reader
   // processes/handles/receives enqueued data or other contexts read data this
   // number can go up or down for some Readers.
-  // Returns zero if, in the current state, Read() would not return Status::OK.
+  // Returns zero if, in the current state, Read() would not return
+  // Status::Ok().
   virtual size_t ConservativeReadLimit() const = 0;
 
  private:
diff --git a/pw_string/BUILD.gn b/pw_string/BUILD.gn
index c657cee..400fa48 100644
--- a/pw_string/BUILD.gn
+++ b/pw_string/BUILD.gn
@@ -12,13 +12,13 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_bloat/bloat.gni")
 import("$dir_pw_build/target_types.gni")
 import("$dir_pw_docgen/docs.gni")
 import("$dir_pw_unit_test/test.gni")
+
 config("default_config") {
   include_dirs = [ "public" ]
 }
@@ -37,7 +37,6 @@
     "string_builder.cc",
     "type_to_string.cc",
   ]
-  sources += public
   public_deps = [
     "$dir_pw_preprocessor",
     "$dir_pw_span",
diff --git a/pw_string/CMakeLists.txt b/pw_string/CMakeLists.txt
index 24c48aa..92a14a4 100644
--- a/pw_string/CMakeLists.txt
+++ b/pw_string/CMakeLists.txt
@@ -12,6 +12,8 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+
 pw_auto_add_simple_module(pw_string
   PUBLIC_DEPS
     pw_preprocessor
diff --git a/pw_string/docs.rst b/pw_string/docs.rst
index 37df397..8944100 100644
--- a/pw_string/docs.rst
+++ b/pw_string/docs.rst
@@ -1,8 +1,4 @@
-.. _chapter-pw-string:
-
-.. default-domain:: cpp
-
-.. highlight:: sh
+.. _module-pw_string:
 
 ---------
 pw_string
diff --git a/pw_string/format.cc b/pw_string/format.cc
index c9ed379..c08bf3c 100644
--- a/pw_string/format.cc
+++ b/pw_string/format.cc
@@ -31,7 +31,7 @@
                             const char* format,
                             va_list args) {
   if (buffer.empty()) {
-    return StatusWithSize::RESOURCE_EXHAUSTED;
+    return StatusWithSize::ResourceExhausted();
   }
 
   const int result = std::vsnprintf(buffer.data(), buffer.size(), format, args);
@@ -40,12 +40,12 @@
   // Discard any output by terminating the buffer.
   if (result < 0) {
     buffer[0] = '\0';
-    return StatusWithSize::INVALID_ARGUMENT;
+    return StatusWithSize::InvalidArgument();
   }
 
   // If result >= buffer.size(), the output was truncated and null-terminated.
   if (static_cast<unsigned>(result) >= buffer.size()) {
-    return StatusWithSize(Status::RESOURCE_EXHAUSTED, buffer.size() - 1);
+    return StatusWithSize::ResourceExhausted(buffer.size() - 1);
   }
 
   return StatusWithSize(result);
diff --git a/pw_string/format_test.cc b/pw_string/format_test.cc
index dfaac7b..b7aab52 100644
--- a/pw_string/format_test.cc
+++ b/pw_string/format_test.cc
@@ -26,7 +26,7 @@
   char buffer[32];
   auto result = Format(buffer, "-_-");
 
-  EXPECT_EQ(Status::OK, result.status());
+  EXPECT_EQ(Status::Ok(), result.status());
   EXPECT_EQ(3u, result.size());
   EXPECT_STREQ("-_-", buffer);
 }
@@ -35,7 +35,7 @@
   char buffer[32];
   auto result = Format(buffer, "%d4%s", 123, "5");
 
-  EXPECT_EQ(Status::OK, result.status());
+  EXPECT_EQ(Status::Ok(), result.status());
   EXPECT_EQ(5u, result.size());
   EXPECT_STREQ("12345", buffer);
 }
@@ -43,7 +43,7 @@
 TEST(Format, EmptyBuffer_ReturnsResourceExhausted) {
   auto result = Format(std::span<char>(), "?");
 
-  EXPECT_EQ(Status::RESOURCE_EXHAUSTED, result.status());
+  EXPECT_EQ(Status::ResourceExhausted(), result.status());
   EXPECT_EQ(0u, result.size());
 }
 
@@ -51,7 +51,7 @@
   char buffer[5];
   auto result = Format(buffer, "2big!");
 
-  EXPECT_EQ(Status::RESOURCE_EXHAUSTED, result.status());
+  EXPECT_EQ(Status::ResourceExhausted(), result.status());
   EXPECT_EQ(4u, result.size());
   EXPECT_STREQ("2big", buffer);
 }
@@ -60,7 +60,7 @@
   char buffer[5];
   auto result = Format(buffer, "%s", "2big!");
 
-  EXPECT_EQ(Status::RESOURCE_EXHAUSTED, result.status());
+  EXPECT_EQ(Status::ResourceExhausted(), result.status());
   EXPECT_EQ(4u, result.size());
   EXPECT_STREQ("2big", buffer);
 }
@@ -81,7 +81,7 @@
   char buffer[8];
   auto result = CallFormatWithVaList(buffer, "Yo%s", "?!");
 
-  EXPECT_EQ(Status::OK, result.status());
+  EXPECT_EQ(Status::Ok(), result.status());
   EXPECT_EQ(4u, result.size());
   EXPECT_STREQ("Yo?!", buffer);
 }
diff --git a/pw_string/public/pw_string/format.h b/pw_string/public/pw_string/format.h
index 5c0adfe..f3aa337 100644
--- a/pw_string/public/pw_string/format.h
+++ b/pw_string/public/pw_string/format.h
@@ -35,9 +35,9 @@
 //
 // The status is
 //
-//   Status::OK if the operation succeeded,
-//   Status::RESOURCE_EXHAUSTED if the buffer was too small to fit the output,
-//   Status::INVALID_ARGUMENT if there was a formatting error.
+//   Status::Ok() if the operation succeeded,
+//   Status::ResourceExhausted() if the buffer was too small to fit the output,
+//   Status::InvalidArgument() if there was a formatting error.
 //
 PW_PRINTF_FORMAT(2, 3)
 StatusWithSize Format(std::span<char> buffer, const char* format, ...);
diff --git a/pw_string/public/pw_string/string_builder.h b/pw_string/public/pw_string/string_builder.h
index 96a88ef..425fbfb 100644
--- a/pw_string/public/pw_string/string_builder.h
+++ b/pw_string/public/pw_string/string_builder.h
@@ -89,6 +89,9 @@
   constexpr StringBuilder(std::span<char> buffer) : buffer_(buffer), size_(0) {
     NullTerminate();
   }
+  StringBuilder(std::span<std::byte> buffer)
+      : StringBuilder(
+            {reinterpret_cast<char*>(buffer.data()), buffer.size_bytes()}) {}
 
   // Disallow copy/assign to avoid confusion about where the string is actually
   // stored. StringBuffers may be copied into one another.
@@ -132,7 +135,7 @@
   // The status from the last operation. May be OK while status() is not OK.
   Status last_status() const { return last_status_; }
 
-  // True if status() is Status::OK.
+  // True if status() is Status::Ok().
   bool ok() const { return status_.ok(); }
 
   // True if the string is empty.
@@ -147,10 +150,10 @@
   // Clears the string and resets its error state.
   void clear();
 
-  // Sets the statuses to Status::OK;
+  // Sets the statuses to Status::Ok();
   void clear_status() {
-    status_ = Status::OK;
-    last_status_ = Status::OK;
+    status_ = Status::Ok();
+    last_status_ = Status::Ok();
   }
 
   // Appends a single character. Stets the status to RESOURCE_EXHAUSTED if the
diff --git a/pw_string/size_report/BUILD.gn b/pw_string/size_report/BUILD.gn
index 8307f8f..47d11c7 100644
--- a/pw_string/size_report/BUILD.gn
+++ b/pw_string/size_report/BUILD.gn
@@ -12,10 +12,10 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/target_types.gni")
+
 pw_executable("single_write_snprintf") {
   sources = [ "format_single.cc" ]
   deps = [
diff --git a/pw_string/string_builder.cc b/pw_string/string_builder.cc
index b433e7f..1bc3b1f 100644
--- a/pw_string/string_builder.cc
+++ b/pw_string/string_builder.cc
@@ -24,8 +24,8 @@
 void StringBuilder::clear() {
   size_ = 0;
   NullTerminate();
-  status_ = Status::OK;
-  last_status_ = Status::OK;
+  status_ = Status::Ok();
+  last_status_ = Status::Ok();
 }
 
 StringBuilder& StringBuilder::append(size_t count, char ch) {
@@ -55,7 +55,7 @@
                                      size_t pos,
                                      size_t count) {
   if (pos > str.size()) {
-    SetErrorStatus(Status::OUT_OF_RANGE);
+    SetErrorStatus(Status::OutOfRange());
     return *this;
   }
 
@@ -68,9 +68,9 @@
   NullTerminate();
 
   if (buffer_.empty() || chars_to_append != copied) {
-    SetErrorStatus(Status::RESOURCE_EXHAUSTED);
+    SetErrorStatus(Status::ResourceExhausted());
   } else {
-    last_status_ = Status::OK;
+    last_status_ = Status::Ok();
   }
   return copied;
 }
@@ -79,9 +79,9 @@
   if (new_size <= size_) {
     size_ = new_size;
     NullTerminate();
-    last_status_ = Status::OK;
+    last_status_ = Status::Ok();
   } else {
-    SetErrorStatus(Status::OUT_OF_RANGE);
+    SetErrorStatus(Status::OutOfRange());
   }
 }
 
diff --git a/pw_string/string_builder_test.cc b/pw_string/string_builder_test.cc
index c36c748..cfef0e7 100644
--- a/pw_string/string_builder_test.cc
+++ b/pw_string/string_builder_test.cc
@@ -75,7 +75,7 @@
   StringBuilder sb(std::span(buffer, 0));
 
   sb << CustomType() << " is " << 12345;
-  EXPECT_EQ(Status::RESOURCE_EXHAUSTED, sb.status());
+  EXPECT_EQ(Status::ResourceExhausted(), sb.status());
   EXPECT_EQ(kNoTouch, std::string_view(buffer, sizeof(buffer)));
 }
 
@@ -102,44 +102,44 @@
 
 TEST(StringBuilder, EmptyBuffer_AppendEmpty_ResourceExhausted) {
   StringBuilder sb(std::span<char>{});
-  EXPECT_EQ(Status::OK, sb.last_status());
-  EXPECT_EQ(Status::OK, sb.status());
+  EXPECT_EQ(Status::Ok(), sb.last_status());
+  EXPECT_EQ(Status::Ok(), sb.status());
 
   sb << "";
 
-  EXPECT_EQ(Status::RESOURCE_EXHAUSTED, sb.last_status());
-  EXPECT_EQ(Status::RESOURCE_EXHAUSTED, sb.status());
+  EXPECT_EQ(Status::ResourceExhausted(), sb.last_status());
+  EXPECT_EQ(Status::ResourceExhausted(), sb.status());
 }
 
 TEST(StringBuilder, Status_StartsOk) {
   StringBuffer<16> sb;
-  EXPECT_EQ(Status::OK, sb.status());
-  EXPECT_EQ(Status::OK, sb.last_status());
+  EXPECT_EQ(Status::Ok(), sb.status());
+  EXPECT_EQ(Status::Ok(), sb.last_status());
 }
 
 TEST(StringBuilder, Status_StatusAndLastStatusUpdate) {
   StringBuffer<16> sb;
   sb << "Well, if only there were enough room in here for this string";
-  EXPECT_EQ(Status::RESOURCE_EXHAUSTED, sb.status());
-  EXPECT_EQ(Status::RESOURCE_EXHAUSTED, sb.last_status());
+  EXPECT_EQ(Status::ResourceExhausted(), sb.status());
+  EXPECT_EQ(Status::ResourceExhausted(), sb.last_status());
 
   sb.resize(1029);
-  EXPECT_EQ(Status::OUT_OF_RANGE, sb.status());
-  EXPECT_EQ(Status::OUT_OF_RANGE, sb.last_status());
+  EXPECT_EQ(Status::OutOfRange(), sb.status());
+  EXPECT_EQ(Status::OutOfRange(), sb.last_status());
 
   sb << "";
-  EXPECT_EQ(Status::OUT_OF_RANGE, sb.status());
-  EXPECT_EQ(Status::OK, sb.last_status());
+  EXPECT_EQ(Status::OutOfRange(), sb.status());
+  EXPECT_EQ(Status::Ok(), sb.last_status());
 }
 
 TEST(StringBuilder, Status_ClearStatus_SetsStatuesToOk) {
   StringBuffer<2> sb = MakeString<2>("Won't fit!!!!!");
-  EXPECT_EQ(Status::RESOURCE_EXHAUSTED, sb.status());
-  EXPECT_EQ(Status::RESOURCE_EXHAUSTED, sb.last_status());
+  EXPECT_EQ(Status::ResourceExhausted(), sb.status());
+  EXPECT_EQ(Status::ResourceExhausted(), sb.last_status());
 
   sb.clear_status();
-  EXPECT_EQ(Status::OK, sb.status());
-  EXPECT_EQ(Status::OK, sb.last_status());
+  EXPECT_EQ(Status::Ok(), sb.status());
+  EXPECT_EQ(Status::Ok(), sb.last_status());
 }
 
 TEST(StringBuilder, StreamOutput_OutputSelf) {
@@ -153,7 +153,7 @@
 TEST(StringBuilder, PushBack) {
   StringBuffer<12> sb;
   sb.push_back('?');
-  EXPECT_EQ(Status::OK, sb.last_status());
+  EXPECT_EQ(Status::Ok(), sb.last_status());
   EXPECT_EQ(1u, sb.size());
   EXPECT_STREQ("?", sb.data());
 }
@@ -161,14 +161,14 @@
 TEST(StringBuilder, PushBack_Full) {
   StringBuffer<1> sb;
   sb.push_back('!');
-  EXPECT_EQ(Status::RESOURCE_EXHAUSTED, sb.last_status());
+  EXPECT_EQ(Status::ResourceExhausted(), sb.last_status());
   EXPECT_EQ(0u, sb.size());
 }
 
 TEST(StringBuilder, PopBack) {
   auto sb = MakeString<12>("Welcome!");
   sb.pop_back();
-  EXPECT_EQ(Status::OK, sb.last_status());
+  EXPECT_EQ(Status::Ok(), sb.last_status());
   EXPECT_EQ(7u, sb.size());
   EXPECT_STREQ("Welcome", sb.data());
 }
@@ -176,7 +176,7 @@
 TEST(StringBuilder, PopBack_Empty) {
   StringBuffer<12> sb;
   sb.pop_back();
-  EXPECT_EQ(Status::OUT_OF_RANGE, sb.last_status());
+  EXPECT_EQ(Status::OutOfRange(), sb.last_status());
   EXPECT_EQ(0u, sb.size());
 }
 
@@ -185,7 +185,7 @@
   std::memset(bad_string, '?', sizeof(bad_string));
 
   StringBuffer<6> sb;
-  EXPECT_EQ(Status::RESOURCE_EXHAUSTED, sb.append(bad_string).last_status());
+  EXPECT_EQ(Status::ResourceExhausted(), sb.append(bad_string).last_status());
   EXPECT_STREQ("?????", sb.data());
 }
 
@@ -199,7 +199,7 @@
 TEST(StringBuilder, Append_Chars_Full) {
   StringBuffer<8> sb;
 
-  EXPECT_EQ(Status::RESOURCE_EXHAUSTED, sb.append(8, '?').last_status());
+  EXPECT_EQ(Status::ResourceExhausted(), sb.append(8, '?').last_status());
   EXPECT_STREQ("???????", sb.data());
 }
 
@@ -219,8 +219,8 @@
 
 TEST(StringBuilder, Append_CString_Full) {
   auto sb = MakeString<6>("hello");
-  EXPECT_EQ(Status::RESOURCE_EXHAUSTED, sb.append("890123", 1).last_status());
-  EXPECT_EQ(Status::RESOURCE_EXHAUSTED, sb.status());
+  EXPECT_EQ(Status::ResourceExhausted(), sb.append("890123", 1).last_status());
+  EXPECT_EQ(Status::ResourceExhausted(), sb.status());
   EXPECT_EQ(sb.max_size(), sb.size());
   EXPECT_STREQ("hello", sb.data());
 }
@@ -263,20 +263,21 @@
   auto sb = MakeString<12>("Four");
   EXPECT_EQ(4u, sb.size());
   sb.resize(10);
-  EXPECT_EQ(sb.status(), Status::OUT_OF_RANGE);
+  EXPECT_EQ(sb.status(), Status::OutOfRange());
   EXPECT_EQ(4u, sb.size());
 }
 
 TEST(StringBuilder, Resize_LargerThanCapacity_Fails) {
   auto sb = MakeString<12>("Four");
   sb.resize(1234);
-  EXPECT_EQ(sb.status(), Status::OUT_OF_RANGE);
+  EXPECT_EQ(sb.status(), Status::OutOfRange());
   EXPECT_EQ(4u, sb.size());
   EXPECT_STREQ("Four", sb.data());
 }
 
 TEST(StringBuilder, Format_Normal) {
-  StringBuffer<64> sb;
+  std::byte buffer[64];
+  StringBuilder sb(buffer);
   EXPECT_TRUE(sb.Format("0x%x", 0xabc).ok());
   EXPECT_STREQ("0xabc", sb.data());
 
@@ -288,10 +289,10 @@
 
 TEST(StringBuilder, Format_ExhaustBuffer) {
   StringBuffer<6> sb;
-  EXPECT_EQ(Status::RESOURCE_EXHAUSTED, sb.Format("012345").status());
+  EXPECT_EQ(Status::ResourceExhausted(), sb.Format("012345").status());
 
   EXPECT_STREQ("01234", sb.data());
-  EXPECT_EQ(Status::RESOURCE_EXHAUSTED, sb.status());
+  EXPECT_EQ(Status::ResourceExhausted(), sb.status());
 }
 
 TEST(StringBuilder, StreamOutput_MultipleTypes) {
@@ -315,7 +316,7 @@
 
   sb << true << "Now it's way " << static_cast<unsigned char>(2) << " long";
   EXPECT_FALSE(sb.ok());
-  EXPECT_EQ(Status::RESOURCE_EXHAUSTED, sb.status());
+  EXPECT_EQ(Status::ResourceExhausted(), sb.status());
   EXPECT_STREQ("0true", sb.data());
 }
 
@@ -341,7 +342,7 @@
   EXPECT_EQ(2u, sb.size());
   sb << "234";
   EXPECT_STREQ("012", sb.data());
-  EXPECT_EQ(Status::RESOURCE_EXHAUSTED, sb.status());
+  EXPECT_EQ(Status::ResourceExhausted(), sb.status());
   EXPECT_EQ(3u, sb.size());
 }
 
@@ -352,7 +353,7 @@
   StringBuffer<6> sb;
   sb << "hey" << bad_string;
 
-  EXPECT_EQ(Status::RESOURCE_EXHAUSTED, sb.status());
+  EXPECT_EQ(Status::ResourceExhausted(), sb.status());
   EXPECT_STREQ("hey??", sb.data());
 }
 
@@ -361,7 +362,7 @@
   constexpr std::string_view hello("hello");
 
   buffer << hello;
-  EXPECT_EQ(Status::OK, buffer.status());
+  EXPECT_EQ(Status::Ok(), buffer.status());
   EXPECT_STREQ("hello", buffer.data());
 }
 
@@ -389,13 +390,13 @@
 
   two << "0123456789";
   ASSERT_STREQ("What heck", two.data());
-  ASSERT_EQ(Status::RESOURCE_EXHAUSTED, two.status());
-  ASSERT_EQ(Status::RESOURCE_EXHAUSTED, two.last_status());
+  ASSERT_EQ(Status::ResourceExhausted(), two.status());
+  ASSERT_EQ(Status::ResourceExhausted(), two.last_status());
 
   one = two;
   EXPECT_STREQ("What heck", one.data());
-  EXPECT_EQ(Status::RESOURCE_EXHAUSTED, one.status());
-  EXPECT_EQ(Status::RESOURCE_EXHAUSTED, one.last_status());
+  EXPECT_EQ(Status::ResourceExhausted(), one.status());
+  EXPECT_EQ(Status::ResourceExhausted(), one.last_status());
 
   StringBuffer<12> three;
   three = two;
@@ -420,8 +421,8 @@
   two << "0123456789";
   two << "";
   ASSERT_STREQ("What heck", two.data());
-  ASSERT_EQ(Status::RESOURCE_EXHAUSTED, two.status());
-  ASSERT_EQ(Status::OK, two.last_status());
+  ASSERT_EQ(Status::ResourceExhausted(), two.status());
+  ASSERT_EQ(Status::Ok(), two.last_status());
 }
 
 TEST(StringBuffer, CopyConstructFromSmaller) {
@@ -429,7 +430,7 @@
   StringBuffer<12> two(one);
 
   EXPECT_STREQ("You are t", two.data());
-  EXPECT_EQ(Status::RESOURCE_EXHAUSTED, two.status());
+  EXPECT_EQ(Status::ResourceExhausted(), two.status());
 }
 
 TEST(StringBuilder, Object) {
@@ -513,7 +514,7 @@
 TEST(MakeString, LargerThanDefaultSize_Truncates) {
   auto sb = MakeString("1844674407", 3709551615, 123456);
 
-  EXPECT_EQ(Status::RESOURCE_EXHAUSTED, sb.status());
+  EXPECT_EQ(Status::ResourceExhausted(), sb.status());
   EXPECT_STREQ(kLongestString, sb.data());
 }
 
diff --git a/pw_string/to_string_test.cc b/pw_string/to_string_test.cc
index 6f4a4da..2f3608d 100644
--- a/pw_string/to_string_test.cc
+++ b/pw_string/to_string_test.cc
@@ -43,13 +43,13 @@
   int result =
       std::snprintf(buffer.data(), buffer.size(), CustomType::kToString);
   if (result < 0) {
-    return StatusWithSize::UNKNOWN;
+    return StatusWithSize::Unknown();
   }
   if (static_cast<size_t>(result) < buffer.size()) {
     return StatusWithSize(result);
   }
-  return StatusWithSize(Status::RESOURCE_EXHAUSTED,
-                        buffer.empty() ? 0u : buffer.size() - 1);
+  return StatusWithSize::ResourceExhausted(buffer.empty() ? 0u
+                                                          : buffer.size() - 1);
 }
 
 namespace {
@@ -101,14 +101,14 @@
 
   auto result = ToString(MyEnum::kLuckyNumber, buffer);
   EXPECT_EQ(1u, result.size());
-  EXPECT_EQ(Status::OK, result.status());
+  EXPECT_EQ(Status::Ok(), result.status());
   EXPECT_STREQ("8", buffer);
 }
 
 TEST(ToString, Integer_EmptyBuffer_WritesNothing) {
   auto result = ToString(-1234, std::span(buffer, 0));
   EXPECT_EQ(0u, result.size());
-  EXPECT_EQ(Status::RESOURCE_EXHAUSTED, result.status());
+  EXPECT_EQ(Status::ResourceExhausted(), result.status());
 }
 
 TEST(ToString, Integer_BufferTooSmall_WritesNullTerminator) {
@@ -218,7 +218,7 @@
 
 TEST(ToString, StatusCode) {
   EXPECT_EQ(sizeof("UNAVAILABLE") - 1,
-            ToString(Status::UNAVAILABLE, buffer).size());
+            ToString(Status::Unavailable(), buffer).size());
   EXPECT_STREQ("UNAVAILABLE", buffer);
 }
 
diff --git a/pw_string/type_to_string.cc b/pw_string/type_to_string.cc
index ce417e1..483d10e 100644
--- a/pw_string/type_to_string.cc
+++ b/pw_string/type_to_string.cc
@@ -51,7 +51,7 @@
   if (!buffer.empty()) {
     buffer[0] = '\0';
   }
-  return StatusWithSize::RESOURCE_EXHAUSTED;
+  return StatusWithSize::ResourceExhausted();
 }
 
 }  // namespace
@@ -183,14 +183,15 @@
 StatusWithSize CopyString(const std::string_view& value,
                           std::span<char> buffer) {
   if (buffer.empty()) {
-    return StatusWithSize::RESOURCE_EXHAUSTED;
+    return StatusWithSize::ResourceExhausted();
   }
 
   const size_t copied = value.copy(buffer.data(), buffer.size() - 1);
   buffer[copied] = '\0';
 
   return StatusWithSize(
-      copied == value.size() ? Status::OK : Status::RESOURCE_EXHAUSTED, copied);
+      copied == value.size() ? Status::Ok() : Status::ResourceExhausted(),
+      copied);
 }
 
 StatusWithSize CopyEntireString(const std::string_view& value,
diff --git a/pw_sys_io/BUILD.gn b/pw_sys_io/BUILD.gn
index ce2b30f..bdff7eb 100644
--- a/pw_sys_io/BUILD.gn
+++ b/pw_sys_io/BUILD.gn
@@ -12,12 +12,12 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/facade.gni")
 import("$dir_pw_build/target_types.gni")
 import("$dir_pw_docgen/docs.gni")
+
 declare_args() {
   # Backend for the pw_sys_io module.
   pw_sys_io_BACKEND = ""
diff --git a/pw_sys_io/CMakeLists.txt b/pw_sys_io/CMakeLists.txt
index 7ca3860..ee69592 100644
--- a/pw_sys_io/CMakeLists.txt
+++ b/pw_sys_io/CMakeLists.txt
@@ -12,6 +12,8 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+
 pw_add_facade(pw_sys_io
   SOURCES
     sys_io.cc
diff --git a/pw_sys_io/docs.rst b/pw_sys_io/docs.rst
index c87e0ac..cde6a3c 100644
--- a/pw_sys_io/docs.rst
+++ b/pw_sys_io/docs.rst
@@ -1,8 +1,4 @@
-.. _chapter-pw-sys-io:
-
-.. default-domain:: cpp
-
-.. highlight:: sh
+.. _module-pw_sys_io:
 
 ---------
 pw_sys_io
diff --git a/pw_sys_io/public/pw_sys_io/sys_io.h b/pw_sys_io/public/pw_sys_io/sys_io.h
index 5f545b3..477c719 100644
--- a/pw_sys_io/public/pw_sys_io/sys_io.h
+++ b/pw_sys_io/public/pw_sys_io/sys_io.h
@@ -20,7 +20,7 @@
 //
 // This facade doesn't dictate any policies on input and output data encoding,
 // format, or transmission protocol. It only requires that backends return a
-// Status::OK if the operation succeeds. Backends may provide useful error
+// Status::Ok() if the operation succeeds. Backends may provide useful error
 // Status types, but depending on the implementation-specific Status values is
 // NOT recommended. Since this facade provides a very vague I/O interface, it
 // does NOT provide tests. Backends are expected to provide their own testing to
@@ -51,16 +51,25 @@
 // This function will block until it either succeeds or fails to read a byte
 // from the pw_sys_io backend.
 //
-// Returns Status::OK if a byte was successfully read.
+// Returns Status::Ok() - A byte was successfully read.
+//         Status::ResourceExhausted() - if the underlying source vanished.
 Status ReadByte(std::byte* dest);
 
+// Read a single byte from the sys io backend, if available.
+// Implemented by: Backend
+//
+// Returns Status::Ok() - A byte was successfully read, and is in dest.
+//         Status::Unavailable() - No byte is available to read; try later.
+//         Status::Unimplemented() - Not supported on this target.
+Status TryReadByte(std::byte* dest);
+
 // Write a single byte out the sys io backend.
 // Implemented by: Backend
 //
 // This function will block until it either succeeds or fails to write a byte
 // out the pw_sys_io backend.
 //
-// Returns Status::OK if a byte was successfully read.
+// Returns Status::Ok() if a byte was successfully read.
 Status WriteByte(std::byte b);
 
 // Write a string out the sys io backend.
@@ -70,7 +79,7 @@
 // backend, adding any platform-specific newline character(s) (these are
 // accounted for in the returned StatusWithSize).
 //
-// Return status is Status::OK if all the bytes from the source string were
+// Return status is Status::Ok() if all the bytes from the source string were
 // successfully written. In all cases, the number of bytes successfully written
 // are returned as part of the StatusWithSize.
 StatusWithSize WriteLine(const std::string_view& s);
@@ -84,9 +93,9 @@
 // undefined. This function blocks until either an error occurs, or all bytes
 // are successfully read from the backend's ReadByte() implementation.
 //
-// Return status is Status::OK if the destination span was successfully filled.
-// In all cases, the number of bytes successuflly read to the destination span
-// are returned as part of the StatusWithSize.
+// Return status is Status::Ok() if the destination span was successfully
+// filled. In all cases, the number of bytes successuflly read to the
+// destination span are returned as part of the StatusWithSize.
 StatusWithSize ReadBytes(std::span<std::byte> dest);
 
 // Write std::span of bytes out the sys io backend using WriteByte().
@@ -98,7 +107,7 @@
 // either an error occurs, or all bytes are successfully read from the backend's
 // WriteByte() implementation.
 //
-// Return status is Status::OK if all the bytes from the source span were
+// Return status is Status::Ok() if all the bytes from the source span were
 // successfully written. In all cases, the number of bytes successfully written
 // are returned as part of the StatusWithSize.
 StatusWithSize WriteBytes(std::span<const std::byte> src);
diff --git a/pw_sys_io_arduino/BUILD b/pw_sys_io_arduino/BUILD
new file mode 100644
index 0000000..3735a73
--- /dev/null
+++ b/pw_sys_io_arduino/BUILD
@@ -0,0 +1,33 @@
+# Copyright 2019 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+load(
+    "//pw_build:pigweed.bzl",
+    "pw_cc_library",
+)
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])  # Apache License 2.0
+
+pw_cc_library(
+    name = "pw_sys_io_arduino",
+    srcs = ["sys_io_arduino.cc"],
+    hdrs = ["public/pw_sys_io_arduino/init.h"],
+    deps = [
+        "//pw_boot_armv7m",
+        "//pw_preprocessor",
+        "//pw_sys_io",
+    ]
+)
diff --git a/pw_sys_io_arduino/BUILD.gn b/pw_sys_io_arduino/BUILD.gn
new file mode 100644
index 0000000..ee9b770
--- /dev/null
+++ b/pw_sys_io_arduino/BUILD.gn
@@ -0,0 +1,42 @@
+# Copyright 2019 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_arduino_build/arduino.gni")
+import("$dir_pw_build/target_types.gni")
+import("$dir_pw_docgen/docs.gni")
+
+config("default_config") {
+  include_dirs = [ "public" ]
+}
+
+if (dir_pw_third_party_arduino != "") {
+  pw_source_set("pw_sys_io_arduino") {
+    remove_configs = [ "$dir_pw_build:strict_warnings" ]
+    public_configs = [ ":default_config" ]
+    public = [ "public/pw_sys_io_arduino/init.h" ]
+    public_deps = [ "$dir_pw_preprocessor" ]
+    deps = [
+      "$dir_pw_sys_io:default_putget_bytes",
+      "$dir_pw_sys_io:facade",
+      "$dir_pw_third_party_arduino:arduino_core_sources",
+    ]
+    sources = [ "sys_io_arduino.cc" ]
+  }
+}
+
+pw_doc_group("docs") {
+  sources = [ "docs.rst" ]
+}
diff --git a/pw_sys_io_arduino/docs.rst b/pw_sys_io_arduino/docs.rst
new file mode 100644
index 0000000..79c533d
--- /dev/null
+++ b/pw_sys_io_arduino/docs.rst
@@ -0,0 +1,27 @@
+.. _module-pw_sys_io_arduino:
+
+-----------------
+pw_sys_io_arduino
+-----------------
+
+``pw_sys_io_arduino`` implements the ``pw_sys_io`` facade over
+`Arduino's Serial interface <https://www.arduino.cc/reference/en/language/functions/communication/serial/>`_.
+
+On initialization it runs Arduino's first ``Serial`` interface at a 115200 baud
+rate:
+
+.. code-block:: cpp
+
+  Serial.begin(115200);
+
+  // Wait for serial port to be available
+  while (!Serial) {
+  }
+
+After ``Serial.begin(115200)`` it will busy wait until a host connects to the
+serial port.
+
+.. seealso::
+   - :ref:`target-arduino` target documentation for a list of working hardware.
+   - :ref:`module-pw_arduino_build` for caveats when running Pigweed on top of
+     the Arduino API.
diff --git a/pw_sys_io_baremetal_lm3s6965evb/early_boot.c b/pw_sys_io_arduino/public/pw_sys_io_arduino/init.h
similarity index 77%
copy from pw_sys_io_baremetal_lm3s6965evb/early_boot.c
copy to pw_sys_io_arduino/public/pw_sys_io_arduino/init.h
index 1670b7d..d4262c5 100644
--- a/pw_sys_io_baremetal_lm3s6965evb/early_boot.c
+++ b/pw_sys_io_arduino/public/pw_sys_io_arduino/init.h
@@ -11,7 +11,13 @@
 // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 // License for the specific language governing permissions and limitations under
 // the License.
+#pragma once
 
-#include "pw_boot_armv7m/boot.h"
+#include "pw_preprocessor/util.h"
 
-void pw_PreStaticConstructorInit() {}
\ No newline at end of file
+PW_EXTERN_C_START
+
+// The actual implement of PreMainInit() in sys_io_BACKEND.
+void pw_sys_io_Init();
+
+PW_EXTERN_C_END
diff --git a/pw_sys_io_arduino/sys_io_arduino.cc b/pw_sys_io_arduino/sys_io_arduino.cc
new file mode 100644
index 0000000..de6c344
--- /dev/null
+++ b/pw_sys_io_arduino/sys_io_arduino.cc
@@ -0,0 +1,81 @@
+// Copyright 2019 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include <Arduino.h>
+
+#include <cinttypes>
+#include <cstdint>
+
+#include "pw_preprocessor/compiler.h"
+#include "pw_sys_io/sys_io.h"
+
+extern "C" void pw_sys_io_Init() {
+  Serial.begin(115200);
+  // Wait for serial port to be available
+  while (!Serial) {
+  }
+}
+
+namespace pw::sys_io {
+
+// Wait for a byte to read on USART1. This blocks until a byte is read. This is
+// extremely inefficient as it requires the target to burn CPU cycles polling to
+// see if a byte is ready yet.
+
+Status ReadByte(std::byte* dest) {
+  while (true) {
+    if (TryReadByte(dest).ok()) {
+      return Status::Ok();
+    }
+  }
+}
+
+Status TryReadByte(std::byte* dest) {
+  if (!Serial.available()) {
+    return Status::Unavailable();
+  }
+  *dest = static_cast<std::byte>(Serial.read());
+  return Status::Ok();
+}
+
+// Send a byte over USART1. Since this blocks on every byte, it's rather
+// inefficient. At the default baud rate of 115200, one byte blocks the CPU for
+// ~87 micro seconds. This means it takes only 10 bytes to block the CPU for
+// 1ms!
+Status WriteByte(std::byte b) {
+  // Wait for TX buffer to be empty. When the buffer is empty, we can write
+  // a value to be dumped out of UART.
+  while (Serial.availableForWrite() < 1) {
+  }
+  Serial.write((uint8_t)b);
+  return Status::Ok();
+}
+
+// Writes a string using pw::sys_io, and add newline characters at the end.
+StatusWithSize WriteLine(const std::string_view& s) {
+  size_t chars_written = 0;
+  StatusWithSize result = WriteBytes(std::as_bytes(std::span(s)));
+  if (!result.ok()) {
+    return result;
+  }
+  chars_written += result.size();
+
+  // Write trailing newline.
+  result = WriteBytes(std::as_bytes(std::span("\r\n", 2)));
+  chars_written += result.size();
+
+  return StatusWithSize(result.status(), chars_written);
+}
+
+}  // namespace pw::sys_io
diff --git a/pw_sys_io_baremetal_lm3s6965evb/BUILD b/pw_sys_io_baremetal_lm3s6965evb/BUILD
index 78d4b76..85fe01a 100644
--- a/pw_sys_io_baremetal_lm3s6965evb/BUILD
+++ b/pw_sys_io_baremetal_lm3s6965evb/BUILD
@@ -12,14 +12,22 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+load(
+    "//pw_build:pigweed.bzl",
+		"pw_cc_library",
+)
+
 package(default_visibility = ["//visibility:public"])
 
 licenses(["notice"])  # Apache License 2.0
 
-filegroup(
+pw_cc_library(
     name = "pw_sys_io_baremetal_lm3s6965evb",
-    srcs = [
-        "early_boot.c",
-        "sys_io_baremetal.cc",
-    ],
+    hdrs = ["public/pw_sys_io_baremetal_lm3s6965evb/init.h"],
+    srcs = ["sys_io_baremetal.cc"],
+		deps = [
+        "//pw_preprocessor",
+				"//pw_sys_io",
+		],
 )
+
diff --git a/pw_sys_io_baremetal_lm3s6965evb/BUILD.gn b/pw_sys_io_baremetal_lm3s6965evb/BUILD.gn
index b6cec70..5ff5e86 100644
--- a/pw_sys_io_baremetal_lm3s6965evb/BUILD.gn
+++ b/pw_sys_io_baremetal_lm3s6965evb/BUILD.gn
@@ -12,20 +12,25 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/target_types.gni")
 import("$dir_pw_docgen/docs.gni")
+
+config("default_config") {
+  include_dirs = [ "public" ]
+}
+
 pw_source_set("pw_sys_io_baremetal_lm3s6965evb") {
-  public_deps = [ "$dir_pw_boot_armv7m" ]
-  deps = [
+  public_configs = [ ":default_config" ]
+  public = [ "public/pw_sys_io_baremetal_lm3s6965evb/init.h" ]
+  public_deps = [
+    "$dir_pw_boot_armv7m",
     "$dir_pw_preprocessor",
+  ]
+  deps = [
     "$dir_pw_sys_io:default_putget_bytes",
     "$dir_pw_sys_io:facade",
   ]
-  sources = [
-    "early_boot.c",
-    "sys_io_baremetal.cc",
-  ]
+  sources = [ "sys_io_baremetal.cc" ]
 }
diff --git a/pw_sys_io_baremetal_lm3s6965evb/early_boot.c b/pw_sys_io_baremetal_lm3s6965evb/public/pw_sys_io_baremetal_lm3s6965evb/init.h
similarity index 77%
copy from pw_sys_io_baremetal_lm3s6965evb/early_boot.c
copy to pw_sys_io_baremetal_lm3s6965evb/public/pw_sys_io_baremetal_lm3s6965evb/init.h
index 1670b7d..d4262c5 100644
--- a/pw_sys_io_baremetal_lm3s6965evb/early_boot.c
+++ b/pw_sys_io_baremetal_lm3s6965evb/public/pw_sys_io_baremetal_lm3s6965evb/init.h
@@ -11,7 +11,13 @@
 // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 // License for the specific language governing permissions and limitations under
 // the License.
+#pragma once
 
-#include "pw_boot_armv7m/boot.h"
+#include "pw_preprocessor/util.h"
 
-void pw_PreStaticConstructorInit() {}
\ No newline at end of file
+PW_EXTERN_C_START
+
+// The actual implement of PreMainInit() in sys_io_BACKEND.
+void pw_sys_io_Init();
+
+PW_EXTERN_C_END
diff --git a/pw_sys_io_baremetal_lm3s6965evb/sys_io_baremetal.cc b/pw_sys_io_baremetal_lm3s6965evb/sys_io_baremetal.cc
index 9794510..86cc341 100644
--- a/pw_sys_io_baremetal_lm3s6965evb/sys_io_baremetal.cc
+++ b/pw_sys_io_baremetal_lm3s6965evb/sys_io_baremetal.cc
@@ -14,7 +14,6 @@
 
 #include <cinttypes>
 
-#include "pw_boot_armv7m/boot.h"
 #include "pw_preprocessor/compiler.h"
 #include "pw_sys_io/sys_io.h"
 
@@ -62,12 +61,6 @@
 constexpr uint32_t kRcgcUart0EnableMask = 0x1;
 volatile uint32_t& rcgc1 = *reinterpret_cast<volatile uint32_t*>(0x400FE104U);
 
-constexpr uint32_t kRccDefault = 0x078E3AD1U;
-volatile uint32_t& rcc = *reinterpret_cast<volatile uint32_t*>(0x400FE070U);
-
-constexpr uint32_t kRcc2Default = 0x07802810U;
-volatile uint32_t& rcc2 = *reinterpret_cast<volatile uint32_t*>(0x400FE070U);
-
 // Calculate a baud rate multiplier such that we have 16 bits of precision for
 // the integer portion and 6 bits for the fractional portion.
 void SetBaudRate(uint32_t clock, uint32_t target_baud) {
@@ -77,53 +70,9 @@
   uart0.fractional_baud = (((remainder << 7) / divisor + 1) >> 1) & 0x3f;
 }
 
-// Default handler to insert into the ARMv7-M vector table (below).
-// This function exists for convenience. If a device isn't doing what you
-// expect, it might have hit a fault and ended up here.
-void DefaultFaultHandler(void) {
-  while (true) {
-    // Wait for debugger to attach.
-  }
-}
-
-// This is the device's interrupt vector table. It's not referenced in any code
-// because the platform expects this table to be present at the beginning of
-// flash. The exact address is specified in the pw_boot_armv7m configuration as
-// part of the target config.
-//
-// For more information, see ARMv7-M Architecture Reference Manual DDI 0403E.b
-// section B1.5.3.
-
-// This typedef is for convenience when building the vector table. With the
-// exception of SP_main (0th entry in the vector table), all the entries of the
-// vector table are function pointers.
-typedef void (*InterruptHandler)();
-
-PW_KEEP_IN_SECTION(".vector_table")
-const InterruptHandler vector_table[] = {
-    // The starting location of the stack pointer.
-    // This address is NOT an interrupt handler/function pointer, it is simply
-    // the address that the main stack pointer should be initialized to. The
-    // value is reinterpret casted because it needs to be in the vector table.
-    [0] = reinterpret_cast<InterruptHandler>(&pw_stack_high_addr),
-
-    // Reset handler, dictates how to handle reset interrupt. This is the
-    // address that the Program Counter (PC) is initialized to at boot.
-    [1] = pw_BootEntry,
-
-    // NMI handler.
-    [2] = DefaultFaultHandler,
-    // HardFault handler.
-    [3] = DefaultFaultHandler,
-};
-
 }  // namespace
 
-extern "C" void pw_PreMainInit() {
-  // Force RCC to be at default at boot.
-  rcc = kRccDefault;
-  rcc2 = kRcc2Default;
-
+extern "C" void pw_sys_io_Init() {
   rcgc1 |= kRcgcUart0EnableMask;
   for (volatile int i = 0; i < 3; ++i) {
     // We must wait after enabling uart.
@@ -141,16 +90,22 @@
 // see if a byte is ready yet.
 Status ReadByte(std::byte* dest) {
   while (true) {
-    if (uart0.receive_error) {
-      // Writing anything to this register clears all errors.
-      uart0.receive_error = 0xff;
-    }
-    if (uart0.status_flags & kRxFifoFullMask) {
-      *dest = static_cast<std::byte>(uart0.data_register);
-      break;
+    if (TryReadByte(dest).ok()) {
+      return Status::Ok();
     }
   }
-  return Status::OK;
+}
+
+Status TryReadByte(std::byte* dest) {
+  if (uart0.receive_error) {
+    // Writing anything to this register clears all errors.
+    uart0.receive_error = 0xff;
+  }
+  if (!(uart0.status_flags & kRxFifoFullMask)) {
+    return Status::Unavailable();
+  }
+  *dest = static_cast<std::byte>(uart0.data_register);
+  return Status::Ok();
 }
 
 // Send a byte over UART0. Since this blocks on every byte, it's rather
@@ -163,7 +118,7 @@
   while (!(uart0.status_flags & kTxFifoEmptyMask)) {
   }
   uart0.data_register = static_cast<uint32_t>(b);
-  return Status::OK;
+  return Status::Ok();
 }
 
 // Writes a string using pw::sys_io, and add newline characters at the end.
diff --git a/pw_sys_io_baremetal_stm32f429/BUILD.gn b/pw_sys_io_baremetal_stm32f429/BUILD.gn
index 9df8bbf..c611ccb 100644
--- a/pw_sys_io_baremetal_stm32f429/BUILD.gn
+++ b/pw_sys_io_baremetal_stm32f429/BUILD.gn
@@ -12,11 +12,11 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/target_types.gni")
 import("$dir_pw_docgen/docs.gni")
+
 config("default_config") {
   include_dirs = [ "public" ]
 }
@@ -24,9 +24,11 @@
 pw_source_set("pw_sys_io_baremetal_stm32f429") {
   public_configs = [ ":default_config" ]
   public = [ "public/pw_sys_io_baremetal_stm32f429/init.h" ]
-  public_deps = [ "$dir_pw_boot_armv7m" ]
-  deps = [
+  public_deps = [
+    "$dir_pw_boot_armv7m",
     "$dir_pw_preprocessor",
+  ]
+  deps = [
     "$dir_pw_sys_io:default_putget_bytes",
     "$dir_pw_sys_io:facade",
   ]
diff --git a/pw_sys_io_baremetal_stm32f429/docs.rst b/pw_sys_io_baremetal_stm32f429/docs.rst
index 44c68e3..1f1fe08 100644
--- a/pw_sys_io_baremetal_stm32f429/docs.rst
+++ b/pw_sys_io_baremetal_stm32f429/docs.rst
@@ -1,8 +1,4 @@
-.. _chapter-pw-sys-io-baremetal-stm32f429:
-
-.. default-domain:: cpp
-
-.. highlight:: sh
+.. _module-pw_sys_io_baremetal_stm32f429:
 
 -----------------------------
 pw_sys_io_baremetal_stm32f429
diff --git a/pw_sys_io_baremetal_stm32f429/public/pw_sys_io_baremetal_stm32f429/init.h b/pw_sys_io_baremetal_stm32f429/public/pw_sys_io_baremetal_stm32f429/init.h
index 352a73f..d4262c5 100644
--- a/pw_sys_io_baremetal_stm32f429/public/pw_sys_io_baremetal_stm32f429/init.h
+++ b/pw_sys_io_baremetal_stm32f429/public/pw_sys_io_baremetal_stm32f429/init.h
@@ -13,5 +13,11 @@
 // the License.
 #pragma once
 
+#include "pw_preprocessor/util.h"
+
+PW_EXTERN_C_START
+
 // The actual implement of PreMainInit() in sys_io_BACKEND.
 void pw_sys_io_Init();
+
+PW_EXTERN_C_END
diff --git a/pw_sys_io_baremetal_stm32f429/sys_io_baremetal.cc b/pw_sys_io_baremetal_stm32f429/sys_io_baremetal.cc
index 7865693..846e34c 100644
--- a/pw_sys_io_baremetal_stm32f429/sys_io_baremetal.cc
+++ b/pw_sys_io_baremetal_stm32f429/sys_io_baremetal.cc
@@ -14,7 +14,6 @@
 
 #include <cinttypes>
 
-#include "pw_boot_armv7m/boot.h"
 #include "pw_preprocessor/compiler.h"
 #include "pw_sys_io/sys_io.h"
 
@@ -171,11 +170,21 @@
 // see if a byte is ready yet.
 Status ReadByte(std::byte* dest) {
   while (true) {
-    if (usart1.status & kReadDataReady) {
-      *dest = static_cast<std::byte>(usart1.data_register);
+    if (TryReadByte(dest).ok()) {
+      return Status::Ok();
     }
   }
-  return Status::OK;
+}
+
+// Wait for a byte to read on USART1. This blocks until a byte is read. This is
+// extremely inefficient as it requires the target to burn CPU cycles polling to
+// see if a byte is ready yet.
+Status TryReadByte(std::byte* dest) {
+  if (!(usart1.status & kReadDataReady)) {
+    return Status::Unavailable();
+  }
+  *dest = static_cast<std::byte>(usart1.data_register);
+  return Status::Ok();
 }
 
 // Send a byte over USART1. Since this blocks on every byte, it's rather
@@ -188,7 +197,7 @@
   while (!(usart1.status & kTxRegisterEmpty)) {
   }
   usart1.data_register = static_cast<uint32_t>(b);
-  return Status::OK;
+  return Status::Ok();
 }
 
 // Writes a string using pw::sys_io, and add newline characters at the end.
diff --git a/pw_sys_io_stdio/BUILD.gn b/pw_sys_io_stdio/BUILD.gn
index d01b93e..2a7c578 100644
--- a/pw_sys_io_stdio/BUILD.gn
+++ b/pw_sys_io_stdio/BUILD.gn
@@ -12,11 +12,11 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/target_types.gni")
 import("$dir_pw_docgen/docs.gni")
+
 pw_source_set("pw_sys_io_stdio") {
   deps = [
     "$dir_pw_sys_io:default_putget_bytes",
diff --git a/pw_sys_io_stdio/CMakeLists.txt b/pw_sys_io_stdio/CMakeLists.txt
index 5b76f64..1493cf7 100644
--- a/pw_sys_io_stdio/CMakeLists.txt
+++ b/pw_sys_io_stdio/CMakeLists.txt
@@ -12,11 +12,9 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+
 pw_auto_add_simple_module(pw_sys_io_stdio
   IMPLEMENTS_FACADE
     pw_sys_io
 )
-
-# TODO(hepler): Declare pw_sys_io_stdio as the pw_sys_io backend for now.
-add_library(pw_sys_io.backend INTERFACE)
-target_link_libraries(pw_sys_io.backend INTERFACE pw_sys_io_stdio)
diff --git a/pw_sys_io_stdio/docs.rst b/pw_sys_io_stdio/docs.rst
index 31a9357..7280656 100644
--- a/pw_sys_io_stdio/docs.rst
+++ b/pw_sys_io_stdio/docs.rst
@@ -1,8 +1,4 @@
-.. _chapter-pw-sys-io-stdio:
-
-.. default-domain:: cpp
-
-.. highlight:: sh
+.. _module-pw_sys_io_stdio:
 
 ---------------
 pw_sys_io_stdio
diff --git a/pw_sys_io_stdio/sys_io.cc b/pw_sys_io_stdio/sys_io.cc
index 65d1a89..53c9d5e 100644
--- a/pw_sys_io_stdio/sys_io.cc
+++ b/pw_sys_io_stdio/sys_io.cc
@@ -20,22 +20,27 @@
 
 Status ReadByte(std::byte* dest) {
   if (dest == nullptr) {
-    return Status::FAILED_PRECONDITION;
+    return Status::FailedPrecondition();
   }
 
   int value = std::getchar();
   if (value == EOF) {
-    return Status::RESOURCE_EXHAUSTED;
+    return Status::ResourceExhausted();
   }
   *dest = static_cast<std::byte>(value);
-  return Status::OK;
+  return Status::Ok();
+}
+
+Status TryReadByte(std::byte*) {
+  // TryReadByte() is not (yet) supported on hosts.
+  return Status::Unimplemented();
 }
 
 Status WriteByte(std::byte b) {
   if (std::putchar(static_cast<char>(b)) == EOF) {
-    return Status::INTERNAL;
+    return Status::Internal();
   }
-  return Status::OK;
+  return Status::Ok();
 }
 
 StatusWithSize WriteLine(const std::string_view& s) {
diff --git a/pw_target_runner/BUILD.gn b/pw_target_runner/BUILD.gn
index 2aa53af..6e67cbc 100644
--- a/pw_target_runner/BUILD.gn
+++ b/pw_target_runner/BUILD.gn
@@ -12,11 +12,11 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_docgen/docs.gni")
 import("$dir_pw_protobuf_compiler/proto.gni")
+
 pw_doc_group("docs") {
   sources = [ "docs.rst" ]
   group_deps = [ "go:docs" ]
diff --git a/pw_target_runner/docs.rst b/pw_target_runner/docs.rst
index 2b8b8ca..bca7084 100644
--- a/pw_target_runner/docs.rst
+++ b/pw_target_runner/docs.rst
@@ -1,8 +1,4 @@
-.. _chapter-target-runner:
-
-.. default-domain:: cpp
-
-.. highlight:: sh
+.. _module-pw_target_runner:
 
 ----------------
 pw_target_runner
diff --git a/pw_target_runner/go/BUILD.gn b/pw_target_runner/go/BUILD.gn
index 4b071f1..31be50e 100644
--- a/pw_target_runner/go/BUILD.gn
+++ b/pw_target_runner/go/BUILD.gn
@@ -12,12 +12,12 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/go.gni")
 import("$dir_pw_build/host_tool.gni")
 import("$dir_pw_docgen/docs.gni")
+
 pw_doc_group("docs") {
   sources = [ "docs.rst" ]
 }
diff --git a/pw_target_runner/go/docs.rst b/pw_target_runner/go/docs.rst
index 3c1abb7..6303237 100644
--- a/pw_target_runner/go/docs.rst
+++ b/pw_target_runner/go/docs.rst
@@ -1,8 +1,4 @@
-.. _chapter-test-server:
-
-.. default-domain:: go
-
-.. highlight:: go
+.. _module-pw_target_runner-go:
 
 --
 Go
@@ -21,7 +17,7 @@
 The code below implements a very basic test server with two test workers which
 print out the path of the tests they are scheduled to run.
 
-.. code::
+.. code-block:: go
 
   package main
 
diff --git a/pw_target_runner/go/src/pigweed.dev/pw_target_runner/BUILD.gn b/pw_target_runner/go/src/pigweed.dev/pw_target_runner/BUILD.gn
index 31619cf..8fff0e2 100644
--- a/pw_target_runner/go/src/pigweed.dev/pw_target_runner/BUILD.gn
+++ b/pw_target_runner/go/src/pigweed.dev/pw_target_runner/BUILD.gn
@@ -12,17 +12,17 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/go.gni")
+
 pw_go_package("pw_target_runner") {
   sources = [
     "exec_runner.go",
     "server.go",
     "worker_pool.go",
   ]
-  deps = [ "$dir_pw_target_runner:target_runner_proto_go" ]
+  deps = [ "$dir_pw_target_runner:target_runner_proto.go" ]
   external_deps = [ "google.golang.org/grpc" ]
   gopath = "$dir_pw_target_runner/go"
 }
diff --git a/pw_target_runner/go/src/pigweed.dev/pw_target_runner_client/BUILD.gn b/pw_target_runner/go/src/pigweed.dev/pw_target_runner_client/BUILD.gn
index f576b6e..b8d7cd8 100644
--- a/pw_target_runner/go/src/pigweed.dev/pw_target_runner_client/BUILD.gn
+++ b/pw_target_runner/go/src/pigweed.dev/pw_target_runner_client/BUILD.gn
@@ -12,12 +12,12 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/go.gni")
+
 pw_go_package("pw_target_runner_client") {
   sources = [ "main.go" ]
-  deps = [ "$dir_pw_target_runner:target_runner_proto_go" ]
+  deps = [ "$dir_pw_target_runner:target_runner_proto.go" ]
   gopath = "$dir_pw_target_runner/go"
 }
diff --git a/pw_target_runner/go/src/pigweed.dev/pw_target_runner_server/BUILD.gn b/pw_target_runner/go/src/pigweed.dev/pw_target_runner_server/BUILD.gn
index cf1ea2a..b1956ce 100644
--- a/pw_target_runner/go/src/pigweed.dev/pw_target_runner_server/BUILD.gn
+++ b/pw_target_runner/go/src/pigweed.dev/pw_target_runner_server/BUILD.gn
@@ -12,14 +12,14 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/go.gni")
+
 pw_go_package("pw_target_runner_server") {
   sources = [ "main.go" ]
   deps = [
-    "$dir_pw_target_runner:exec_server_config_proto_go",
+    "$dir_pw_target_runner:exec_server_config_proto.go",
     "$dir_pw_target_runner/go/src/pigweed.dev/pw_target_runner",
   ]
   external_deps = [ "github.com/golang/protobuf/proto" ]
diff --git a/pw_tokenizer/BUILD b/pw_tokenizer/BUILD
index 020db7b..350d463 100644
--- a/pw_tokenizer/BUILD
+++ b/pw_tokenizer/BUILD
@@ -27,6 +27,7 @@
     name = "pw_tokenizer",
     srcs = [
         "encode_args.cc",
+        "hash.cc",
         "public/pw_tokenizer/config.h",
         "public/pw_tokenizer/internal/argument_types.h",
         "public/pw_tokenizer/internal/argument_types_macro_4_byte.h",
@@ -39,7 +40,7 @@
         "tokenize.cc",
     ],
     hdrs = [
-        "public/pw_tokenizer/pw_tokenizer_65599_fixed_length_hash.h",
+        "public/pw_tokenizer/hash.h",
         "public/pw_tokenizer/tokenize.h",
     ],
     includes = ["public"],
@@ -150,8 +151,8 @@
 pw_cc_test(
     name = "argument_types_test",
     srcs = [
-        "argument_types_test_c.c",
         "argument_types_test.cc",
+        "argument_types_test_c.c",
         "pw_tokenizer_private/argument_types_test.h",
         "tokenize_test_fakes.cc",
     ],
@@ -202,8 +203,8 @@
 pw_cc_test(
     name = "global_handlers_test",
     srcs = [
-        "global_handlers_test_c.c",
         "global_handlers_test.cc",
+        "global_handlers_test_c.c",
         "pw_tokenizer_private/tokenize_test.h",
     ],
     deps = [
@@ -252,8 +253,8 @@
     name = "tokenize_test",
     srcs = [
         "pw_tokenizer_private/tokenize_test.h",
-        "tokenize_test_c.c",
         "tokenize_test.cc",
+        "tokenize_test_c.c",
     ],
     deps = [
         ":pw_tokenizer",
diff --git a/pw_tokenizer/BUILD.gn b/pw_tokenizer/BUILD.gn
index 71f7f36..35c1757 100644
--- a/pw_tokenizer/BUILD.gn
+++ b/pw_tokenizer/BUILD.gn
@@ -12,33 +12,77 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
+import("$dir_pw_arduino_build/arduino.gni")
 import("$dir_pw_build/facade.gni")
+import("$dir_pw_build/module_config.gni")
 import("$dir_pw_build/target_types.gni")
 import("$dir_pw_docgen/docs.gni")
 import("$dir_pw_fuzzer/fuzzer.gni")
 import("$dir_pw_unit_test/test.gni")
 import("backend.gni")
-config("default_config") {
+
+declare_args() {
+  # The build target that overrides the default configuration options for this
+  # module. This should point to a source set that provides defines through a
+  # public config (which may -include a file or add defines directly).
+  pw_tokenizer_CONFIG = pw_build_DEFAULT_MODULE_CONFIG
+}
+
+config("public_include_path") {
   include_dirs = [ "public" ]
+  visibility = [ ":*" ]
+}
+
+config("linker_script") {
+  inputs = [ "pw_tokenizer_linker_sections.ld" ]
+
+  # Automatically add the tokenizer linker sections when cross-compiling or
+  # building for Linux. macOS and Windows executables are not supported.
+  if (current_os == "") {
+    ldflags = [
+      "-T",
+      rebase_path("pw_tokenizer_linker_sections.ld"),
+    ]
+  } else if (current_os == "linux") {
+    # When building for Linux, the linker provides a default linker script.
+    # The add_tokenizer_sections_to_default_script.ld wrapper includes the
+    # pw_tokenizer_linker_sections.ld script in a way that appends to the the
+    # default linker script instead of overriding it.
+    ldflags = [
+      "-T",
+      rebase_path("add_tokenizer_sections_to_default_script.ld"),
+      "-L",
+      rebase_path("."),
+    ]
+    inputs += [ "add_tokenizer_sections_to_default_script.ld" ]
+  }
+  visibility = [ ":*" ]
+}
+
+pw_source_set("config") {
+  public = [ "public/pw_tokenizer/config.h" ]
+  public_configs = [ ":public_include_path" ]
+  public_deps = [ pw_tokenizer_CONFIG ]
 }
 
 pw_source_set("pw_tokenizer") {
-  public_configs = [ ":default_config" ]
+  public_configs = [ ":public_include_path" ]
+  all_dependent_configs = [ ":linker_script" ]
   public_deps = [
+    ":config",
     dir_pw_preprocessor,
     dir_pw_span,
   ]
   deps = [ dir_pw_varint ]
   public = [
-    "public/pw_tokenizer/pw_tokenizer_65599_fixed_length_hash.h",
+    "public/pw_tokenizer/hash.h",
     "public/pw_tokenizer/tokenize.h",
   ]
   sources = [
     "encode_args.cc",
-    "public/pw_tokenizer/config.h",
+    "hash.cc",
     "public/pw_tokenizer/internal/argument_types.h",
     "public/pw_tokenizer/internal/argument_types_macro_4_byte.h",
     "public/pw_tokenizer/internal/argument_types_macro_8_byte.h",
@@ -72,38 +116,37 @@
 }
 
 pw_facade("global_handler") {
-  facade_name = "global_handler_facade"
   backend = pw_tokenizer_GLOBAL_HANDLER_BACKEND
 
-  public_configs = [ ":default_config" ]
+  public_configs = [ ":public_include_path" ]
   public = [ "public/pw_tokenizer/tokenize_to_global_handler.h" ]
   sources = [ "tokenize_to_global_handler.cc" ]
   public_deps = [ ":pw_tokenizer" ]
 }
 
 pw_facade("global_handler_with_payload") {
-  facade_name = "global_handler_with_payload_facade"
   backend = pw_tokenizer_GLOBAL_HANDLER_WITH_PAYLOAD_BACKEND
 
-  public_configs = [ ":default_config" ]
+  public_configs = [ ":public_include_path" ]
   public = [ "public/pw_tokenizer/tokenize_to_global_handler_with_payload.h" ]
   sources = [ "tokenize_to_global_handler_with_payload.cc" ]
   public_deps = [ ":pw_tokenizer" ]
 }
 
 pw_source_set("base64") {
-  public_configs = [ ":default_config" ]
+  public_configs = [ ":public_include_path" ]
   public = [ "public/pw_tokenizer/base64.h" ]
   sources = [ "base64.cc" ]
   public_deps = [
+    ":pw_tokenizer",
+    dir_pw_base64,
     dir_pw_preprocessor,
     dir_pw_span,
   ]
-  deps = [ dir_pw_base64 ]
 }
 
 pw_source_set("decoder") {
-  public_configs = [ ":default_config" ]
+  public_configs = [ ":public_include_path" ]
   public_deps = [ dir_pw_span ]
   deps = [ dir_pw_varint ]
   public = [
@@ -127,10 +170,7 @@
     ":pw_tokenizer",
     dir_pw_varint,
   ]
-  sources = [
-    "generate_decoding_test_data.cc",
-    "tokenize_test_fakes.cc",
-  ]
+  sources = [ "generate_decoding_test_data.cc" ]
 }
 
 # Executable for generating a test ELF file for elf_reader_test.py. A host
@@ -163,7 +203,6 @@
   group_deps = [
     "$dir_pw_preprocessor:tests",
     "$dir_pw_span:tests",
-    "$dir_pw_status:tests",
   ]
 }
 
@@ -172,9 +211,12 @@
     "argument_types_test.cc",
     "argument_types_test_c.c",
     "pw_tokenizer_private/argument_types_test.h",
-    "tokenize_test_fakes.cc",
   ]
   deps = [ ":pw_tokenizer" ]
+
+  if (dir_pw_third_party_arduino != "") {
+    remove_configs = [ "$dir_pw_build:strict_warnings" ]
+  }
 }
 
 pw_test("base64_test") {
@@ -192,11 +234,19 @@
     ":decoder",
     "$dir_pw_varint",
   ]
+
+  # TODO(tonymd): This fails on Teensyduino 1.54 beta core. It may be related to
+  # linking in stl functions. Will debug when 1.54 is released.
+  enable_if = pw_build_EXECUTABLE_TARGET_TYPE != "arduino_executable"
 }
 
 pw_test("detokenize_test") {
   sources = [ "detokenize_test.cc" ]
   deps = [ ":decoder" ]
+
+  # TODO(tonymd): This fails on Teensyduino 1.54 beta core. It may be related to
+  # linking in stl functions. Will debug when 1.54 is released.
+  enable_if = pw_build_EXECUTABLE_TARGET_TYPE != "arduino_executable"
 }
 
 pw_test("global_handlers_test") {
@@ -218,7 +268,6 @@
   sources = [
     "hash_test.cc",
     "pw_tokenizer_private/generated_hash_test_cases.h",
-    "tokenize_test_fakes.cc",
   ]
   deps = [ ":pw_tokenizer" ]
 }
@@ -230,6 +279,7 @@
   "$dir_pw_varint/varint.cc",
   "encode_args.cc",
   "public/pw_tokenizer/config.h",
+  "public/pw_tokenizer/hash.h",
   "public/pw_tokenizer/internal/argument_types.h",
   "public/pw_tokenizer/internal/argument_types_macro_4_byte.h",
   "public/pw_tokenizer/internal/argument_types_macro_8_byte.h",
@@ -237,7 +287,6 @@
   "public/pw_tokenizer/internal/pw_tokenizer_65599_fixed_length_80_hash_macro.h",
   "public/pw_tokenizer/internal/pw_tokenizer_65599_fixed_length_96_hash_macro.h",
   "public/pw_tokenizer/internal/tokenize_string.h",
-  "public/pw_tokenizer/pw_tokenizer_65599_fixed_length_hash.h",
   "public/pw_tokenizer/tokenize.h",
   "public/pw_tokenizer/tokenize_to_global_handler.h",
   "public/pw_tokenizer/tokenize_to_global_handler_with_payload.h",
@@ -248,7 +297,7 @@
   "tokenize_to_global_handler_with_payload.cc",
 ]
 _simple_tokenize_test_configs = [
-  ":default_config",
+  ":public_include_path",
   "$dir_pw_varint:default_config",
 ]
 
@@ -327,7 +376,7 @@
 # the JNI headers must be available in the system or provided with the
 # pw_JAVA_NATIVE_INTERFACE_INCLUDE_DIRS variable.
 pw_shared_library("detokenizer_jni") {
-  public_configs = [ ":default_config" ]
+  public_configs = [ ":public_include_path" ]
   include_dirs = pw_JAVA_NATIVE_INTERFACE_INCLUDE_DIRS
   sources = [ "java/dev/pigweed/tokenizer/detokenizer.cc" ]
   public_deps = [
diff --git a/pw_tokenizer/CMakeLists.txt b/pw_tokenizer/CMakeLists.txt
index 00fbe75..a467ada 100644
--- a/pw_tokenizer/CMakeLists.txt
+++ b/pw_tokenizer/CMakeLists.txt
@@ -12,25 +12,43 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+
 pw_add_module_library(pw_tokenizer
   SOURCES
     encode_args.cc
+    hash.cc
     tokenize.cc
   PUBLIC_DEPS
+    pw_polyfill.overrides
     pw_preprocessor
     pw_span
   PRIVATE_DEPS
     pw_varint
 )
 
+if("${CMAKE_SYSTEM_NAME}" STREQUAL "")
+  target_link_options(pw_tokenizer
+    PUBLIC
+      "-T${CMAKE_CURRENT_SOURCE_DIR}/pw_tokenizer_linker_sections.ld"
+  )
+elseif("${CMAKE_SYSTEM_NAME}" STREQUAL "Linux")
+  target_link_options(pw_tokenizer
+    PUBLIC
+      "-T${CMAKE_CURRENT_SOURCE_DIR}/add_tokenizer_sections_to_default_script.ld"
+      "-L${CMAKE_CURRENT_SOURCE_DIR}"
+  )
+endif()
+
 pw_add_module_library(pw_tokenizer.base64
   SOURCES
     base64.cc
   PUBLIC_DEPS
+    pw_base64
+    pw_containers
+    pw_polyfill.overrides
     pw_preprocessor
     pw_span
-  PRIVATE_DEPS
-    pw_base64
 )
 
 pw_add_module_library(pw_tokenizer.decoder
@@ -50,6 +68,8 @@
     tokenize_to_global_handler.cc
   PUBLIC_DEPS
     pw_tokenizer
+  DEFAULT_BACKEND
+    pw_build.empty  # Default to an empty backend so the tests can run.
 )
 
 pw_add_facade(pw_tokenizer.global_handler_with_payload
@@ -57,21 +77,10 @@
     tokenize_to_global_handler_with_payload.cc
   PUBLIC_DEPS
     pw_tokenizer
+  DEFAULT_BACKEND
+    pw_build.empty  # Default to an empty backend so the tests can run.
 )
 
-# TODO(hepler): Use an empty backend that makes tests possible, for now.
-add_library(pw_tokenizer.global_handler.backend INTERFACE)
-target_link_libraries(pw_tokenizer.global_handler.backend
-  INTERFACE
-    pw_tokenizer.test_backend)
-
-add_library(pw_tokenizer.global_handler_with_payload.backend INTERFACE)
-target_link_libraries(pw_tokenizer.global_handler_with_payload.backend
-  INTERFACE
-    pw_tokenizer.test_backend)
-
-add_library(pw_tokenizer.test_backend INTERFACE)
-
 # Executable for generating test data for the C++ and Python detokenizers. This
 # target should only be built for the host.
 add_executable(pw_tokenizer.generate_decoding_test_data EXCLUDE_FROM_ALL
diff --git a/pw_tokenizer/add_tokenizer_sections_to_default_script.ld b/pw_tokenizer/add_tokenizer_sections_to_default_script.ld
new file mode 100644
index 0000000..41cc0c1
--- /dev/null
+++ b/pw_tokenizer/add_tokenizer_sections_to_default_script.ld
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2020 The Pigweed Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+INCLUDE pw_tokenizer_linker_sections.ld
+
+/*
+ * The INSERT directive instructs the linker to append the directives in this
+ * script to the default linker script, rather than replace the default with
+ * this script. It doesn't matter where the tokenizer sections are inserted, so
+ * insert them after the standard .strtab section.
+ */
+INSERT AFTER .strtab
diff --git a/pw_tokenizer/argument_types_test.cc b/pw_tokenizer/argument_types_test.cc
index a4d2676..67c1d17 100644
--- a/pw_tokenizer/argument_types_test.cc
+++ b/pw_tokenizer/argument_types_test.cc
@@ -75,7 +75,7 @@
 // uint64_t).
 #define PACKED_TYPES(...)                                                 \
   ((PW_CONCAT(0b, __VA_ARGS__, u) << PW_TOKENIZER_TYPE_COUNT_SIZE_BITS) | \
-   PW_ARG_COUNT(__VA_ARGS__))
+   PW_MACRO_ARG_COUNT(__VA_ARGS__))
 
 // Test this test macro for both uint32_t and uint64_t.
 #if PW_TOKENIZER_CFG_ARG_TYPES_SIZE_BYTES == 4
diff --git a/pw_tokenizer/argument_types_test_c.c b/pw_tokenizer/argument_types_test_c.c
index f594200..2030587 100644
--- a/pw_tokenizer/argument_types_test_c.c
+++ b/pw_tokenizer/argument_types_test_c.c
@@ -72,10 +72,10 @@
 static char char_array[16];
 
 // Define the test functions that are called by the C++ unit test.
-#define DEFINE_TEST_FUNCTION(name, ...)               \
-  pw_TokenizerArgTypes pw_TestTokenizer##name(void) { \
-    (void)char_array;                                 \
-    return PW_TOKENIZER_ARG_TYPES(__VA_ARGS__);       \
+#define DEFINE_TEST_FUNCTION(name, ...)                 \
+  _pw_tokenizer_ArgTypes pw_TestTokenizer##name(void) { \
+    (void)char_array;                                   \
+    return PW_TOKENIZER_ARG_TYPES(__VA_ARGS__);         \
   }
 
 DEFINE_TEST_FUNCTION(NoArgs);
diff --git a/pw_tokenizer/base64.cc b/pw_tokenizer/base64.cc
index 6ce4a4c..2b87161 100644
--- a/pw_tokenizer/base64.cc
+++ b/pw_tokenizer/base64.cc
@@ -14,37 +14,36 @@
 
 #include "pw_tokenizer/base64.h"
 
-#include <span>
-
-#include "pw_base64/base64.h"
-
 namespace pw::tokenizer {
 
-extern "C" size_t pw_TokenizerPrefixedBase64Encode(
+extern "C" size_t pw_tokenizer_PrefixedBase64Encode(
     const void* binary_message,
     size_t binary_size_bytes,
     void* output_buffer,
     size_t output_buffer_size_bytes) {
-  const size_t encoded_size = base64::EncodedSize(binary_size_bytes) + 1;
+  char* output = static_cast<char*>(output_buffer);
+  const size_t encoded_size = Base64EncodedBufferSize(binary_size_bytes);
 
   if (output_buffer_size_bytes < encoded_size) {
+    if (output_buffer_size_bytes > 0u) {
+      output[0] = '\0';
+    }
+
     return 0;
   }
 
-  char* output = static_cast<char*>(output_buffer);
   output[0] = kBase64Prefix;
-
   base64::Encode(std::span(static_cast<const std::byte*>(binary_message),
                            binary_size_bytes),
                  &output[1]);
-
-  return encoded_size;
+  output[encoded_size - 1] = '\0';
+  return encoded_size - sizeof('\0');  // exclude the null terminator
 }
 
-extern "C" size_t pw_TokenizerPrefixedBase64Decode(const void* base64_message,
-                                                   size_t base64_size_bytes,
-                                                   void* output_buffer,
-                                                   size_t output_buffer_size) {
+extern "C" size_t pw_tokenizer_PrefixedBase64Decode(const void* base64_message,
+                                                    size_t base64_size_bytes,
+                                                    void* output_buffer,
+                                                    size_t output_buffer_size) {
   const char* base64 = static_cast<const char*>(base64_message);
 
   if (base64_size_bytes == 0 || base64[0] != kBase64Prefix) {
diff --git a/pw_tokenizer/base64_test.cc b/pw_tokenizer/base64_test.cc
index d6ff56e..d751b8e 100644
--- a/pw_tokenizer/base64_test.cc
+++ b/pw_tokenizer/base64_test.cc
@@ -27,7 +27,12 @@
 
 class PrefixedBase64 : public ::testing::Test {
  protected:
-  PrefixedBase64() : binary_{}, base64_{} {}
+  static constexpr char kUnset = '#';
+
+  PrefixedBase64() {
+    std::memset(binary_, kUnset, sizeof(binary_));
+    std::memset(base64_, kUnset, sizeof(base64_));
+  }
 
   byte binary_[32];
   char base64_[32];
@@ -62,18 +67,49 @@
   for (auto& [binary, base64] : kTestData) {
     EXPECT_EQ(base64.size(), PrefixedBase64Encode(binary, base64_));
     ASSERT_EQ(base64, base64_);
+    EXPECT_EQ('\0', base64_[base64.size()]);
   }
 }
 
 TEST_F(PrefixedBase64, Encode_EmptyInput_WritesPrefix) {
   EXPECT_EQ(1u, PrefixedBase64Encode(std::span<byte>(), base64_));
   EXPECT_EQ('$', base64_[0]);
+  EXPECT_EQ('\0', base64_[1]);
 }
 
 TEST_F(PrefixedBase64, Encode_EmptyOutput_WritesNothing) {
   EXPECT_EQ(0u,
             PrefixedBase64Encode(kTestData[5].binary, std::span(base64_, 0)));
+  EXPECT_EQ(kUnset, base64_[0]);
+}
+
+TEST_F(PrefixedBase64, Encode_SingleByteOutput_OnlyNullTerminates) {
+  EXPECT_EQ(0u,
+            PrefixedBase64Encode(kTestData[5].binary, std::span(base64_, 1)));
   EXPECT_EQ('\0', base64_[0]);
+  EXPECT_EQ(kUnset, base64_[1]);
+}
+
+TEST_F(PrefixedBase64, Encode_NoRoomForNullAfterMessage_OnlyNullTerminates) {
+  EXPECT_EQ(
+      0u,
+      PrefixedBase64Encode(kTestData[5].binary,
+                           std::span(base64_, kTestData[5].base64.size())));
+  EXPECT_EQ('\0', base64_[0]);
+  EXPECT_EQ(kUnset, base64_[1]);
+}
+
+TEST_F(PrefixedBase64, Base64EncodedBufferSize_Empty_RoomForPrefixAndNull) {
+  EXPECT_EQ(2u, Base64EncodedBufferSize(0));
+}
+
+TEST_F(PrefixedBase64, Base64EncodedBufferSize_PositiveSizes) {
+  for (unsigned i = 1; i <= 3; ++i) {
+    EXPECT_EQ(6u, Base64EncodedBufferSize(i));
+  }
+  for (unsigned i = 4; i <= 6; ++i) {
+    EXPECT_EQ(10u, Base64EncodedBufferSize(i));
+  }
 }
 
 TEST_F(PrefixedBase64, Decode) {
@@ -85,18 +121,18 @@
 
 TEST_F(PrefixedBase64, Decode_EmptyInput_WritesNothing) {
   EXPECT_EQ(0u, PrefixedBase64Decode({}, binary_));
-  EXPECT_EQ(byte{0}, binary_[0]);
+  EXPECT_EQ(byte{kUnset}, binary_[0]);
 }
 
 TEST_F(PrefixedBase64, Decode_OnlyPrefix_WritesNothing) {
   EXPECT_EQ(0u, PrefixedBase64Decode("$", binary_));
-  EXPECT_EQ(byte{0}, binary_[0]);
+  EXPECT_EQ(byte{kUnset}, binary_[0]);
 }
 
 TEST_F(PrefixedBase64, Decode_EmptyOutput_WritesNothing) {
   EXPECT_EQ(0u,
             PrefixedBase64Decode(kTestData[5].base64, std::span(binary_, 0)));
-  EXPECT_EQ(byte{0}, binary_[0]);
+  EXPECT_EQ(byte{kUnset}, binary_[0]);
 }
 
 TEST_F(PrefixedBase64, Decode_OutputTooSmall_WritesNothing) {
@@ -104,7 +140,7 @@
   EXPECT_EQ(0u,
             PrefixedBase64Decode(item.base64,
                                  std::span(binary_, item.binary.size() - 1)));
-  EXPECT_EQ(byte{0}, binary_[0]);
+  EXPECT_EQ(byte{kUnset}, binary_[0]);
 }
 
 TEST(PrefixedBase64, DecodeInPlace) {
diff --git a/pw_tokenizer/database.gni b/pw_tokenizer/database.gni
index 0cad012..8b7b894 100644
--- a/pw_tokenizer/database.gni
+++ b/pw_tokenizer/database.gni
@@ -12,10 +12,9 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
-import("$dir_pw_build/python_script.gni")
+import("$dir_pw_build/python_action.gni")
 
 # Updates a tokenized string database in the source tree with artifacts from one
 # or more targets. Other database files may also be used.
@@ -25,7 +24,12 @@
 # created as a starting point.
 #
 # Args:
-#   database: source tree path to database file to update; must exist beforehand
+#   database: if updating a database, path to an existing database in the source
+#       tree; optional if creating a database, but may provide an output
+#       directory path to override the default of
+#       "$target_gen_dir/$target_name.[csv/binary]"
+#   create: if specified, create a database instead of updating one; 'create'
+#       must be set to one of the supported database types: "csv" or "binary"
 #   targets: GN targets (executables or libraries) from which to add tokens;
 #       these targets are added to deps
 #   optional_targets: GN targets from which to add tokens, if the output files
@@ -33,11 +37,27 @@
 #   input_databases: paths to other database files from which to add tokens
 #   deps: GN targets to build prior to generating the database; artifacts from
 #       these targets are NOT implicitly used for database generation
+#   domain: if provided, extract strings from tokenization domains matching this
+#       regular expression
 #
 template("pw_tokenizer_database") {
-  assert(defined(invoker.database),
-         "pw_tokenizer_database requires a 'database' variable")
+  assert(defined(invoker.database) || defined(invoker.create),
+         "pw_tokenizer_database requires a 'database' variable, unless " +
+             "'create' is specified")
 
+  if (defined(invoker.create)) {
+    assert(invoker.create == "csv" || invoker.create == "binary",
+           "If provided, 'create' must be \"csv\" or \"binary\"")
+    _create = invoker.create
+  } else {
+    _create = ""
+  }
+
+  if (defined(invoker.database)) {
+    _database = invoker.database
+  } else {
+    _database = "$target_gen_dir/$target_name.${invoker.create}"
+  }
   if (defined(invoker.targets)) {
     _targets = invoker.targets
   } else {
@@ -56,29 +76,57 @@
     _input_databases = []
   }
 
-  assert(
-      _targets != [] || _optional_targets != [] || _input_databases != [],
-      "No 'targets', 'optional_targets', or 'input_databases' were set for " +
-          "pw_tokenizer_database! At least one target or database must be " +
-          "provided as an input.")
+  if (defined(invoker.domain)) {
+    _domain = "#" + invoker.domain
+  } else {
+    _domain = ""
+  }
 
-  pw_python_script(target_name) {
+  if (_targets == [] && _optional_targets == []) {
+    # If no targets were specified, the domain will not be used, which is OK.
+    not_needed([ "_domain" ])
+  }
+
+  # Restrict parallelism for updating this database file to one thread. This
+  # makes it safe to update it from multiple toolchains.
+  pool("$target_name._pool") {
+    depth = 1
+  }
+
+  pw_python_action(target_name) {
     script = "$dir_pw_tokenizer/py/pw_tokenizer/database.py"
-    args = [
-      "add",
+    pool = ":$target_name._pool"
+
+    inputs = _input_databases
+
+    if (_create == "") {
+      args = [ "add" ]
+      inputs += [ _database ]
+      stamp = true
+    } else {
+      args = [
+        "create",
+        "--force",
+        "--type",
+        _create,
+      ]
+      outputs = [ _database ]
+    }
+
+    args += [
       "--database",
-      rebase_path(invoker.database),
+      rebase_path(_database),
     ]
     args += rebase_path(_input_databases)
 
     foreach(target, _targets) {
-      args += [ "<TARGET_FILE($target)>" ]
+      args += [ "<TARGET_FILE($target)>$_domain" ]
     }
 
     # For optional targets, the build outputs may not exist, since they aren't
     # added to deps. Use TARGET_FILE_IF_EXISTS to handle this.
     foreach(target, _optional_targets) {
-      args += [ "<TARGET_FILE_IF_EXISTS($target)>" ]
+      args += [ "<TARGET_FILE_IF_EXISTS($target)>$_domain" ]
     }
 
     deps = _targets
@@ -86,12 +134,5 @@
     if (defined(invoker.deps)) {
       deps += invoker.deps
     }
-
-    inputs = [ invoker.database ] + _input_databases
-
-    # Since the output file is in the source tree, create a corresponding stamp
-    # file in the output directory that is independent of the toolchain. That
-    # way, trying to update the database from multiple toolchains is an error.
-    stamp = "$root_build_dir/" + rebase_path(invoker.database, "//") + ".update"
   }
 }
diff --git a/pw_tokenizer/decode.cc b/pw_tokenizer/decode.cc
index cccaa08..f75da71 100644
--- a/pw_tokenizer/decode.cc
+++ b/pw_tokenizer/decode.cc
@@ -210,9 +210,10 @@
   const size_t bytes = varint::Decode(std::as_bytes(arguments), &value);
 
   if (bytes == 0u) {
-    return DecodedArg(ArgStatus::kDecodeError,
-                      text_,
-                      std::min(varint::kMaxVarintSizeBytes, arguments.size()));
+    return DecodedArg(
+        ArgStatus::kDecodeError,
+        text_,
+        std::min(varint::kMaxVarint64SizeBytes, arguments.size()));
   }
 
   // Unsigned ints need to be masked to their bit width due to sign extension.
diff --git a/pw_tokenizer/decode_test.cc b/pw_tokenizer/decode_test.cc
index d379060..425a2ca 100644
--- a/pw_tokenizer/decode_test.cc
+++ b/pw_tokenizer/decode_test.cc
@@ -123,7 +123,7 @@
   EXPECT_EQ(result.value_with_errors(),
             "The " ERR("%d ERROR") " " ERR("%s SKIPPED"));
   EXPECT_EQ(result.remaining_bytes(),
-            data.size() - varint::kMaxVarintSizeBytes);
+            data.size() - varint::kMaxVarint64SizeBytes);
   EXPECT_EQ(result.decoding_errors(), 2u);
 }
 
diff --git a/pw_tokenizer/docs.rst b/pw_tokenizer/docs.rst
index 9a5b369..121f025 100644
--- a/pw_tokenizer/docs.rst
+++ b/pw_tokenizer/docs.rst
@@ -1,8 +1,4 @@
-.. default-domain:: cpp
-
-.. highlight:: sh
-
-.. _chapter-pw-tokenizer:
+.. _module-pw_tokenizer:
 
 ------------
 pw_tokenizer
@@ -121,14 +117,15 @@
      the BUILD.gn's ``pw_tokenizer`` target to the build.
   2. Use the tokenization macros in your code. See `Tokenization`_.
   3. Add the contents of ``pw_tokenizer_linker_sections.ld`` to your project's
-     linker script.
+     linker script. In GN and CMake, this step is done automatically.
   4. Compile your code to produce an ELF file.
   5. Run ``database.py create`` on the ELF file to generate a CSV token
      database. See `Managing token databases`_.
   6. Commit the token database to your repository. See notes in `Database
      management`_.
   7. Integrate a ``database.py add`` command to your build to automatically
-     update the committed token database. See `Update a database`_.
+     update the committed token database. In GN, use the
+     ``pw_tokenizer_database`` template to do this. See `Update a database`_.
   8. Integrate ``detokenize.py`` or the C++ detokenization library with your
      tools to decode tokenized logs. See `Detokenization`_.
 
@@ -165,15 +162,15 @@
 ``PW_TOKENIZER_CFG_ENCODING_BUFFER_SIZE_BYTES``.
 
 This macro is provided by the ``pw_tokenizer:global_handler`` facade. The
-backend for this facade must define the ``pw_TokenizerHandleEncodedMessage``
+backend for this facade must define the ``pw_tokenizer_HandleEncodedMessage``
 C-linkage function.
 
 .. code-block:: cpp
 
   PW_TOKENIZE_TO_GLOBAL_HANDLER(format_string_literal, arguments...);
 
-  void pw_TokenizerHandleEncodedMessage(const uint8_t encoded_message[],
-                                        size_t size_bytes);
+  void pw_tokenizer_HandleEncodedMessage(const uint8_t encoded_message[],
+                                         size_t size_bytes);
 
 ``PW_TOKENIZE_TO_GLOBAL_HANDLER_WITH_PAYLOAD`` is similar, but passes a
 ``uintptr_t`` argument to the global handler function. Values like a log level
@@ -181,7 +178,7 @@
 
 This macro is provided by the ``pw_tokenizer:global_handler_with_payload``
 facade. The backend for this facade must define the
-``pw_TokenizerHandleEncodedMessageWithPayload`` C-linkage function.
+``pw_tokenizer_HandleEncodedMessageWithPayload`` C-linkage function.
 
 .. code-block:: cpp
 
@@ -189,9 +186,8 @@
                                              format_string_literal,
                                              arguments...);
 
-  void pw_TokenizerHandleEncodedMessageWithPayload(uintptr_t payload,
-                                                   const uint8_t encoded_message[],
-                                                   size_t size_bytes);
+  void pw_tokenizer_HandleEncodedMessageWithPayload(
+      uintptr_t payload, const uint8_t encoded_message[], size_t size_bytes);
 
 .. admonition:: When to use these macros
 
@@ -268,19 +264,19 @@
 It is trivial to convert this to a binary log using the tokenizer. The
 ``RecordLog`` call is replaced with a
 ``PW_TOKENIZE_TO_GLOBAL_HANDLER_WITH_PAYLOAD`` invocation. The
-``pw_TokenizerHandleEncodedMessageWithPayload`` implementation collects the
+``pw_tokenizer_HandleEncodedMessageWithPayload`` implementation collects the
 timestamp and transmits the message with ``TransmitLog``.
 
 .. code-block:: cpp
 
   #define LOG_INFO(format, ...)                   \
       PW_TOKENIZE_TO_GLOBAL_HANDLER_WITH_PAYLOAD( \
-          (uintptr_t)LogLevel_INFO,                   \
+          (pw_tokenizer_Payload)LogLevel_INFO,    \
           __FILE_NAME__ ":%d " format,            \
           __LINE__,                               \
           __VA_ARGS__);                           \
 
-  extern "C" void pw_TokenizerHandleEncodedMessageWithPayload(
+  extern "C" void pw_tokenizer_HandleEncodedMessageWithPayload(
       uintptr_t level, const uint8_t encoded_message[], size_t size_bytes) {
     if (static_cast<LogLevel>(level) >= current_log_level) {
       TransmitLog(TimeSinceBootMillis(), encoded_message, size_bytes);
@@ -294,6 +290,30 @@
 additional tokens, but it may not be desirable to fill a token database with
 duplicate log lines.
 
+Tokenizing function names
+-------------------------
+The string literal tokenization functions support tokenizing string literals or
+constexpr character arrays (``constexpr const char[]``). In GCC and Clang, the
+special ``__func__`` variable and ``__PRETTY_FUNCTION__`` extension are declared
+as ``static constexpr char[]`` in C++ instead of the standard ``static const
+char[]``. This means that ``__func__`` and ``__PRETTY_FUNCTION__`` can be
+tokenized while compiling C++ with GCC or Clang.
+
+.. code-block:: cpp
+
+  // Tokenize the special function name variables.
+  constexpr uint32_t function = PW_TOKENIZE_STRING(__func__);
+  constexpr uint32_t pretty_function = PW_TOKENIZE_STRING(__PRETTY_FUNCTION__);
+
+  // Tokenize the function name variables to a handler function.
+  PW_TOKENIZE_TO_GLOBAL_HANDLER(__func__)
+  PW_TOKENIZE_TO_GLOBAL_HANDLER(__PRETTY_FUNCTION__)
+
+Note that ``__func__`` and ``__PRETTY_FUNCTION__`` are not string literals.
+They are defined as static character arrays, so they cannot be implicitly
+concatentated with string literals. For example, ``printf(__func__ ": %d",
+123);`` will not compile.
+
 Tokenization in Python
 ----------------------
 The Python ``pw_tokenizer.encode`` module has limited support for encoding
@@ -333,39 +353,33 @@
 
 In C code, strings are hashed with a preprocessor macro. For compatibility with
 macros, the hash must be limited to a fixed maximum number of characters. This
-value is set by ``PW_TOKENIZER_CFG_HASH_LENGTH``.
+value is set by ``PW_TOKENIZER_CFG_C_HASH_LENGTH``. Increasing
+``PW_TOKENIZER_CFG_C_HASH_LENGTH`` increases the compilation time for C due to
+the complexity of the hashing macros.
 
-Increasing ``PW_TOKENIZER_CFG_HASH_LENGTH`` increases the compilation time for C
-due to the complexity of the hashing macros. C++ macros use a constexpr
-function instead of a macro, so the compilation time impact is minimal. Projects
-primarily in C++ may use a large value for ``PW_TOKENIZER_CFG_HASH_LENGTH``
-(perhaps even ``std::numeric_limits<size_t>::max()``).
+C++ macros use a constexpr function instead of a macro. This function works with
+any length of string and has lower compilation time impact than the C macros.
+For consistency, C++ tokenization uses the same hash algorithm, but the
+calculated values will differ between C and C++ for strings longer than
+``PW_TOKENIZER_CFG_C_HASH_LENGTH`` characters.
 
 Tokenization domains
 --------------------
-``pw_tokenizer`` supports having multiple tokenization domains. Strings from
-each tokenization domain are stored in separate sections in the ELF file. This
-allows projects to keep tokens from different sources separate. Potential use
-cases include the following:
+``pw_tokenizer`` supports having multiple tokenization domains. Domains are a
+string label associated with each tokenized string. This allows projects to keep
+tokens from different sources separate. Potential use cases include the
+following:
 
 * Keep large sets of tokenized strings separate to avoid collisions.
 * Create a separate database for a small number of strings that use truncated
   tokens, for example only 10 or 16 bits instead of the full 32 bits.
 
-Strings are tokenized by default into the "default" domain. For many projects,
-a single tokenization domain is sufficient, so no additional configuration is
-required.
-
-To support other multiple domains, add a ``pw_tokenized.<new domain name>``
-linker section, as described in ``pw_tokenizer_linker_sections.ld``. Strings are
-tokenized into a domain by providing the domain name as a string literal to the
-``*_DOMAIN`` versions of the tokenization macros. Domain names must be comprised
-of alphanumeric characters and underscores; spaces and special characters are
-not permitted.
+If no domain is specified, the domain is empty (``""``). For many projects, this
+default domain is sufficient, so no additional configuration is required.
 
 .. code-block:: cpp
 
-  // Tokenizes this string to the "default" domain.
+  // Tokenizes this string to the default ("") domain.
   PW_TOKENIZE_STRING("Hello, world!");
 
   // Tokenizes this string to the "my_custom_domain" domain.
@@ -485,10 +499,15 @@
 
 GN integration
 ^^^^^^^^^^^^^^
-Token databases may be updated as part of a GN build. The
+Token databases may be updated or created as part of a GN build. The
 ``pw_tokenizer_database`` template provided by ``dir_pw_tokenizer/database.gni``
-automatically updates a tokenized strings database in the source tree with
-artifacts from one or more GN targets or other database files.
+automatically updates an in-source tokenized strings database or creates a new
+database with artifacts from one or more GN targets or other database files.
+
+To create a new database, set the ``create`` variable to the desired database
+type (``"csv"`` or ``"binary"``). The database will be created in the output
+directory. To update an existing database, provide the path to the database with
+the ``database`` variable.
 
 Each database in the source tree can only be updated from a single
 ``pw_tokenizer_database`` rule. Updating the same database in multiple rules
@@ -499,7 +518,6 @@
 
 .. code-block::
 
-  # gn-format disable
   import("//build_overrides/pigweed.gni")
 
   import("$dir_pw_tokenizer/database.gni")
@@ -644,12 +662,12 @@
 Encoding
 --------
 To encode with the Base64 format, add a call to
-``pw::tokenizer::PrefixedBase64Encode`` or ``pw_TokenizerPrefixedBase64Encode``
+``pw::tokenizer::PrefixedBase64Encode`` or ``pw_tokenizer_PrefixedBase64Encode``
 in the tokenizer handler function. For example,
 
 .. code-block:: cpp
 
-  void pw_TokenizerHandleEncodedMessage(const uint8_t encoded_message[],
+  void pw_tokenizer_HandleEncodedMessage(const uint8_t encoded_message[],
                                         size_t size_bytes) {
     char base64_buffer[64];
     size_t base64_size = pw::tokenizer::PrefixedBase64Encode(
@@ -678,12 +696,12 @@
    "$pEVTYQkkUmhZam1RPT0=" → "Nested message: $RhYjmQ==" → "Nested message: Wow!"
 
 Base64 decoding is supported in C++ or C with the
-``pw::tokenizer::PrefixedBase64Decode`` or ``pw_TokenizerPrefixedBase64Decode``
+``pw::tokenizer::PrefixedBase64Decode`` or ``pw_tokenizer_PrefixedBase64Decode``
 functions.
 
 .. code-block:: cpp
 
-  void pw_TokenizerHandleEncodedMessage(const uint8_t encoded_message[],
+  void pw_tokenizer_HandleEncodedMessage(const uint8_t encoded_message[],
                                         size_t size_bytes) {
     char base64_buffer[64];
     size_t base64_size = pw::tokenizer::PrefixedBase64Encode(
@@ -692,6 +710,29 @@
     TransmitLogMessage(base64_buffer, base64_size);
   }
 
+Command line utilities
+^^^^^^^^^^^^^^^^^^^^^^
+``pw_tokenizer`` provides two standalone command line utilities for detokenizing
+Base64-encoded tokenized strings.
+
+* ``detokenize.py`` -- Detokenizes Base64-encoded strings in files or from
+  stdin.
+* ``detokenize_serial.py`` -- Detokenizes Base64-encoded strings from a
+  connected serial device.
+
+If the ``pw_tokenizer`` Python package is installed, these tools may be executed
+as runnable modules. For example:
+
+.. code-block::
+
+  # Detokenize Base64-encoded strings in a file
+  python -m pw_tokenizer.detokenize -i input_file.txt
+
+  # Detokenize Base64-encoded strings in output from a serial device
+  python -m pw_tokenizer.detokenize_serial --device /dev/ttyACM0
+
+See the ``--help`` options for these tools for full usage information.
+
 Deployment war story
 ====================
 The tokenizer module was developed to bring tokenized logging to an
@@ -728,7 +769,7 @@
   * The log level was passed as the payload argument to facilitate runtime log
     level control.
   * For this project, it was necessary to encode the log messages as text. In
-    ``pw_TokenizerHandleEncodedMessageWithPayload``, the log messages were
+    ``pw_tokenizer_HandleEncodedMessageWithPayload``, the log messages were
     encoded in the $-prefixed `Base64 format`_, then dispatched as normal log
     messages.
   * Asserts were tokenized using ``PW_TOKENIZE_TO_CALLBACK``.
@@ -820,7 +861,7 @@
 Supporting detokenization of strings tokenized on 64-bit targets would be
 simple. This could be done by adding an option to switch the 32-bit types to
 64-bit. The tokenizer stores the sizes of these types in the
-``.pw_tokenizer_info`` ELF section, so the sizes of these types can be verified
+``.pw_tokenizer.info`` ELF section, so the sizes of these types can be verified
 by checking the ELF file, if necessary.
 
 Tokenization in headers
@@ -861,6 +902,29 @@
 them as an integer. This would be efficient and simple, but only support a small
 number of arguments.
 
+Legacy tokenized string ELF format
+==================================
+The original version of ``pw_tokenizer`` stored tokenized strings as plain C
+strings in the ELF file instead of structured tokenized string entries. Strings
+in different domains were stored in different linker sections. The Python script
+that parsed the ELF file would re-calculate the tokens.
+
+In the current version of ``pw_tokenizer``, tokenized strings are stored in a
+structured entry containing a token, domain, and length-delimited string. This
+has several advantages over the legacy format:
+
+* The Python script does not have to recalculate the token, so any hash
+  algorithm may be used in the firmware.
+* In C++, the tokenization hash no longer has a length limitation.
+* Strings with null terminators in them are properly handled.
+* Only one linker section is required in the linker script, instead of a
+  separate section for each domain.
+
+To migrate to the new format, all that is required is to update the linker sections
+to match those in ``pw_tokenizer_linker_sections.ld``. Replace all
+``pw_tokenized.<DOMAIN>`` sections with one ``pw_tokenizer.entries`` section.
+The Python tooling continues to support the legacy tokenized string ELF format.
+
 Compatibility
 =============
   * C11
diff --git a/pw_tokenizer/encode_args.cc b/pw_tokenizer/encode_args.cc
index b53cc22..1ab08ed 100644
--- a/pw_tokenizer/encode_args.cc
+++ b/pw_tokenizer/encode_args.cc
@@ -24,41 +24,6 @@
 namespace tokenizer {
 namespace {
 
-// Store metadata about this compilation's string tokenization in the ELF.
-//
-// The tokenizer metadata will not go into the on-device executable binary code.
-// This metadata will be present in the ELF file's .pw_tokenizer_info section,
-// from which the host-side tooling (Python, Java, etc.) can understand how to
-// decode tokenized strings for the given binary. Only attributes that affect
-// the decoding process are recorded.
-//
-// Tokenizer metadata is stored in an array of key-value pairs. Each Metadata
-// object is 32 bytes: a 24-byte string and an 8-byte value. Metadata structs
-// may be parsed in Python with the struct format '24s<Q'.
-PW_PACKED(struct) Metadata {
-  char name[24];   // name of the metadata field
-  uint64_t value;  // value of the field
-};
-
-static_assert(sizeof(Metadata) == 32);
-
-// Store tokenization metadata in its own section. Mach-O files are not
-// supported by pw_tokenizer, but a short, Mach-O compatible section name is
-// used on macOS so that this file can at least compile.
-#if __APPLE__
-#define PW_TOKENIZER_INFO_SECTION PW_KEEP_IN_SECTION(".pw_info")
-#else
-#define PW_TOKENIZER_INFO_SECTION PW_KEEP_IN_SECTION(".pw_tokenzier_info")
-#endif  // __APPLE__
-
-constexpr Metadata metadata[] PW_TOKENIZER_INFO_SECTION = {
-    {"hash_length_bytes", PW_TOKENIZER_CFG_HASH_LENGTH},
-    {"sizeof_long", sizeof(long)},            // %l conversion specifier
-    {"sizeof_intmax_t", sizeof(intmax_t)},    // %j conversion specifier
-    {"sizeof_size_t", sizeof(size_t)},        // %z conversion specifier
-    {"sizeof_ptrdiff_t", sizeof(ptrdiff_t)},  // %t conversion specifier
-};
-
 // Declare the types as an enum for convenience.
 enum class ArgType : uint8_t {
   kInt = PW_TOKENIZER_ARG_TYPE_INT,
@@ -124,7 +89,7 @@
 
 }  // namespace
 
-size_t EncodeArgs(pw_TokenizerArgTypes types,
+size_t EncodeArgs(_pw_tokenizer_ArgTypes types,
                   va_list args,
                   std::span<uint8_t> output) {
   size_t arg_count = types & PW_TOKENIZER_TYPE_COUNT_MASK;
diff --git a/pw_tokenizer/global_handlers_test.cc b/pw_tokenizer/global_handlers_test.cc
index 89885a8..6cac806 100644
--- a/pw_tokenizer/global_handlers_test.cc
+++ b/pw_tokenizer/global_handlers_test.cc
@@ -24,22 +24,10 @@
 namespace pw::tokenizer {
 namespace {
 
-// The hash to use for this test. This makes sure the strings are shorter than
-// the configured max length to ensure this test works with any reasonable
-// configuration.
-template <size_t kSize>
-constexpr uint32_t TestHash(const char (&string)[kSize]) {
-  constexpr unsigned kTestHashLength = 48;
-  static_assert(kTestHashLength <= PW_TOKENIZER_CFG_HASH_LENGTH);
-  static_assert(kSize <= kTestHashLength + 1);
-  return PwTokenizer65599FixedLengthHash(std::string_view(string, kSize - 1),
-                                         kTestHashLength);
-}
-
 // Constructs an array with the hashed string followed by the provided bytes.
 template <uint8_t... kData, size_t kSize>
 constexpr auto ExpectedData(const char (&format)[kSize]) {
-  const uint32_t value = TestHash(format);
+  const uint32_t value = Hash(format);
   return std::array<uint8_t, sizeof(uint32_t) + sizeof...(kData)>{
       static_cast<uint8_t>(value & 0xff),
       static_cast<uint8_t>(value >> 8 & 0xff),
@@ -104,7 +92,7 @@
 }
 
 TEST_F(TokenizeToGlobalHandler, C_SequentialZigZag) {
-  pw_TokenizeToGlobalHandlerTest_SequentialZigZag();
+  pw_tokenizer_ToGlobalHandlerTest_SequentialZigZag();
 
   constexpr std::array<uint8_t, 18> expected =
       ExpectedData<0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13>(
@@ -113,15 +101,15 @@
   EXPECT_EQ(std::memcmp(expected.data(), message_, expected.size()), 0);
 }
 
-extern "C" void pw_TokenizerHandleEncodedMessage(const uint8_t* encoded_message,
-                                                 size_t size_bytes) {
+extern "C" void pw_tokenizer_HandleEncodedMessage(
+    const uint8_t* encoded_message, size_t size_bytes) {
   TokenizeToGlobalHandler::SetMessage(encoded_message, size_bytes);
 }
 
 class TokenizeToGlobalHandlerWithPayload
     : public GlobalMessage<TokenizeToGlobalHandlerWithPayload> {
  public:
-  static void SetPayload(pw_TokenizerPayload payload) {
+  static void SetPayload(pw_tokenizer_Payload payload) {
     payload_ = static_cast<intptr_t>(payload);
   }
 
@@ -140,13 +128,18 @@
       ExpectedData<0, 0, 0x00, 0x00, 0x00, 0x80, 0>("%x%lld%1.2f%s");
 
   PW_TOKENIZE_TO_GLOBAL_HANDLER_WITH_PAYLOAD(
-      static_cast<pw_TokenizerPayload>(123), "%x%lld%1.2f%s", 0, 0ll, -0.0, "");
+      static_cast<pw_tokenizer_Payload>(123),
+      "%x%lld%1.2f%s",
+      0,
+      0ll,
+      -0.0,
+      "");
   ASSERT_EQ(expected.size(), message_size_bytes_);
   EXPECT_EQ(std::memcmp(expected.data(), message_, expected.size()), 0);
   EXPECT_EQ(payload_, 123);
 
   PW_TOKENIZE_TO_GLOBAL_HANDLER_WITH_PAYLOAD(
-      static_cast<pw_TokenizerPayload>(-543),
+      static_cast<pw_tokenizer_Payload>(-543),
       "%x%lld%1.2f%s",
       0,
       0ll,
@@ -170,7 +163,7 @@
 
 TEST_F(TokenizeToGlobalHandlerWithPayload, Strings_NonZeroPayload) {
   PW_TOKENIZE_TO_GLOBAL_HANDLER_WITH_PAYLOAD(
-      static_cast<pw_TokenizerPayload>(5432), "The answer is: %s", "5432!");
+      static_cast<pw_tokenizer_Payload>(5432), "The answer is: %s", "5432!");
 
   ASSERT_EQ(kExpected.size(), message_size_bytes_);
   EXPECT_EQ(std::memcmp(kExpected.data(), message_, kExpected.size()), 0);
@@ -180,7 +173,7 @@
 TEST_F(TokenizeToGlobalHandlerWithPayload, Domain_Strings) {
   PW_TOKENIZE_TO_GLOBAL_HANDLER_WITH_PAYLOAD_DOMAIN(
       "TEST_DOMAIN",
-      static_cast<pw_TokenizerPayload>(5432),
+      static_cast<pw_tokenizer_Payload>(5432),
       "The answer is: %s",
       "5432!");
   ASSERT_EQ(kExpected.size(), message_size_bytes_);
@@ -197,7 +190,7 @@
   Foo foo{254u, true};
 
   PW_TOKENIZE_TO_GLOBAL_HANDLER_WITH_PAYLOAD(
-      reinterpret_cast<pw_TokenizerPayload>(&foo), "Boring!");
+      reinterpret_cast<pw_tokenizer_Payload>(&foo), "Boring!");
 
   constexpr auto expected = ExpectedData("Boring!");
   static_assert(expected.size() == 4);
@@ -211,7 +204,7 @@
 }
 
 TEST_F(TokenizeToGlobalHandlerWithPayload, C_SequentialZigZag) {
-  pw_TokenizeToGlobalHandlerWithPayloadTest_SequentialZigZag();
+  pw_tokenizer_ToGlobalHandlerWithPayloadTest_SequentialZigZag();
 
   constexpr std::array<uint8_t, 18> expected =
       ExpectedData<0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13>(
@@ -221,19 +214,18 @@
   EXPECT_EQ(payload_, 600613);
 }
 
-extern "C" void pw_TokenizerHandleEncodedMessageWithPayload(
-    pw_TokenizerPayload payload,
+extern "C" void pw_tokenizer_HandleEncodedMessageWithPayload(
+    pw_tokenizer_Payload payload,
     const uint8_t* encoded_message,
     size_t size_bytes) {
   TokenizeToGlobalHandlerWithPayload::SetMessage(encoded_message, size_bytes);
   TokenizeToGlobalHandlerWithPayload::SetPayload(payload);
 }
 
-// Hijack the PW_TOKENIZE_STRING_DOMAIN macro to capture the tokenizer domain.
-#undef PW_TOKENIZE_STRING_DOMAIN
-#define PW_TOKENIZE_STRING_DOMAIN(domain, string)                 \
-  /* assigned to a variable */ PW_TOKENIZER_STRING_TOKEN(string); \
-  tokenizer_domain = domain;                                      \
+// Hijack an internal macro to capture the tokenizer domain.
+#undef _PW_TOKENIZER_RECORD_ORIGINAL_STRING
+#define _PW_TOKENIZER_RECORD_ORIGINAL_STRING(token, domain, string) \
+  tokenizer_domain = domain;                                        \
   string_literal = string
 
 TEST_F(TokenizeToGlobalHandler, Domain_Default) {
@@ -261,7 +253,7 @@
   const char* string_literal = nullptr;
 
   PW_TOKENIZE_TO_GLOBAL_HANDLER_WITH_PAYLOAD(
-      static_cast<pw_TokenizerPayload>(123), "Wow%s", "???");
+      static_cast<pw_tokenizer_Payload>(123), "Wow%s", "???");
 
   EXPECT_STREQ(tokenizer_domain, PW_TOKENIZER_DEFAULT_DOMAIN);
   EXPECT_STREQ(string_literal, "Wow%s");
@@ -272,7 +264,7 @@
   const char* string_literal = nullptr;
 
   PW_TOKENIZE_TO_GLOBAL_HANDLER_WITH_PAYLOAD_DOMAIN(
-      "THEDOMAIN", static_cast<pw_TokenizerPayload>(123), "1234567890");
+      "THEDOMAIN", static_cast<pw_tokenizer_Payload>(123), "1234567890");
 
   EXPECT_STREQ(tokenizer_domain, "THEDOMAIN");
   EXPECT_STREQ(string_literal, "1234567890");
diff --git a/pw_tokenizer/global_handlers_test_c.c b/pw_tokenizer/global_handlers_test_c.c
index e0619e5..04d5551 100644
--- a/pw_tokenizer/global_handlers_test_c.c
+++ b/pw_tokenizer/global_handlers_test_c.c
@@ -26,7 +26,7 @@
 // This test invokes the tokenization API with a variety of types. To simplify
 // validating the encoded data, numbers that are sequential when zig-zag encoded
 // are used as arguments.
-void pw_TokenizeToGlobalHandlerTest_SequentialZigZag(void) {
+void pw_tokenizer_ToGlobalHandlerTest_SequentialZigZag(void) {
   PW_TOKENIZE_TO_GLOBAL_HANDLER(TEST_FORMAT_SEQUENTIAL_ZIG_ZAG,
                                 0u,
                                 -1,
@@ -44,8 +44,8 @@
                                 (signed char)-7);
 }
 
-void pw_TokenizeToGlobalHandlerWithPayloadTest_SequentialZigZag(void) {
-  PW_TOKENIZE_TO_GLOBAL_HANDLER_WITH_PAYLOAD((pw_TokenizerPayload)600613,
+void pw_tokenizer_ToGlobalHandlerWithPayloadTest_SequentialZigZag(void) {
+  PW_TOKENIZE_TO_GLOBAL_HANDLER_WITH_PAYLOAD((pw_tokenizer_Payload)600613,
                                              TEST_FORMAT_SEQUENTIAL_ZIG_ZAG,
                                              0u,
                                              -1,
diff --git a/pw_tokenizer/hash.cc b/pw_tokenizer/hash.cc
new file mode 100644
index 0000000..8cb3f26
--- /dev/null
+++ b/pw_tokenizer/hash.cc
@@ -0,0 +1,28 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_tokenizer/hash.h"
+
+namespace pw {
+namespace tokenizer {
+
+extern "C" uint32_t pw_tokenizer_65599FixedLengthHash(const char* string,
+                                                      size_t string_length,
+                                                      size_t hash_length) {
+  return PwTokenizer65599FixedLengthHash(
+      std::string_view(string, string_length), hash_length);
+}
+
+}  // namespace tokenizer
+}  // namespace pw
diff --git a/pw_tokenizer/hash_test.cc b/pw_tokenizer/hash_test.cc
index 014be0d..de6681e 100644
--- a/pw_tokenizer/hash_test.cc
+++ b/pw_tokenizer/hash_test.cc
@@ -12,6 +12,10 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
+// This tests the C hashing macros and C++ hashing functions.
+
+#include "pw_tokenizer/hash.h"
+
 #include <cstddef>
 #include <cstdint>
 
@@ -20,7 +24,6 @@
 #include "pw_tokenizer/internal/pw_tokenizer_65599_fixed_length_128_hash_macro.h"
 #include "pw_tokenizer/internal/pw_tokenizer_65599_fixed_length_80_hash_macro.h"
 #include "pw_tokenizer/internal/pw_tokenizer_65599_fixed_length_96_hash_macro.h"
-#include "pw_tokenizer/pw_tokenizer_65599_fixed_length_hash.h"
 #include "pw_tokenizer_private/generated_hash_test_cases.h"
 
 namespace pw::tokenizer {
@@ -44,7 +47,6 @@
 
 static_assert(CheckGeneratedCases(),
               "Hashes in the generated test cases must match");
-
 TEST(Hashing, GeneratedCasesAtRuntime) {
   for (const auto [string, hash_length, python_hash, macro_hash] : kHashTests) {
     const uint32_t calculated_hash =
@@ -56,10 +58,10 @@
 
 // Gets the size of the string, excluding the null terminator. A uint32_t is
 // used instead of a size_t since the hash calculation requires a uint32_t.
-template <uint32_t kSizeIncludingNull>
-constexpr uint32_t StringLength(const char (&)[kSizeIncludingNull]) {
-  static_assert(kSizeIncludingNull > 0u);
-  return kSizeIncludingNull - 1;  // subtract the null terminator
+template <uint32_t size_with_null>
+constexpr uint32_t StringLength(const char (&)[size_with_null]) {
+  static_assert(size_with_null > 0u);
+  return size_with_null - 1;  // subtract the null terminator
 }
 
 TEST(Hashing, Runtime) PW_NO_SANITIZE("unsigned-integer-overflow") {
@@ -103,7 +105,18 @@
       PwTokenizer65599FixedLengthHash(                                         \
           std::string_view(string_literal, sizeof(string_literal) - 1),        \
           128) == PW_TOKENIZER_65599_FIXED_LENGTH_128_HASH(string_literal),    \
-      "128-byte hash mismatch!")
+      "128-byte hash mismatch!");                                              \
+  static_assert(                                                               \
+      PwTokenizer65599FixedLengthHash(                                         \
+          std::string_view(string_literal, sizeof(string_literal) - 1),        \
+          sizeof(string_literal) - 1) == Hash(string_literal),                 \
+      "Hash function mismatch!");                                              \
+  EXPECT_EQ(PwTokenizer65599FixedLengthHash(                                   \
+                std::string_view(string_literal, sizeof(string_literal) - 1),  \
+                sizeof(string_literal) - 1),                                   \
+            pw_tokenizer_65599FixedLengthHash(string_literal,                  \
+                                              sizeof(string_literal) - 1,      \
+                                              sizeof(string_literal) - 1))
 
 TEST(HashMacro, Empty) { TEST_SUPPORTED_HASHES(""); }
 
diff --git a/pw_tokenizer/public/pw_tokenizer/base64.h b/pw_tokenizer/public/pw_tokenizer/base64.h
index bf84e29..735196a 100644
--- a/pw_tokenizer/public/pw_tokenizer/base64.h
+++ b/pw_tokenizer/public/pw_tokenizer/base64.h
@@ -38,25 +38,27 @@
 
 PW_EXTERN_C_START
 
-// Encodes a binary tokenized message as prefixed Base64. Returns the size of
-// the number of characters written to output_buffer. Returns 0 if the buffer is
-// too small.
+// Encodes a binary tokenized message as prefixed Base64 with a null terminator.
+// Returns the encoded string length (excluding the null terminator). Returns 0
+// if the buffer is too small. Always null terminates if the output buffer is
+// not empty.
 //
 // Equivalent to pw::tokenizer::PrefixedBase64Encode.
-size_t pw_TokenizerPrefixedBase64Encode(const void* binary_message,
-                                        size_t binary_size_bytes,
-                                        void* output_buffer,
-                                        size_t output_buffer_size_bytes);
+size_t pw_tokenizer_PrefixedBase64Encode(const void* binary_message,
+                                         size_t binary_size_bytes,
+                                         void* output_buffer,
+                                         size_t output_buffer_size_bytes);
+
 // Decodes a prefixed Base64 tokenized message to binary. Returns the size of
 // the decoded binary data. The resulting data is ready to be passed to
 // pw::tokenizer::Detokenizer::Detokenize. Returns 0 if the buffer is too small,
 // the expected prefix character is missing, or the Base64 data is corrupt.
 //
 // Equivalent to pw::tokenizer::PrefixedBase64Encode.
-size_t pw_TokenizerPrefixedBase64Decode(const void* base64_message,
-                                        size_t base64_size_bytes,
-                                        void* output_buffer,
-                                        size_t output_buffer_size);
+size_t pw_tokenizer_PrefixedBase64Decode(const void* base64_message,
+                                         size_t base64_size_bytes,
+                                         void* output_buffer,
+                                         size_t output_buffer_size);
 
 PW_EXTERN_C_END
 
@@ -65,19 +67,37 @@
 #include <span>
 #include <string_view>
 
+#include "pw_base64/base64.h"
+#include "pw_tokenizer/config.h"
+#include "pw_tokenizer/tokenize.h"
+
 namespace pw::tokenizer {
 
 inline constexpr char kBase64Prefix = PW_TOKENIZER_BASE64_PREFIX;
 
-// Encodes a binary tokenized message as prefixed Base64. Returns the size of
-// the number of characters written to output_buffer. Returns 0 if the buffer is
-// too small or does not start with kBase64Prefix.
+// Returns the size of a tokenized message (token + arguments) when encoded as
+// prefixed Base64. This can be used to size a buffer for encoding. Includes
+// room for the prefix character ($), encoded message, and a null terminator.
+constexpr size_t Base64EncodedBufferSize(size_t message_size) {
+  return sizeof(kBase64Prefix) + base64::EncodedSize(message_size) +
+         sizeof('\0');
+}
+
+// The minimum buffer size that can hold a tokenized message that is
+// PW_TOKENIZER_CFG_ENCODING_BUFFER_SIZE_BYTES long encoded as prefixed Base64.
+inline constexpr size_t kDefaultBase64EncodedBufferSize =
+    Base64EncodedBufferSize(PW_TOKENIZER_CFG_ENCODING_BUFFER_SIZE_BYTES);
+
+// Encodes a binary tokenized message as prefixed Base64 with a null terminator.
+// Returns the encoded string length (excluding the null terminator). Returns 0
+// if the buffer is too small. Always null terminates if the output buffer is
+// not empty.
 inline size_t PrefixedBase64Encode(std::span<const std::byte> binary_message,
                                    std::span<char> output_buffer) {
-  return pw_TokenizerPrefixedBase64Encode(binary_message.data(),
-                                          binary_message.size(),
-                                          output_buffer.data(),
-                                          output_buffer.size());
+  return pw_tokenizer_PrefixedBase64Encode(binary_message.data(),
+                                           binary_message.size(),
+                                           output_buffer.data(),
+                                           output_buffer.size());
 }
 
 // Also accept a std::span<const uint8_t> for the binary message.
@@ -91,16 +111,16 @@
 // pw::tokenizer::Detokenizer::Detokenize.
 inline size_t PrefixedBase64Decode(std::string_view base64_message,
                                    std::span<std::byte> output_buffer) {
-  return pw_TokenizerPrefixedBase64Decode(base64_message.data(),
-                                          base64_message.size(),
-                                          output_buffer.data(),
-                                          output_buffer.size());
+  return pw_tokenizer_PrefixedBase64Decode(base64_message.data(),
+                                           base64_message.size(),
+                                           output_buffer.data(),
+                                           output_buffer.size());
 }
 
 // Decodes a prefixed Base64 tokenized message to binary in place. Returns the
 // size of the decoded binary data.
 inline size_t PrefixedBase64DecodeInPlace(std::span<std::byte> buffer) {
-  return pw_TokenizerPrefixedBase64Decode(
+  return pw_tokenizer_PrefixedBase64Decode(
       buffer.data(), buffer.size(), buffer.data(), buffer.size());
 }
 
diff --git a/pw_tokenizer/public/pw_tokenizer/config.h b/pw_tokenizer/public/pw_tokenizer/config.h
index 2e49fa3..3614112 100644
--- a/pw_tokenizer/public/pw_tokenizer/config.h
+++ b/pw_tokenizer/public/pw_tokenizer/config.h
@@ -16,9 +16,6 @@
 #pragma once
 
 #include <assert.h>
-#include <stdint.h>
-
-// TODO(pwbug/17): Configure these options in the config system.
 
 // For a tokenized string that has arguments, the types of the arguments are
 // encoded in either a 4-byte (uint32_t) or a 8-byte (uint64_t) value. The 4 or
@@ -34,30 +31,32 @@
                   PW_TOKENIZER_CFG_ARG_TYPES_SIZE_BYTES == 8,
               "PW_TOKENIZER_CFG_ARG_TYPES_SIZE_BYTES must be 4 or 8");
 
-// How long of a string to hash. Strings shorter than this length are treated as
-// if they were zero-padded up to the length. Strings that are the same length
-// and share a common prefix longer than this value hash to the same value.
+// Maximum number of characters to hash in C. In C code, strings shorter than
+// this length are treated as if they were zero-padded up to the length. Strings
+// that are the same length and share a common prefix longer than this value
+// hash to the same value. Increasing PW_TOKENIZER_CFG_C_HASH_LENGTH increases
+// the compilation time for C due to the complexity of the hashing macros.
 //
-// Increasing PW_TOKENIZER_CFG_HASH_LENGTH increases the compilation time for C
-// due to the complexity of the hashing macros. C++ macros use a constexpr
-// function instead of a macro, so the compilation time impact is minimal.
-// Projects primarily in C++ should use a large value for
-// PW_TOKENIZER_CFG_HASH_LENGTH (perhaps even
-// std::numeric_limits<size_t>::max()).
+// PW_TOKENIZER_CFG_C_HASH_LENGTH has no effect on C++ code. In C++, hashing is
+// done with a constexpr function instead of a macro. There are no string length
+// limitations and compilation times are unaffected by this macro.
 //
 // Only hash lengths for which there is a corresponding macro header
 // (pw_tokenizer/internal/mash_macro_#.h) are supported. Additional macros may
 // be generated with the generate_hash_macro.py function. New macro headers must
 // then be added to pw_tokenizer/internal/hash.h.
-#ifndef PW_TOKENIZER_CFG_HASH_LENGTH
-#define PW_TOKENIZER_CFG_HASH_LENGTH 128
-#endif  // PW_TOKENIZER_CFG_HASH_LENGTH
+//
+// This MUST match the value of DEFAULT_C_HASH_LENGTH in
+// pw_tokenizer/py/pw_tokenizer/tokens.py.
+#ifndef PW_TOKENIZER_CFG_C_HASH_LENGTH
+#define PW_TOKENIZER_CFG_C_HASH_LENGTH 128
+#endif  // PW_TOKENIZER_CFG_C_HASH_LENGTH
 
 // The size of the stack-allocated argument encoding buffer to use. This only
 // affects tokenization macros that stack-allocate the encoding buffer
-// (PW_TOKENIZE_TO_CALLBACK and PW_TOKENIZE_TO_GLOBAL_HANDLER). This buffer size
-// is only allocated for argument encoding and does not include the 4-byte
-// token.
+// (PW_TOKENIZE_TO_CALLBACK and PW_TOKENIZE_TO_GLOBAL_HANDLER). A buffer of this
+// size is allocated and used for the 4-byte token and for encoding all
+// arguments. It must be at least large enough for the token (4 bytes).
 //
 // This buffer does not need to be large to accommodate a good number of
 // tokenized string arguments. Integer arguments are usually encoded smaller
@@ -65,5 +64,5 @@
 // point types are encoded as four bytes. Null-terminated strings are encoded
 // 1:1 in size.
 #ifndef PW_TOKENIZER_CFG_ENCODING_BUFFER_SIZE_BYTES
-#define PW_TOKENIZER_CFG_ENCODING_BUFFER_SIZE_BYTES 48
+#define PW_TOKENIZER_CFG_ENCODING_BUFFER_SIZE_BYTES 52
 #endif  // PW_TOKENIZER_CFG_ENCODING_BUFFER_SIZE_BYTES
diff --git a/pw_tokenizer/public/pw_tokenizer/hash.h b/pw_tokenizer/public/pw_tokenizer/hash.h
new file mode 100644
index 0000000..2cd8cf3
--- /dev/null
+++ b/pw_tokenizer/public/pw_tokenizer/hash.h
@@ -0,0 +1,113 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <stddef.h>
+#include <stdint.h>
+
+#ifdef __cplusplus
+
+#include <string_view>
+
+#include "pw_preprocessor/compiler.h"
+#include "pw_preprocessor/util.h"
+#include "pw_tokenizer/config.h"
+
+namespace pw::tokenizer {
+
+// The constant to use when generating the hash. Changing this changes the value
+// of all hashes, so do not change it randomly.
+inline constexpr uint32_t k65599HashConstant = 65599u;
+
+// Calculates the hash of a string. This function calculates hashes at either
+// runtime or compile time in C++ code.
+//
+// Unlike the C hashing macro, this hash supports strings of any length. Strings
+// longer than the maximum C hashing macro's length will hash different values
+// in C and C++. If the same very long string is used in C and C++, the string
+// will appear with both tokens in the resulting database.
+//
+// This hash is calculated with the following equation, where s is the string
+// and k is the maximum hash length:
+//
+//    H(s, k) = len(s) + 65599 * s[0] + 65599^2 * s[1] + ... + 65599^k * s[k-1]
+//
+// The hash algorithm is a modified version of the x65599 hash used by the SDBM
+// open source project. The modifications were made to support hashing in C
+// macros. These are the differences from x65599:
+//
+//   - Characters are hashed in reverse order.
+//   - The string length is hashed as the first character in the string.
+//
+constexpr uint32_t Hash(std::string_view string)
+    PW_NO_SANITIZE("unsigned-integer-overflow") {
+  // The length is hashed as if it were the first character.
+  uint32_t hash = string.size();
+  uint32_t coefficient = k65599HashConstant;
+
+  // Hash all of the characters in the string as unsigned ints.
+  // The coefficient calculation is done modulo 0x100000000, so the unsigned
+  // integer overflows are intentional.
+  for (uint8_t ch : string) {
+    hash += coefficient * ch;
+    coefficient *= k65599HashConstant;
+  }
+
+  return hash;
+}
+
+// Take the string as an array to support either literals or character arrays,
+// but not const char*.
+template <size_t size>
+constexpr uint32_t Hash(const char (&string)[size]) {
+  static_assert(size > 0);
+  return Hash(std::string_view(string, size - 1));
+}
+
+// This hash function is equivalent to the C hashing macros. It hashes a string
+// up to a maximum length.
+constexpr uint32_t PwTokenizer65599FixedLengthHash(
+    std::string_view string,
+    size_t hash_length = PW_TOKENIZER_CFG_C_HASH_LENGTH)
+    PW_NO_SANITIZE("unsigned-integer-overflow") {
+  uint32_t hash = string.size();
+  uint32_t coefficient = k65599HashConstant;
+
+  for (uint8_t ch : string.substr(0, hash_length)) {
+    hash += coefficient * ch;
+    coefficient *= k65599HashConstant;
+  }
+
+  return hash;
+}
+
+// Character array version of PwTokenizer65599FixedLengthHash.
+template <size_t size>
+constexpr uint32_t PwTokenizer65599FixedLengthHash(
+    const char (&string)[size],
+    size_t hash_length = PW_TOKENIZER_CFG_C_HASH_LENGTH) {
+  static_assert(size > 0);
+  return PwTokenizer65599FixedLengthHash(std::string_view(string, size - 1),
+                                         hash_length);
+}
+
+}  // namespace pw::tokenizer
+
+#endif  // __cplusplus
+
+// C version of the fixed-length hash. Can be used to calculate hashes
+// equivalent to the hashing macros at runtime in C.
+PW_EXTERN_C uint32_t pw_tokenizer_65599FixedLengthHash(const char* string,
+                                                       size_t string_length,
+                                                       size_t hash_length);
diff --git a/pw_tokenizer/public/pw_tokenizer/internal/argument_types.h b/pw_tokenizer/public/pw_tokenizer/internal/argument_types.h
index 0f376ec..1f431e4 100644
--- a/pw_tokenizer/public/pw_tokenizer/internal/argument_types.h
+++ b/pw_tokenizer/public/pw_tokenizer/internal/argument_types.h
@@ -17,7 +17,7 @@
 
 #include <stdint.h>
 
-#include "pw_preprocessor/macro_arg_count.h"
+#include "pw_preprocessor/arguments.h"
 #include "pw_tokenizer/config.h"
 
 // The size of the argument types variable determines the number of arguments
@@ -31,7 +31,7 @@
 #define PW_TOKENIZER_TYPE_COUNT_SIZE_BITS 4u
 #define PW_TOKENIZER_TYPE_COUNT_MASK 0x0Fu
 
-typedef uint32_t pw_TokenizerArgTypes;
+typedef uint32_t _pw_tokenizer_ArgTypes;
 
 #elif PW_TOKENIZER_CFG_ARG_TYPES_SIZE_BYTES == 8
 
@@ -42,7 +42,7 @@
 #define PW_TOKENIZER_TYPE_COUNT_SIZE_BITS 6u
 #define PW_TOKENIZER_TYPE_COUNT_MASK 0x1Fu  // only 5 bits will be needed
 
-typedef uint64_t pw_TokenizerArgTypes;
+typedef uint64_t _pw_tokenizer_ArgTypes;
 
 #else
 
@@ -52,7 +52,7 @@
 
 // The tokenized string encoding function is a variadic function that works
 // similarly to printf. Instead of a format string, however, the argument types
-// are packed into a pw_TokenizerArgTypes.
+// are packed into a _pw_tokenizer_ArgTypes.
 //
 // The four supported argument types are represented by two-bit argument codes.
 // Just four types are required because only printf-compatible arguments are
@@ -62,10 +62,10 @@
 // char* values cannot be printed as pointers with %p. These arguments are
 // always encoded as strings. To format a char* as an address, cast it to void*
 // or an integer.
-#define PW_TOKENIZER_ARG_TYPE_INT ((pw_TokenizerArgTypes)0)
-#define PW_TOKENIZER_ARG_TYPE_INT64 ((pw_TokenizerArgTypes)1)
-#define PW_TOKENIZER_ARG_TYPE_DOUBLE ((pw_TokenizerArgTypes)2)
-#define PW_TOKENIZER_ARG_TYPE_STRING ((pw_TokenizerArgTypes)3)
+#define PW_TOKENIZER_ARG_TYPE_INT ((_pw_tokenizer_ArgTypes)0)
+#define PW_TOKENIZER_ARG_TYPE_INT64 ((_pw_tokenizer_ArgTypes)1)
+#define PW_TOKENIZER_ARG_TYPE_DOUBLE ((_pw_tokenizer_ArgTypes)2)
+#define PW_TOKENIZER_ARG_TYPE_STRING ((_pw_tokenizer_ArgTypes)3)
 
 // Select the int argument type based on the size of the type. Values smaller
 // than int are promoted to int.
@@ -85,11 +85,11 @@
 namespace pw {
 namespace tokenizer {
 
-#if __cpp_if_constexpr  // C++17 version
+#ifdef __cpp_if_constexpr  // C++17 version
 
 // This function selects the matching type enum for supported argument types.
 template <typename T>
-constexpr pw_TokenizerArgTypes VarargsType() {
+constexpr _pw_tokenizer_ArgTypes VarargsType() {
   using ArgType = std::decay_t<T>;
 
   if constexpr (std::is_floating_point<ArgType>()) {
@@ -116,26 +116,26 @@
 
 template <typename T, bool kDontCare1, bool kDontCare2>
 struct SelectVarargsType<T, true, kDontCare1, kDontCare2> {
-  static constexpr pw_TokenizerArgTypes kValue = PW_TOKENIZER_ARG_TYPE_DOUBLE;
+  static constexpr _pw_tokenizer_ArgTypes kValue = PW_TOKENIZER_ARG_TYPE_DOUBLE;
 };
 
 template <typename T, bool kDontCare>
 struct SelectVarargsType<T, false, true, kDontCare> {
-  static constexpr pw_TokenizerArgTypes kValue = PW_TOKENIZER_ARG_TYPE_STRING;
+  static constexpr _pw_tokenizer_ArgTypes kValue = PW_TOKENIZER_ARG_TYPE_STRING;
 };
 
 template <typename T>
 struct SelectVarargsType<T, false, false, true> {
-  static constexpr pw_TokenizerArgTypes kValue = PW_TOKENIZER_ARG_TYPE_INT64;
+  static constexpr _pw_tokenizer_ArgTypes kValue = PW_TOKENIZER_ARG_TYPE_INT64;
 };
 
 template <typename T>
 struct SelectVarargsType<T, false, false, false> {
-  static constexpr pw_TokenizerArgTypes kValue = PW_TOKENIZER_ARG_TYPE_INT;
+  static constexpr _pw_tokenizer_ArgTypes kValue = PW_TOKENIZER_ARG_TYPE_INT;
 };
 
 template <typename T>
-constexpr pw_TokenizerArgTypes VarargsType() {
+constexpr _pw_tokenizer_ArgTypes VarargsType() {
   return SelectVarargsType<typename std::decay<T>::type>::kValue;
 }
 
@@ -174,21 +174,14 @@
 
 #endif  // __cplusplus
 
-// Encodes the types of the provided arguments as a pw_TokenizerArgTypes value.
-// Depending on the size of pw_TokenizerArgTypes, the bottom 4 or 6 bits store
-// the number of arguments and the remaining bits store the types, two bits per
-// type.
+// Encodes the types of the provided arguments as a _pw_tokenizer_ArgTypes
+// value. Depending on the size of _pw_tokenizer_ArgTypes, the bottom 4 or 6
+// bits store the number of arguments and the remaining bits store the types,
+// two bits per type.
 //
 // The arguments are not evaluated; only their types are used to
 // select the set their corresponding PW_TOKENIZER_ARG_TYPEs.
 #define PW_TOKENIZER_ARG_TYPES(...) \
-  _PW_TOKENIZER_TYPES_N(PW_ARG_COUNT(__VA_ARGS__), __VA_ARGS__)
+  PW_DELEGATE_BY_ARG_COUNT(_PW_TOKENIZER_TYPES_, __VA_ARGS__)
 
-// Selects which _PW_TOKENIZER_TYPES_* macro to use based on the number of
-// arguments this was called with.
-#define _PW_TOKENIZER_TYPES_N(count, ...) \
-  _PW_TOKENIZER_TYPES_EXPAND_N(count, __VA_ARGS__)
-#define _PW_TOKENIZER_TYPES_EXPAND_N(count, ...) \
-  _PW_TOKENIZER_TYPES_##count(__VA_ARGS__)
-
-#define _PW_TOKENIZER_TYPES_0() ((pw_TokenizerArgTypes)0)
+#define _PW_TOKENIZER_TYPES_0() ((_pw_tokenizer_ArgTypes)0)
diff --git a/pw_tokenizer/public/pw_tokenizer/internal/tokenize_string.h b/pw_tokenizer/public/pw_tokenizer/internal/tokenize_string.h
index b30223a..06cefd7 100644
--- a/pw_tokenizer/public/pw_tokenizer/internal/tokenize_string.h
+++ b/pw_tokenizer/public/pw_tokenizer/internal/tokenize_string.h
@@ -13,8 +13,8 @@
 // the License.
 
 // Selects the hash macro implementation to use. The implementation selected
-// depends on the language (C or C++) and value of PW_TOKENIZER_CFG_HASH_LENGTH.
-// The options are:
+// depends on the language (C or C++) and value of
+// PW_TOKENIZER_CFG_C_HASH_LENGTH. The options are:
 //
 //   - C++ hash constexpr function, which works for any hash length
 //   - C 80-character hash macro
@@ -27,29 +27,92 @@
 
 #include <stdint.h>
 
+#define _PW_TOKENIZER_ENTRY_MAGIC UINT32_C(0xBAA98DEE)
+
+#ifdef __cplusplus
+
+#include <array>
+
+namespace pw {
+namespace tokenizer {
+namespace internal {
+
+// The C++ tokenized string entry supports both string literals and char arrays,
+// such as __func__.
+template <uint32_t domain_size, uint32_t string_size>
+PW_PACKED(class)
+Entry {
+ public:
+  constexpr Entry(uint32_t token,
+                  const char(&domain)[domain_size],
+                  const char(&string)[string_size])
+      : magic_(_PW_TOKENIZER_ENTRY_MAGIC),
+        token_(token),
+        domain_size_(domain_size),
+        string_size_(string_size),
+        domain_(std::to_array(domain)),
+        string_(std::to_array(string)) {}
+
+ private:
+  static_assert(string_size > 0u && domain_size > 0u);
+
+  uint32_t magic_;
+  uint32_t token_;
+  uint32_t domain_size_;
+  uint32_t string_size_;
+  std::array<char, domain_size> domain_;
+  std::array<char, string_size> string_;
+};
+
+}  // namespace internal
+}  // namespace tokenizer
+}  // namespace pw
+
+#else  // In C, define a struct inline with appropriately-sized string members.
+
+#define _PW_TOKENIZER_STRING_ENTRY(                   \
+    calculated_token, domain_literal, string_literal) \
+  PW_PACKED(struct) {                                 \
+    uint32_t magic;                                   \
+    uint32_t token;                                   \
+    uint32_t domain_size;                             \
+    uint32_t string_length;                           \
+    char domain[sizeof(domain_literal)];              \
+    char string[sizeof(string_literal)];              \
+  }                                                   \
+  _PW_TOKENIZER_UNIQUE(_pw_tokenizer_string_entry_)   \
+  _PW_TOKENIZER_SECTION = {                           \
+      _PW_TOKENIZER_ENTRY_MAGIC,                      \
+      calculated_token,                               \
+      sizeof(domain_literal),                         \
+      sizeof(string_literal),                         \
+      domain_literal,                                 \
+      string_literal,                                 \
+  }
+
+#endif  // __cplusplus
+
 // In C++17, use a constexpr function to calculate the hash.
-#if __cpp_constexpr >= 201304L && defined(__cpp_inline_variables)
+#if defined(__cpp_constexpr) && __cpp_constexpr >= 201304L && \
+    defined(__cpp_inline_variables)
 
-#include "pw_tokenizer/pw_tokenizer_65599_fixed_length_hash.h"
+#include "pw_tokenizer/hash.h"
 
-#define PW_TOKENIZER_STRING_TOKEN(format)                \
-  pw::tokenizer::PwTokenizer65599FixedLengthHash(        \
-      std::string_view((format), sizeof(format "") - 1), \
-      PW_TOKENIZER_CFG_HASH_LENGTH)
+#define PW_TOKENIZER_STRING_TOKEN(format) ::pw::tokenizer::Hash(format)
 
 #else  // In C or older C++ code, use the hashing macro.
 
-#if PW_TOKENIZER_CFG_HASH_LENGTH == 80
+#if PW_TOKENIZER_CFG_C_HASH_LENGTH == 80
 
 #include "pw_tokenizer/internal/pw_tokenizer_65599_fixed_length_80_hash_macro.h"
 #define PW_TOKENIZER_STRING_TOKEN PW_TOKENIZER_65599_FIXED_LENGTH_80_HASH
 
-#elif PW_TOKENIZER_CFG_HASH_LENGTH == 96
+#elif PW_TOKENIZER_CFG_C_HASH_LENGTH == 96
 
 #include "pw_tokenizer/internal/pw_tokenizer_65599_fixed_length_96_hash_macro.h"
 #define PW_TOKENIZER_STRING_TOKEN PW_TOKENIZER_65599_FIXED_LENGTH_96_HASH
 
-#elif PW_TOKENIZER_CFG_HASH_LENGTH == 128
+#elif PW_TOKENIZER_CFG_C_HASH_LENGTH == 128
 
 #include "pw_tokenizer/internal/pw_tokenizer_65599_fixed_length_128_hash_macro.h"
 #define PW_TOKENIZER_STRING_TOKEN PW_TOKENIZER_65599_FIXED_LENGTH_128_HASH
@@ -60,11 +123,8 @@
 // (pw_tokenizer/internal/mash_macro_#.h) are supported. Additional macros may
 // be generated with the generate_hash_macro.py function. New macro headers must
 // be added to this file.
-#error "Unsupported value for PW_TOKENIZER_CFG_HASH_LENGTH"
+#error "Unsupported value for PW_TOKENIZER_CFG_C_HASH_LENGTH"
 
-#endif  // PW_TOKENIZER_CFG_HASH_LENGTH
+#endif  // PW_TOKENIZER_CFG_C_HASH_LENGTH
 
 #endif  // __cpp_constexpr >= 201304L && defined(__cpp_inline_variables)
-
-// The type of the token used in place of a format string.
-typedef uint32_t pw_TokenizerStringToken;
diff --git a/pw_tokenizer/public/pw_tokenizer/pw_tokenizer_65599_fixed_length_hash.h b/pw_tokenizer/public/pw_tokenizer/pw_tokenizer_65599_fixed_length_hash.h
deleted file mode 100644
index 2b6039e..0000000
--- a/pw_tokenizer/public/pw_tokenizer/pw_tokenizer_65599_fixed_length_hash.h
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2020 The Pigweed Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-//     https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-#pragma once
-
-#include <cstddef>
-#include <cstdint>
-#include <string_view>
-
-#include "pw_preprocessor/compiler.h"
-
-namespace pw::tokenizer {
-
-// The constant to use when generating the hash. Changing this changes the value
-// of all hashes, so do not change it randomly.
-inline constexpr uint32_t k65599HashConstant = 65599u;
-
-// Calculates the hash of a string. This function calculates hashes at either
-// runtime or compile time in C++ code.
-//
-// This function only hashes up to a fixed length. Characters beyond that length
-// are ignored. Hashing to a fixed length makes it possible to compute this hash
-// in a preprocessor macro. To eliminate some collisions, the length of the
-// string is hashed as if it were the first character.
-//
-// This hash is calculated with the following equation, where s is the string
-// and k is the maximum hash length:
-//
-//    H(s, k) = len(s) + 65599 * s[0] + 65599^2 * s[1] + ... + 65599^k * s[k-1]
-//
-// The hash algorithm is a modified version of the x65599 hash used by the SDBM
-// open source project. This hash has the following differences from x65599:
-//   - Characters are only hashed up to a fixed maximum string length.
-//   - Characters are hashed in reverse order.
-//   - The string length is hashed as the first character in the string.
-constexpr uint32_t PwTokenizer65599FixedLengthHash(std::string_view string,
-                                                   size_t hash_length)
-    PW_NO_SANITIZE("unsigned-integer-overflow") {
-  // The length is hashed as if it were the first character.
-  uint32_t hash = string.size();
-  uint32_t coefficient = k65599HashConstant;
-
-  // Hash all of the characters in the string as unsigned ints.
-  // The coefficient calculation is done modulo 0x100000000, so the unsigned
-  // integer overflows are intentional.
-  for (uint8_t ch : string.substr(0, hash_length)) {
-    hash += coefficient * ch;
-    coefficient *= k65599HashConstant;
-  }
-
-  return hash;
-}
-
-}  // namespace pw::tokenizer
diff --git a/pw_tokenizer/public/pw_tokenizer/tokenize.h b/pw_tokenizer/public/pw_tokenizer/tokenize.h
index f5af3c4..c398160 100644
--- a/pw_tokenizer/public/pw_tokenizer/tokenize.h
+++ b/pw_tokenizer/public/pw_tokenizer/tokenize.h
@@ -13,28 +13,45 @@
 // the License.
 #pragma once
 
+#ifdef __cplusplus
+
+#include <cstddef>
+#include <cstdint>
+
+#else
+
 #include <assert.h>
 #include <stddef.h>
 #include <stdint.h>
 
+#endif  // __cplusplus
+
+#include "pw_preprocessor/arguments.h"
 #include "pw_preprocessor/compiler.h"
 #include "pw_preprocessor/concat.h"
-#include "pw_preprocessor/macro_arg_count.h"
 #include "pw_preprocessor/util.h"
 #include "pw_tokenizer/internal/argument_types.h"
 #include "pw_tokenizer/internal/tokenize_string.h"
 
+// The type of the token used in place of a format string. Also available as
+// pw::tokenizer::Token.
+typedef uint32_t pw_tokenizer_Token;
+
 // Strings may optionally be tokenized to a domain. Strings in different domains
 // can be processed separately by the token database tools. Each domain in use
 // must have a corresponding section declared in the linker script. See
 // pw_tokenizer_linker_sections.ld for more details.
 //
-// If no domain is specified, this default is used.
-#define PW_TOKENIZER_DEFAULT_DOMAIN "default"
+// The default domain is an empty string.
+#define PW_TOKENIZER_DEFAULT_DOMAIN ""
 
-// Tokenizes a string literal and converts it to a pw_TokenizerStringToken. This
-// expression can be assigned to a local or global variable, but cannot be used
-// in another expression. For example:
+// Tokenizes a string and converts it to a pw_tokenizer_Token. In C++, the
+// string may be a literal or a constexpr char array. In C, the argument must be
+// a string literal. In either case, the string must be null terminated, but may
+// contain any characters (including '\0').
+//
+// This expression can be assigned to a local or global variable, but cannot be
+// used in another expression. For example:
 //
 //   constexpr uint32_t global = PW_TOKENIZE_STRING("Wow!");  // This works.
 //
@@ -48,16 +65,11 @@
   PW_TOKENIZE_STRING_DOMAIN(PW_TOKENIZER_DEFAULT_DOMAIN, string_literal)
 
 // Same as PW_TOKENIZE_STRING, but tokenizes to the specified domain.
-#define PW_TOKENIZE_STRING_DOMAIN(domain, string_literal)                     \
-  /* assign to a variable */ PW_TOKENIZER_STRING_TOKEN(string_literal);       \
-                                                                              \
-  /* Declare the format string as an array in the special tokenized string */ \
-  /* section, which should be excluded from the final binary. Use __LINE__ */ \
-  /* to create unique names for the section and variable, which avoids     */ \
-  /* compiler warnings.                                                    */ \
-  static _PW_TOKENIZER_CONST char PW_CONCAT(                                  \
-      _pw_tokenizer_string_literal_DO_NOT_USE_,                               \
-      __LINE__)[] _PW_TOKENIZER_SECTION(domain) = string_literal
+#define PW_TOKENIZE_STRING_DOMAIN(domain, string_literal)               \
+  /* assign to a variable */ PW_TOKENIZER_STRING_TOKEN(string_literal); \
+                                                                        \
+  _PW_TOKENIZER_RECORD_ORIGINAL_STRING(                                 \
+      PW_TOKENIZER_STRING_TOKEN(string_literal), domain, string_literal)
 
 // Encodes a tokenized string and arguments to the provided buffer. The size of
 // the buffer is passed via a pointer to a size_t. After encoding is complete,
@@ -87,15 +99,15 @@
                                __VA_ARGS__)
 
 // Same as PW_TOKENIZE_TO_BUFFER, but tokenizes to the specified domain.
-#define PW_TOKENIZE_TO_BUFFER_DOMAIN(                        \
-    domain, buffer, buffer_size_pointer, format, ...)        \
-  do {                                                       \
-    _PW_TOKENIZE_FORMAT_STRING(domain, format, __VA_ARGS__); \
-    _pw_TokenizeToBuffer(buffer,                             \
-                         buffer_size_pointer,                \
-                         _pw_tokenizer_token,                \
-                         PW_TOKENIZER_ARG_TYPES(__VA_ARGS__) \
-                             PW_COMMA_ARGS(__VA_ARGS__));    \
+#define PW_TOKENIZE_TO_BUFFER_DOMAIN(                          \
+    domain, buffer, buffer_size_pointer, format, ...)          \
+  do {                                                         \
+    _PW_TOKENIZE_FORMAT_STRING(domain, format, __VA_ARGS__);   \
+    _pw_tokenizer_ToBuffer(buffer,                             \
+                           buffer_size_pointer,                \
+                           _pw_tokenizer_token,                \
+                           PW_TOKENIZER_ARG_TYPES(__VA_ARGS__) \
+                               PW_COMMA_ARGS(__VA_ARGS__));    \
   } while (0)
 
 // Encodes a tokenized string and arguments to a buffer on the stack. The
@@ -133,33 +145,33 @@
 #define PW_TOKENIZE_TO_CALLBACK_DOMAIN(domain, callback, format, ...) \
   do {                                                                \
     _PW_TOKENIZE_FORMAT_STRING(domain, format, __VA_ARGS__);          \
-    _pw_TokenizeToCallback(callback,                                  \
-                           _pw_tokenizer_token,                       \
-                           PW_TOKENIZER_ARG_TYPES(__VA_ARGS__)        \
-                               PW_COMMA_ARGS(__VA_ARGS__));           \
+    _pw_tokenizer_ToCallback(callback,                                \
+                             _pw_tokenizer_token,                     \
+                             PW_TOKENIZER_ARG_TYPES(__VA_ARGS__)      \
+                                 PW_COMMA_ARGS(__VA_ARGS__));         \
   } while (0)
 
 PW_EXTERN_C_START
 
 // These functions encode the tokenized strings. These should not be called
 // directly. Instead, use the corresponding PW_TOKENIZE_TO_* macros above.
-void _pw_TokenizeToBuffer(void* buffer,
-                          size_t* buffer_size_bytes,  // input and output arg
-                          pw_TokenizerStringToken token,
-                          pw_TokenizerArgTypes types,
-                          ...);
-
-void _pw_TokenizeToCallback(void (*callback)(const uint8_t* encoded_message,
-                                             size_t size_bytes),
-                            pw_TokenizerStringToken token,
-                            pw_TokenizerArgTypes types,
+void _pw_tokenizer_ToBuffer(void* buffer,
+                            size_t* buffer_size_bytes,  // input and output arg
+                            pw_tokenizer_Token token,
+                            _pw_tokenizer_ArgTypes types,
                             ...);
 
+void _pw_tokenizer_ToCallback(void (*callback)(const uint8_t* encoded_message,
+                                               size_t size_bytes),
+                              pw_tokenizer_Token token,
+                              _pw_tokenizer_ArgTypes types,
+                              ...);
+
 // This empty function allows the compiler to check the format string.
-inline void pw_TokenizerCheckFormatString(const char* format, ...)
+static inline void pw_tokenizer_CheckFormatString(const char* format, ...)
     PW_PRINTF_FORMAT(1, 2);
 
-inline void pw_TokenizerCheckFormatString(const char* format, ...) {
+static inline void pw_tokenizer_CheckFormatString(const char* format, ...) {
   PW_UNUSED(format);
 }
 
@@ -172,56 +184,75 @@
 // checks that the arguments are correct, stores the format string in a special
 // section, and calculates the string's token at compile time.
 // clang-format off
-#define _PW_TOKENIZE_FORMAT_STRING(domain, format, ...)                     \
+#define _PW_TOKENIZE_FORMAT_STRING(domain, format, ...)                        \
   if (0) { /* Do not execute to prevent double evaluation of the arguments. */ \
-    pw_TokenizerCheckFormatString(format PW_COMMA_ARGS(__VA_ARGS__));          \
+    pw_tokenizer_CheckFormatString(format PW_COMMA_ARGS(__VA_ARGS__));         \
   }                                                                            \
                                                                                \
   /* Check that the macro is invoked with a supported number of arguments. */  \
   static_assert(                                                               \
-      PW_ARG_COUNT(__VA_ARGS__) <= PW_TOKENIZER_MAX_SUPPORTED_ARGS,            \
+      PW_FUNCTION_ARG_COUNT(__VA_ARGS__) <= PW_TOKENIZER_MAX_SUPPORTED_ARGS,   \
       "Tokenized strings cannot have more than "                               \
       PW_STRINGIFY(PW_TOKENIZER_MAX_SUPPORTED_ARGS) " arguments; "             \
-      PW_STRINGIFY(PW_ARG_COUNT(__VA_ARGS__)) " arguments were used for "      \
-      #format " (" #__VA_ARGS__ ")");                                          \
+      PW_STRINGIFY(PW_FUNCTION_ARG_COUNT(__VA_ARGS__))                         \
+      " arguments were used for " #format " (" #__VA_ARGS__ ")");              \
                                                                                \
-  /* Tokenize the string to a pw_TokenizerStringToken at compile time. */      \
-  _PW_TOKENIZER_CONST pw_TokenizerStringToken _pw_tokenizer_token =            \
-      PW_TOKENIZE_STRING_DOMAIN(domain, format)
+  /* Tokenize the string to a pw_tokenizer_Token at compile time. */           \
+  static _PW_TOKENIZER_CONST pw_tokenizer_Token _pw_tokenizer_token =          \
+      PW_TOKENIZER_STRING_TOKEN(format);                                       \
+                                                                               \
+  _PW_TOKENIZER_RECORD_ORIGINAL_STRING(_pw_tokenizer_token, domain, format)
 
 // clang-format on
 
-#ifdef __cplusplus  // use constexpr for C++
+// Creates unique names to use for tokenized string entries and linker sections.
+#define _PW_TOKENIZER_UNIQUE(prefix) PW_CONCAT(prefix, __LINE__, _, __COUNTER__)
+
+#ifdef __cplusplus
+
 #define _PW_TOKENIZER_CONST constexpr
-#else  // use const for C
+
+#define _PW_TOKENIZER_RECORD_ORIGINAL_STRING(token, domain, string)            \
+  alignas(1) static constexpr ::pw::tokenizer::internal::Entry<sizeof(domain), \
+                                                               sizeof(string)> \
+      _PW_TOKENIZER_SECTION _PW_TOKENIZER_UNIQUE(                              \
+          _pw_tokenizer_string_entry_) {                                       \
+    token, domain, string                                                      \
+  }
+
+namespace pw {
+namespace tokenizer {
+
+using Token = ::pw_tokenizer_Token;
+
+}  // namespace tokenizer
+}  // namespace pw
+
+#else
+
 #define _PW_TOKENIZER_CONST const
+
+#define _PW_TOKENIZER_RECORD_ORIGINAL_STRING(token, domain, string) \
+  _Alignas(1) static const _PW_TOKENIZER_STRING_ENTRY(token, domain, string)
+
 #endif  // __cplusplus
 
-// _PW_TOKENIZER_SECTION places the format string in a special .pw_tokenized
-// linker section. Host-side decoding tools read the strings from this section
-// to build a database of tokenized strings.
+// _PW_TOKENIZER_SECTION places the tokenized strings in a special .pw_tokenizer
+// linker section. Host-side decoding tools read the strings and tokens from
+// this section to build a database of tokenized strings.
 //
 // This section should be declared as type INFO so that it is excluded from the
-// final binary. To declare the section, as well as the .pw_tokenizer_info
+// final binary. To declare the section, as well as the .pw_tokenizer.info
 // metadata section, add the following to the linker script's SECTIONS command:
 //
-//   .pw_tokenizer_info 0x0 (INFO) :
+//   .pw_tokenizer.info 0x0 (INFO) :
 //   {
-//     KEEP(*(.pw_atokenizer_info))
+//     KEEP(*(.pw_tokenizer.info))
 //   }
 //
-//   .pw_tokenized.default 0x0 (INFO) :
+//   .pw_tokenizer.entries 0x0 (INFO) :
 //   {
-//     KEEP(*(.pw_tokenized.default.*))
-//   }
-//
-//
-// If custom tokenization domains are used, a section must be declared for each
-// domain:
-//
-//   .pw_tokenized.YOUR_CUSTOM_TOKENIZATION_DOMAIN 0x0 (INFO) :
-//   {
-//     KEEP(*(.pw_tokenized.YOUR_CUSTOM_TOKENIZATION_DOMAIN.*))
+//     KEEP(*(.pw_tokenizer.entries.*))
 //   }
 //
 // A linker script snippet that provides these sections is provided in the file
@@ -232,8 +263,8 @@
 // modifications, though this is not recommended. The section can be extracted
 // and removed from the ELF with objcopy:
 //
-//   objcopy --only-section .pw_tokenize* <ORIGINAL_ELF> <OUTPUT_ELF>
-//   objcopy --remove-section .pw_tokenize* <ORIGINAL_ELF>
+//   objcopy --only-section .pw_tokenizer.* <ORIGINAL_ELF> <OUTPUT_ELF>
+//   objcopy --remove-section .pw_tokenizer.* <ORIGINAL_ELF>
 //
 // OUTPUT_ELF will be an ELF with only the tokenized strings, and the original
 // ELF file will have the sections removed.
@@ -246,10 +277,10 @@
 // pw_tokenizer is intended for use with ELF files only. Mach-O files (macOS
 // executables) do not support section names longer than 16 characters, so a
 // short, dummy section name is used on macOS.
-#if __APPLE__
-#define _PW_TOKENIZER_SECTION(unused_domain) \
-  PW_KEEP_IN_SECTION(".pw." PW_STRINGIFY(__LINE__))
+#ifdef __APPLE__
+#define _PW_TOKENIZER_SECTION \
+  PW_KEEP_IN_SECTION(PW_STRINGIFY(_PW_TOKENIZER_UNIQUE(.pw.)))
 #else
-#define _PW_TOKENIZER_SECTION(domain) \
-  PW_KEEP_IN_SECTION(".pw_tokenized." domain "." PW_STRINGIFY(__LINE__))
+#define _PW_TOKENIZER_SECTION \
+  PW_KEEP_IN_SECTION(PW_STRINGIFY(_PW_TOKENIZER_UNIQUE(.pw_tokenizer.entries.)))
 #endif  // __APPLE__
diff --git a/pw_tokenizer/public/pw_tokenizer/tokenize_to_global_handler.h b/pw_tokenizer/public/pw_tokenizer/tokenize_to_global_handler.h
index ce41741..c315f91 100644
--- a/pw_tokenizer/public/pw_tokenizer/tokenize_to_global_handler.h
+++ b/pw_tokenizer/public/pw_tokenizer/tokenize_to_global_handler.h
@@ -20,7 +20,7 @@
 #include "pw_tokenizer/tokenize.h"
 
 // Encodes a tokenized string and arguments to a buffer on the stack. The buffer
-// is passed to the user-defined pw_TokenizerHandleEncodedMessage function. The
+// is passed to the user-defined pw_tokenizer_HandleEncodedMessage function. The
 // size of the stack-allocated argument encoding buffer is set with the
 // PW_TOKENIZER_CFG_ENCODING_BUFFER_SIZE_BYTES option.
 //
@@ -31,13 +31,13 @@
 //
 // For example, the following encodes a tokenized string with a value returned
 // from a function call. The encoded message is passed to the caller-defined
-// pw_TokenizerHandleEncodedMessage function.
+// pw_tokenizer_HandleEncodedMessage function.
 //
 //   void OutputLastReadSize() {
 //     PW_TOKENIZE_TO_GLOBAL_HANDLER("Read %u bytes", ReadSizeBytes());
 //   }
 //
-//   void pw_TokenizerHandleEncodedMessage(const uint8_t encoded_message[],
+//   void pw_tokenizer_HandleEncodedMessage(const uint8_t encoded_message[],
 //                                         size_t size_bytes) {
 //     MyProject_EnqueueMessageForUart(buffer, size_bytes);
 //   }
@@ -47,26 +47,26 @@
       PW_TOKENIZER_DEFAULT_DOMAIN, format, __VA_ARGS__)
 
 // Same as PW_TOKENIZE_TO_GLOBAL_HANDLER, but tokenizes to the specified domain.
-#define PW_TOKENIZE_TO_GLOBAL_HANDLER_DOMAIN(domain, format, ...)   \
-  do {                                                              \
-    _PW_TOKENIZE_FORMAT_STRING(domain, format, __VA_ARGS__);        \
-    _pw_TokenizeToGlobalHandler(_pw_tokenizer_token,                \
-                                PW_TOKENIZER_ARG_TYPES(__VA_ARGS__) \
-                                    PW_COMMA_ARGS(__VA_ARGS__));    \
+#define PW_TOKENIZE_TO_GLOBAL_HANDLER_DOMAIN(domain, format, ...)     \
+  do {                                                                \
+    _PW_TOKENIZE_FORMAT_STRING(domain, format, __VA_ARGS__);          \
+    _pw_tokenizer_ToGlobalHandler(_pw_tokenizer_token,                \
+                                  PW_TOKENIZER_ARG_TYPES(__VA_ARGS__) \
+                                      PW_COMMA_ARGS(__VA_ARGS__));    \
   } while (0)
 
 PW_EXTERN_C_START
 
 // This function must be defined by the pw_tokenizer:global_handler backend.
 // This function is called with the encoded message by
-// pw_TokenizeToGlobalHandler.
-void pw_TokenizerHandleEncodedMessage(const uint8_t encoded_message[],
-                                      size_t size_bytes);
+// _pw_tokenizer_ToGlobalHandler.
+void pw_tokenizer_HandleEncodedMessage(const uint8_t encoded_message[],
+                                       size_t size_bytes);
 
 // This function encodes the tokenized strings. Do not call it directly;
 // instead, use the PW_TOKENIZE_TO_GLOBAL_HANDLER macro.
-void _pw_TokenizeToGlobalHandler(pw_TokenizerStringToken token,
-                                 pw_TokenizerArgTypes types,
-                                 ...);
+void _pw_tokenizer_ToGlobalHandler(pw_tokenizer_Token token,
+                                   _pw_tokenizer_ArgTypes types,
+                                   ...);
 
 PW_EXTERN_C_END
diff --git a/pw_tokenizer/public/pw_tokenizer/tokenize_to_global_handler_with_payload.h b/pw_tokenizer/public/pw_tokenizer/tokenize_to_global_handler_with_payload.h
index df1800f..55914f7 100644
--- a/pw_tokenizer/public/pw_tokenizer/tokenize_to_global_handler_with_payload.h
+++ b/pw_tokenizer/public/pw_tokenizer/tokenize_to_global_handler_with_payload.h
@@ -22,7 +22,7 @@
 // Like PW_TOKENIZE_TO_GLOBAL_HANDLER, encodes a tokenized string and arguments
 // to a buffer on the stack. The macro adds a payload argument, which is passed
 // through to the global handler function
-// pw_TokenizerHandleEncodedMessageWithPayload, which must be defined by the
+// pw_tokenizer_HandleEncodedMessageWithPayload, which must be defined by the
 // user of pw_tokenizer. The payload is a uintptr_t.
 //
 // For example, the following tokenizes a log string and passes the log level as
@@ -31,8 +31,8 @@
      #define LOG_ERROR(...) \
          PW_TOKENIZE_TO_GLOBAL_HANDLER_WITH_PAYLOAD(kLogLevelError, __VA_ARGS__)
 
-     void pw_TokenizerHandleEncodedMessageWithPayload(
-         pw_TokenizerPayload log_level,
+     void pw_tokenizer_HandleEncodedMessageWithPayload(
+         pw_tokenizer_Payload log_level,
          const uint8_t encoded_message[],
          size_t size_bytes) {
        if (log_level >= kLogLevelWarning) {
@@ -46,33 +46,33 @@
 
 // Same as PW_TOKENIZE_TO_GLOBAL_HANDLER_WITH_PAYLOAD, but tokenizes to the
 // specified domain.
-#define PW_TOKENIZE_TO_GLOBAL_HANDLER_WITH_PAYLOAD_DOMAIN(                     \
-    domain, payload, format, ...)                                              \
-  do {                                                                         \
-    _PW_TOKENIZE_FORMAT_STRING(domain, format, __VA_ARGS__);                   \
-    _pw_TokenizeToGlobalHandlerWithPayload(payload,                            \
-                                           _pw_tokenizer_token,                \
-                                           PW_TOKENIZER_ARG_TYPES(__VA_ARGS__) \
-                                               PW_COMMA_ARGS(__VA_ARGS__));    \
+#define PW_TOKENIZE_TO_GLOBAL_HANDLER_WITH_PAYLOAD_DOMAIN(               \
+    domain, payload, format, ...)                                        \
+  do {                                                                   \
+    _PW_TOKENIZE_FORMAT_STRING(domain, format, __VA_ARGS__);             \
+    _pw_tokenizer_ToGlobalHandlerWithPayload(                            \
+        payload,                                                         \
+        _pw_tokenizer_token,                                             \
+        PW_TOKENIZER_ARG_TYPES(__VA_ARGS__) PW_COMMA_ARGS(__VA_ARGS__)); \
   } while (0)
 
 PW_EXTERN_C_START
 
-typedef uintptr_t pw_TokenizerPayload;
+typedef uintptr_t pw_tokenizer_Payload;
 
 // This function must be defined pw_tokenizer:global_handler_with_payload
 // backend. This function is called with the encoded message by
-// pw_TokenizeToGlobalHandler and a caller-provided payload argument.
-void pw_TokenizerHandleEncodedMessageWithPayload(
-    pw_TokenizerPayload payload,
+// pw_tokenizer_ToGlobalHandler and a caller-provided payload argument.
+void pw_tokenizer_HandleEncodedMessageWithPayload(
+    pw_tokenizer_Payload payload,
     const uint8_t encoded_message[],
     size_t size_bytes);
 
 // This function encodes the tokenized strings. Do not call it directly;
 // instead, use the PW_TOKENIZE_TO_GLOBAL_HANDLER_WITH_PAYLOAD macro.
-void _pw_TokenizeToGlobalHandlerWithPayload(pw_TokenizerPayload payload,
-                                            pw_TokenizerStringToken token,
-                                            pw_TokenizerArgTypes types,
-                                            ...);
+void _pw_tokenizer_ToGlobalHandlerWithPayload(pw_tokenizer_Payload payload,
+                                              pw_tokenizer_Token token,
+                                              _pw_tokenizer_ArgTypes types,
+                                              ...);
 
 PW_EXTERN_C_END
diff --git a/pw_tokenizer/pw_tokenizer_linker_sections.ld b/pw_tokenizer/pw_tokenizer_linker_sections.ld
index afaba34..ae17f47 100644
--- a/pw_tokenizer/pw_tokenizer_linker_sections.ld
+++ b/pw_tokenizer/pw_tokenizer_linker_sections.ld
@@ -12,6 +12,9 @@
  * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
  * License for the specific language governing permissions and limitations under
  * the License.
+ */
+
+/*
  *
  * This linker script snippet declares the sections needed for string
  * tokenization. All sections have type INFO so they are excluded from the final
@@ -40,43 +43,28 @@
    * Metadata is declared as key-value pairs. See the metadata variable in
    * tokenize.cc for further details.
    */
-  .pw_tokenizer_info 0x0 (INFO) :
+  .pw_tokenizer.info 0x0 (INFO) :
   {
-    KEEP(*(.pw_tokenizer_info))
+    KEEP(*(.pw_tokenizer.info))
   }
 
   /*
-   * Tokenized strings are stored in this section by default. In the compiled
-   * code, format string literals are replaced by a hash of the string contents
-   * and a compact argument list encoded in a uint32_t. The compiled code
-   * contains no references to the tokenized strings in this section.
+   * Tokenized string entries are stored in this section. Each entry contains
+   * the original string literal and the calculated token that represents it. In
+   * the compiled code, the token and a compact argument list encoded in a
+   * uint32_t are used in place of the format string. The compiled code
+   * contains no references to the tokenized string entries in this section.
+   *
+   * The tokenized string entry format is specified by the
+   * pw::tokenizer::internal::Entry class in
+   * pw_tokenizer/public/pw_tokenizer/internal/tokenize_string.h.
    *
    * The section contents are declared with KEEP so that they are not removed
    * from the ELF. These are never emitted in the final binary or loaded into
    * memory.
    */
-  .pw_tokenized.default 0x0 (INFO) :
+  .pw_tokenizer.entries 0x0 (INFO) :
   {
-    KEEP(*(.pw_tokenized.default.*))
+    KEEP(*(.pw_tokenizer.entries.*))
   }
-
-/*
- * Projects may define additional tokenization domains, if desired. Strings in
- * different domains are stored in separate ELF sections so they can be
- * processed separately by the token database tools.
- *
- * Use cases for domains include keeping large sets of strings separate to avoid
- * collisions, or separating a small subset of strings that will use truncated
- * tokens (e.g. 16-bit tokens instead of 32-bit tokens).
- *
- * Each tokenization domain in use must have a corresponding section in the
- * linker script. As required, copy this section declaration and replace
- * YOUR_CUSTOM_TOKENIZATION_DOMAIN with the the domain name.
-
-  .pw_tokenized.YOUR_CUSTOM_TOKENIZATION_DOMAIN 0x0 (INFO) :
-  {
-    KEEP(*(.pw_tokenized.YOUR_CUSTOM_TOKENIZATION_DOMAIN.*))
-  }
-
- */
 }
diff --git a/pw_tokenizer/pw_tokenizer_private/argument_types_test.h b/pw_tokenizer/pw_tokenizer_private/argument_types_test.h
index 52ac6c9..b616392 100644
--- a/pw_tokenizer/pw_tokenizer_private/argument_types_test.h
+++ b/pw_tokenizer/pw_tokenizer_private/argument_types_test.h
@@ -20,27 +20,27 @@
 
 PW_EXTERN_C_START
 
-pw_TokenizerArgTypes pw_TestTokenizerNoArgs(void);
+_pw_tokenizer_ArgTypes pw_TestTokenizerNoArgs(void);
 
-pw_TokenizerArgTypes pw_TestTokenizerChar(void);
-pw_TokenizerArgTypes pw_TestTokenizerUint8(void);
-pw_TokenizerArgTypes pw_TestTokenizerUint16(void);
-pw_TokenizerArgTypes pw_TestTokenizerInt32(void);
-pw_TokenizerArgTypes pw_TestTokenizerInt64(void);
-pw_TokenizerArgTypes pw_TestTokenizerUint64(void);
-pw_TokenizerArgTypes pw_TestTokenizerFloat(void);
-pw_TokenizerArgTypes pw_TestTokenizerDouble(void);
-pw_TokenizerArgTypes pw_TestTokenizerString(void);
-pw_TokenizerArgTypes pw_TestTokenizerMutableString(void);
+_pw_tokenizer_ArgTypes pw_TestTokenizerChar(void);
+_pw_tokenizer_ArgTypes pw_TestTokenizerUint8(void);
+_pw_tokenizer_ArgTypes pw_TestTokenizerUint16(void);
+_pw_tokenizer_ArgTypes pw_TestTokenizerInt32(void);
+_pw_tokenizer_ArgTypes pw_TestTokenizerInt64(void);
+_pw_tokenizer_ArgTypes pw_TestTokenizerUint64(void);
+_pw_tokenizer_ArgTypes pw_TestTokenizerFloat(void);
+_pw_tokenizer_ArgTypes pw_TestTokenizerDouble(void);
+_pw_tokenizer_ArgTypes pw_TestTokenizerString(void);
+_pw_tokenizer_ArgTypes pw_TestTokenizerMutableString(void);
 
-pw_TokenizerArgTypes pw_TestTokenizerIntFloat(void);
-pw_TokenizerArgTypes pw_TestTokenizerUint64Char(void);
-pw_TokenizerArgTypes pw_TestTokenizerStringString(void);
-pw_TokenizerArgTypes pw_TestTokenizerUint16Int(void);
-pw_TokenizerArgTypes pw_TestTokenizerFloatString(void);
+_pw_tokenizer_ArgTypes pw_TestTokenizerIntFloat(void);
+_pw_tokenizer_ArgTypes pw_TestTokenizerUint64Char(void);
+_pw_tokenizer_ArgTypes pw_TestTokenizerStringString(void);
+_pw_tokenizer_ArgTypes pw_TestTokenizerUint16Int(void);
+_pw_tokenizer_ArgTypes pw_TestTokenizerFloatString(void);
 
-pw_TokenizerArgTypes pw_TestTokenizerNull(void);
-pw_TokenizerArgTypes pw_TestTokenizerPointer(void);
-pw_TokenizerArgTypes pw_TestTokenizerPointerPointer(void);
+_pw_tokenizer_ArgTypes pw_TestTokenizerNull(void);
+_pw_tokenizer_ArgTypes pw_TestTokenizerPointer(void);
+_pw_tokenizer_ArgTypes pw_TestTokenizerPointerPointer(void);
 
 PW_EXTERN_C_END
diff --git a/pw_tokenizer/pw_tokenizer_private/encode_args.h b/pw_tokenizer/pw_tokenizer_private/encode_args.h
index 2f39949..d16c3bf 100644
--- a/pw_tokenizer/pw_tokenizer_private/encode_args.h
+++ b/pw_tokenizer/pw_tokenizer_private/encode_args.h
@@ -20,23 +20,33 @@
 
 #include "pw_tokenizer/config.h"
 #include "pw_tokenizer/internal/argument_types.h"
-#include "pw_tokenizer/internal/tokenize_string.h"
+#include "pw_tokenizer/tokenize.h"
 
 namespace pw {
 namespace tokenizer {
 
 // Buffer for encoding a tokenized string and arguments.
 struct EncodedMessage {
-  pw_TokenizerStringToken token;
-  std::array<uint8_t, PW_TOKENIZER_CFG_ENCODING_BUFFER_SIZE_BYTES> args;
+  pw_tokenizer_Token token;
+  std::array<uint8_t,
+             PW_TOKENIZER_CFG_ENCODING_BUFFER_SIZE_BYTES - sizeof(token)>
+      args;
 };
 
-static_assert(offsetof(EncodedMessage, args) == sizeof(EncodedMessage::token),
+static_assert(PW_TOKENIZER_CFG_ENCODING_BUFFER_SIZE_BYTES >=
+                  sizeof(pw_tokenizer_Token),
+              "PW_TOKENIZER_CFG_ENCODING_BUFFER_SIZE_BYTES must be at least "
+              "large enough for a token (4 bytes)");
+
+static_assert(offsetof(EncodedMessage, args) == sizeof(EncodedMessage::token) &&
+                  PW_TOKENIZER_CFG_ENCODING_BUFFER_SIZE_BYTES ==
+                      sizeof(EncodedMessage),
               "EncodedMessage should not have padding bytes between members");
 
-// Encodes a tokenized string's arguments to a buffer. The pw_TokenizerArgTypes
-// parameter specifies the argument types, in place of a format string.
-size_t EncodeArgs(pw_TokenizerArgTypes types,
+// Encodes a tokenized string's arguments to a buffer. The
+// _pw_tokenizer_ArgTypes parameter specifies the argument types, in place of a
+// format string.
+size_t EncodeArgs(_pw_tokenizer_ArgTypes types,
                   va_list args,
                   std::span<uint8_t> output);
 
diff --git a/pw_tokenizer/pw_tokenizer_private/tokenize_test.h b/pw_tokenizer/pw_tokenizer_private/tokenize_test.h
index d67764a..3c4b463 100644
--- a/pw_tokenizer/pw_tokenizer_private/tokenize_test.h
+++ b/pw_tokenizer/pw_tokenizer_private/tokenize_test.h
@@ -25,23 +25,23 @@
 
 #define TEST_FORMAT_STRING_SHORT_FLOAT "Hello %s! %hd %e"
 
-void pw_TokenizeToBufferTest_StringShortFloat(void* buffer,
-                                              size_t* buffer_size);
+void pw_tokenizer_ToBufferTest_StringShortFloat(void* buffer,
+                                                size_t* buffer_size);
 
 #define TEST_FORMAT_SEQUENTIAL_ZIG_ZAG "%u%d%02x%X%hu%hhd%d%ld%lu%lld%llu%c%c%c"
 
-void pw_TokenizeToBufferTest_SequentialZigZag(void* buffer,
-                                              size_t* buffer_size);
+void pw_tokenizer_ToBufferTest_SequentialZigZag(void* buffer,
+                                                size_t* buffer_size);
 
-void pw_TokenizeToCallbackTest_SequentialZigZag(
+void pw_tokenizer_ToCallbackTest_SequentialZigZag(
     void (*callback)(const uint8_t* buffer, size_t size));
 
 #define TEST_FORMAT_REQUIRES_8 "Won't fit : %s%d"
 
-void pw_TokenizeToBufferTest_Requires8(void* buffer, size_t* buffer_size);
+void pw_tokenizer_ToBufferTest_Requires8(void* buffer, size_t* buffer_size);
 
-void pw_TokenizeToGlobalHandlerTest_SequentialZigZag(void);
+void pw_tokenizer_ToGlobalHandlerTest_SequentialZigZag(void);
 
-void pw_TokenizeToGlobalHandlerWithPayloadTest_SequentialZigZag(void);
+void pw_tokenizer_ToGlobalHandlerWithPayloadTest_SequentialZigZag(void);
 
 PW_EXTERN_C_END
diff --git a/pw_tokenizer/py/BUILD.gn b/pw_tokenizer/py/BUILD.gn
new file mode 100644
index 0000000..a8a1c1b
--- /dev/null
+++ b/pw_tokenizer/py/BUILD.gn
@@ -0,0 +1,49 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/python.gni")
+
+pw_python_package("py") {
+  setup = [ "setup.py" ]
+  sources = [
+    "generate_argument_types_macro.py",
+    "generate_hash_macro.py",
+    "generate_hash_test_data.py",
+    "pw_tokenizer/__init__.py",
+    "pw_tokenizer/__main__.py",
+    "pw_tokenizer/database.py",
+    "pw_tokenizer/decode.py",
+    "pw_tokenizer/detokenize.py",
+    "pw_tokenizer/elf_reader.py",
+    "pw_tokenizer/encode.py",
+    "pw_tokenizer/serial_detokenizer.py",
+    "pw_tokenizer/tokens.py",
+    "tokenized_string_decoding_test_data.py",
+    "varint_test_data.py",
+  ]
+  tests = [
+    "database_test.py",
+    "decode_test.py",
+    "detokenize_test.py",
+    "elf_reader_test.py",
+    "encode_test.py",
+    "tokens_test.py",
+  ]
+  inputs = [
+    "example_binary_with_tokenized_strings.elf",
+    "example_legacy_binary_with_tokenized_strings.elf",
+  ]
+}
diff --git a/pw_tokenizer/py/database_test.py b/pw_tokenizer/py/database_test.py
index cba994f..54c890e 100755
--- a/pw_tokenizer/py/database_test.py
+++ b/pw_tokenizer/py/database_test.py
@@ -14,6 +14,7 @@
 # the License.
 """Tests for the database module."""
 
+import json
 import io
 from pathlib import Path
 import shutil
@@ -24,11 +25,21 @@
 
 from pw_tokenizer import database
 
-ELF = Path(__file__).parent / 'example_binary_with_tokenized_strings.elf'
+# This is an ELF file with only the pw_tokenizer sections. It was created
+# from a tokenize_test binary built for the STM32F429i Discovery board. The
+# pw_tokenizer sections were extracted with this command:
+#
+#   arm-none-eabi-objcopy -S --only-section ".pw_tokenize*" <ELF> <OUTPUT>
+#
+TOKENIZED_ENTRIES_ELF = Path(
+    __file__).parent / 'example_binary_with_tokenized_strings.elf'
+LEGACY_PLAIN_STRING_ELF = Path(
+    __file__).parent / 'example_legacy_binary_with_tokenized_strings.elf'
 
 CSV_DEFAULT_DOMAIN = '''\
 00000000,          ,""
 141c35d5,          ,"The answer: ""%s"""
+29aef586,          ,"1234"
 2b78825f,          ,"[:-)"
 2e668cd6,          ,"Jello, world!"
 31631781,          ,"%d"
@@ -36,9 +47,13 @@
 68ab92da,          ,"%s there are %x (%.2f) of them%c"
 7b940e2a,          ,"Hello %s! %hd %e"
 7da55d52,          ,">:-[]"
+7f35a9a5,          ,"TestName"
 851beeb6,          ,"%u %d"
 881436a0,          ,"The answer is: %s"
 88808930,          ,"%u%d%02x%X%hu%hhd%d%ld%lu%lld%llu%c%c%c"
+92723f44,          ,"???"
+a09d6698,          ,"won-won-won-wonderful"
+aa9ffa66,          ,"void pw::tokenizer::{anonymous}::TestName()"
 ad002c97,          ,"%llx"
 b3653e13,          ,"Jello!"
 cc6d3131,          ,"Jello?"
@@ -46,15 +61,20 @@
 e65aefef,          ,"Won't fit : %s%d"
 '''
 
-CSV_TEST_DOMAIN = '''\
-00000000,          ,""
+CSV_TEST_DOMAIN = """\
+17fa86d3,          ,"hello"
+18c5017c,          ,"yes"
 59b2701c,          ,"The answer was: %s"
 881436a0,          ,"The answer is: %s"
-'''
+d18ada0f,          ,"something"
+"""
 
 CSV_ALL_DOMAINS = '''\
 00000000,          ,""
 141c35d5,          ,"The answer: ""%s"""
+17fa86d3,          ,"hello"
+18c5017c,          ,"yes"
+29aef586,          ,"1234"
 2b78825f,          ,"[:-)"
 2e668cd6,          ,"Jello, world!"
 31631781,          ,"%d"
@@ -63,18 +83,42 @@
 68ab92da,          ,"%s there are %x (%.2f) of them%c"
 7b940e2a,          ,"Hello %s! %hd %e"
 7da55d52,          ,">:-[]"
+7f35a9a5,          ,"TestName"
 851beeb6,          ,"%u %d"
 881436a0,          ,"The answer is: %s"
 88808930,          ,"%u%d%02x%X%hu%hhd%d%ld%lu%lld%llu%c%c%c"
+92723f44,          ,"???"
+a09d6698,          ,"won-won-won-wonderful"
+aa9ffa66,          ,"void pw::tokenizer::{anonymous}::TestName()"
 ad002c97,          ,"%llx"
 b3653e13,          ,"Jello!"
 cc6d3131,          ,"Jello?"
+d18ada0f,          ,"something"
 e13b0f94,          ,"%llu"
 e65aefef,          ,"Won't fit : %s%d"
 '''
 
+EXPECTED_REPORT = {
+    str(TOKENIZED_ENTRIES_ELF): {
+        '': {
+            'present_entries': 22,
+            'present_size_bytes': 289,
+            'total_entries': 22,
+            'total_size_bytes': 289,
+            'collisions': 0
+        },
+        'TEST_DOMAIN': {
+            'present_entries': 5,
+            'present_size_bytes': 57,
+            'total_entries': 5,
+            'total_size_bytes': 57,
+            'collisions': 0
+        }
+    }
+}
 
-def run_cli(*args):
+
+def run_cli(*args) -> None:
     original_argv = sys.argv
     sys.argv = ['database.py', *(str(a) for a in args)]
     # pylint: disable=protected-access
@@ -89,109 +133,149 @@
         sys.argv = original_argv
 
 
-def _mock_output():
+def _mock_output() -> io.TextIOWrapper:
     output = io.BytesIO()
     output.name = '<fake stdout>'
     return io.TextIOWrapper(output, write_through=True)
 
 
-REPORT_DEFAULT_DOMAIN = b'''\
-example_binary_with_tokenized_strings.elf]
-                 Domain: default
-        Entries present: 17
-        Size of strings: 205 B
-          Total entries: 17
-  Total size of strings: 205 B
-             Collisions: 0 tokens
-'''
-
-REPORT_TEST_DOMAIN = b'''\
-example_binary_with_tokenized_strings.elf]
-                 Domain: TEST_DOMAIN
-        Entries present: 3
-        Size of strings: 38 B
-          Total entries: 3
-  Total size of strings: 38 B
-             Collisions: 0 tokens
-'''
-
-
 class DatabaseCommandLineTest(unittest.TestCase):
     """Tests the database.py command line interface."""
     def setUp(self):
         self._dir = Path(tempfile.mkdtemp('_pw_tokenizer_test'))
         self._csv = self._dir / 'db.csv'
+        self._elf = TOKENIZED_ENTRIES_ELF
+
+        self._csv_test_domain = CSV_TEST_DOMAIN
 
     def tearDown(self):
         shutil.rmtree(self._dir)
 
     def test_create_csv(self):
-        run_cli('create', '--database', self._csv, ELF)
+        run_cli('create', '--database', self._csv, self._elf)
 
-        self.assertEqual(CSV_DEFAULT_DOMAIN, self._csv.read_text())
+        self.assertEqual(CSV_DEFAULT_DOMAIN.splitlines(),
+                         self._csv.read_text().splitlines())
 
     def test_create_csv_test_domain(self):
-        run_cli('create', '--database', self._csv, f'{ELF}#TEST_DOMAIN')
+        run_cli('create', '--database', self._csv, f'{self._elf}#TEST_DOMAIN')
 
-        self.assertEqual(CSV_TEST_DOMAIN, self._csv.read_text())
+        self.assertEqual(self._csv_test_domain.splitlines(),
+                         self._csv.read_text().splitlines())
 
     def test_create_csv_all_domains(self):
-        run_cli('create', '--database', self._csv, f'{ELF}#.*')
+        run_cli('create', '--database', self._csv, f'{self._elf}#.*')
 
-        self.assertEqual(CSV_ALL_DOMAINS, self._csv.read_text())
+        self.assertEqual(CSV_ALL_DOMAINS.splitlines(),
+                         self._csv.read_text().splitlines())
 
     def test_create_force(self):
         self._csv.write_text(CSV_ALL_DOMAINS)
 
         with self.assertRaises(FileExistsError):
-            run_cli('create', '--database', self._csv, ELF)
+            run_cli('create', '--database', self._csv, self._elf)
 
-        run_cli('create', '--force', '--database', self._csv, ELF)
+        run_cli('create', '--force', '--database', self._csv, self._elf)
 
     def test_create_binary(self):
         binary = self._dir / 'db.bin'
-        run_cli('create', '--type', 'binary', '--database', binary, ELF)
+        run_cli('create', '--type', 'binary', '--database', binary, self._elf)
 
         # Write the binary database as CSV to verify its contents.
         run_cli('create', '--database', self._csv, binary)
 
-        self.assertEqual(CSV_DEFAULT_DOMAIN, self._csv.read_text())
+        self.assertEqual(CSV_DEFAULT_DOMAIN.splitlines(),
+                         self._csv.read_text().splitlines())
 
-    def test_add(self):
-        self._csv.write_text(CSV_ALL_DOMAINS)
+    def test_add_does_not_recalculate_tokens(self):
+        db_with_custom_token = '01234567,          ,"hello"'
 
-        run_cli('add', '--database', self._csv, f'{ELF}#TEST_DOMAIN')
-        self.assertEqual(CSV_ALL_DOMAINS, self._csv.read_text())
+        to_add = self._dir / 'add_this.csv'
+        to_add.write_text(db_with_custom_token + '\n')
+        self._csv.touch()
+
+        run_cli('add', '--database', self._csv, to_add)
+        self.assertEqual(db_with_custom_token.splitlines(),
+                         self._csv.read_text().splitlines())
 
     def test_mark_removals(self):
         self._csv.write_text(CSV_ALL_DOMAINS)
 
         run_cli('mark_removals', '--database', self._csv, '--date',
-                '1998-09-04', f'{ELF}#default')
+                '1998-09-04', self._elf)
 
-        # Add the removal date to the token not in the default domain
-        new_csv = CSV_ALL_DOMAINS.replace('59b2701c,          ,',
-                                          '59b2701c,1998-09-04,')
+        # Add the removal date to the four tokens not in the default domain
+        new_csv = CSV_ALL_DOMAINS
+        new_csv = new_csv.replace('17fa86d3,          ,"hello"',
+                                  '17fa86d3,1998-09-04,"hello"')
+        new_csv = new_csv.replace('18c5017c,          ,"yes"',
+                                  '18c5017c,1998-09-04,"yes"')
+        new_csv = new_csv.replace('59b2701c,          ,"The answer was: %s"',
+                                  '59b2701c,1998-09-04,"The answer was: %s"')
+        new_csv = new_csv.replace('d18ada0f,          ,"something"',
+                                  'd18ada0f,1998-09-04,"something"')
         self.assertNotEqual(CSV_ALL_DOMAINS, new_csv)
 
-        self.assertEqual(new_csv, self._csv.read_text())
+        self.assertEqual(new_csv.splitlines(),
+                         self._csv.read_text().splitlines())
 
     def test_purge(self):
         self._csv.write_text(CSV_ALL_DOMAINS)
 
         # Mark everything not in TEST_DOMAIN as removed.
-        run_cli('mark_removals', '--database', self._csv, f'{ELF}#TEST_DOMAIN')
+        run_cli('mark_removals', '--database', self._csv,
+                f'{self._elf}#TEST_DOMAIN')
 
         # Delete all entries except those in TEST_DOMAIN.
         run_cli('purge', '--database', self._csv)
 
-        self.assertEqual(CSV_TEST_DOMAIN, self._csv.read_text())
+        self.assertEqual(self._csv_test_domain.splitlines(),
+                         self._csv.read_text().splitlines())
 
     @mock.patch('sys.stdout', new_callable=_mock_output)
     def test_report(self, mock_stdout):
-        run_cli('report', ELF)
-        self.assertIn(REPORT_DEFAULT_DOMAIN, mock_stdout.buffer.getvalue())
-        self.assertIn(REPORT_TEST_DOMAIN, mock_stdout.buffer.getvalue())
+        run_cli('report', self._elf)
+
+        self.assertEqual(json.loads(mock_stdout.buffer.getvalue()),
+                         EXPECTED_REPORT)
+
+    def test_replace(self):
+        sub = 'replace/ment'
+        run_cli('create', '--database', self._csv, self._elf, '--replace',
+                r'(?i)\b[jh]ello\b/' + sub)
+        self.assertEqual(
+            CSV_DEFAULT_DOMAIN.replace('Jello', sub).replace('Hello', sub),
+            self._csv.read_text())
+
+
+class LegacyDatabaseCommandLineTest(DatabaseCommandLineTest):
+    """Test an ELF with the legacy plain string storage format."""
+    def setUp(self):
+        super().setUp()
+        self._elf = LEGACY_PLAIN_STRING_ELF
+
+        # The legacy approach for storing tokenized strings in an ELF always
+        # adds an entry for "", even if the empty string was never tokenized.
+        self._csv_test_domain = '00000000,          ,""\n' + CSV_TEST_DOMAIN
+
+    @mock.patch('sys.stdout', new_callable=_mock_output)
+    def test_report(self, mock_stdout):
+        run_cli('report', self._elf)
+
+        report = EXPECTED_REPORT[str(TOKENIZED_ENTRIES_ELF)].copy()
+
+        # Count the implicitly added "" entry in TEST_DOMAIN.
+        report['TEST_DOMAIN']['present_entries'] += 1
+        report['TEST_DOMAIN']['present_size_bytes'] += 1
+        report['TEST_DOMAIN']['total_entries'] += 1
+        report['TEST_DOMAIN']['total_size_bytes'] += 1
+
+        # Rename "" to the legacy name "default"
+        report['default'] = report['']
+        del report['']
+
+        self.assertEqual({str(LEGACY_PLAIN_STRING_ELF): report},
+                         json.loads(mock_stdout.buffer.getvalue()))
 
 
 if __name__ == '__main__':
diff --git a/pw_tokenizer/py/detokenize_test.py b/pw_tokenizer/py/detokenize_test.py
index 300e782..6613e53 100755
--- a/pw_tokenizer/py/detokenize_test.py
+++ b/pw_tokenizer/py/detokenize_test.py
@@ -18,6 +18,7 @@
 import datetime as dt
 import io
 import os
+from pathlib import Path
 import struct
 import tempfile
 import unittest
@@ -82,45 +83,16 @@
     b'\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00'
     b'\x00\x00\x00')
 
-# This is an ELF file with only .pw_tokenized and .pw_tokenizer_info sections.
-# It was created from the ELF file for tokenize_test.cc with the command:
+# This is an ELF file with only the pw_tokenizer sections. It was created
+# from a tokenize_test binary built for the STM32F429i Discovery board. The
+# pw_tokenizer sections were extracted with this command:
 #
-#   arm-none-eabi-objcopy -S --only-section ".pw_tokenize*" <ELF> <OUTPUT>
+#   arm-none-eabi-objcopy -S --only-section ".pw_tokenizer*" <ELF> <OUTPUT>
 #
-# The resulting ELF was converted to a Python binary string using
-# path_to_byte_string function above. The file is also included in the repo as
-# example_binary_with_tokenized_strings.elf.
-ELF_WITH_TOKENIZER_SECTIONS = (
-    b'\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00(\x00\x01'
-    b'\x00\x00\x00!G\x00\x084\x00\x00\x00\xd4\x02\x00\x00\x00\x04\x00\x054\x00'
-    b' \x00\x04\x00(\x00\x04\x00\x03\x00\x01\x00\x00\x00\xb4\x00\x00\x00\x00'
-    b'\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00'
-    b'\x00\x00\x00\x01\x00\x01\x00\x00\x00\xb4\x00\x00\x00\x00\x02\x00\x08\x00'
-    b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x01'
-    b'\x00\x01\x00\x00\x00\xb4\x00\x00\x00\x00\x00\x00 \x00\x00\x00\x00\x00\x00'
-    b'\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x01\x00\x01\x00\x00\x00'
-    b'\xb4\x00\x00\x00\x18D\x00 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-    b'\x00\x06\x00\x00\x00\x00\x00\x01\x00Hello %s! %hd %e\x00\x00\x00\x00%u'
-    b'%d%02x%X%hu%hhd%d%ld%lu%lld%llu%c%c%c\x00%u%d%02x%X%hu%hhd%d%ld%lu%lld'
-    b'%llu%c%c%c\x00Won\'t fit : %s%d\x00\x00\x00\x00%llx\x00\x00\x00\x00%ld'
-    b'\x00%d\x00\x00%ld\x00The answer is: %s\x00\x00\x00The answer is: %s\x00'
-    b'\x00\x00The answer is: %s\x00\x00\x00The answer is: %s\x00\x00\x00The '
-    b'answer is: %s\x00\x00\x00The answer is: %s\x00\x00\x00The answer is: %'
-    b's\x00\x00\x00The answer is: %s\x00\x00\x00%u %d\x00\x00\x00The answer:'
-    b' "%s"\x00\x00\x00\x00Jello, world!\x00\x00\x00Jello!\x00\x00Jello?\x00'
-    b'\x00%s there are %x (%.2f) of them%c\x00\x00\x00\x00The answer is: %s\x00'
-    b'\x00\x00\x00\x00\x00\x00[:-)\x00\x00\x00\x00>:-[]\x00\x00\x00%llu\x00\x00'
-    b'\x00\x00The answer was: %s\x00\x00The answer is: %s\x00\x00.shstrtab\x00'
-    b'.pw_tokenized.default\x00.pw_tokenized.TEST_DOMAIN\x00\x00\x00\x00\x00'
-    b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-    b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-    b'\x00\x00\x00\x0b\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-    b'\x00\xb4\x00\x00\x00\xb9\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04'
-    b'\x00\x00\x00\x00\x00\x00\x00!\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00'
-    b'\x00\x00\x00\x00p\x02\x00\x00&\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-    b'\x00\x04\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x03\x00\x00\x00\x00'
-    b'\x00\x00\x00\x00\x00\x00\x00\x96\x02\x00\x00;\x00\x00\x00\x00\x00\x00\x00'
-    b'\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00')
+ELF_WITH_TOKENIZER_SECTIONS = Path(__file__).parent.joinpath(
+    'example_binary_with_tokenized_strings.elf').read_bytes()
+
+TOKENS_IN_ELF = 22
 
 # 0x2e668cd6 is 'Jello, world!' (which is also used in database_test.py).
 JELLO_WORLD_TOKEN = b'\xd6\x8c\x66\x2e'
@@ -131,8 +103,9 @@
     def test_simple(self):
         detok = detokenize.Detokenizer(
             tokens.Database([
-                tokens.TokenizedStringEntry(0xcdab, '%02d %s %c%%',
-                                            dt.datetime.now())
+                tokens.TokenizedStringEntry(0xcdab,
+                                            '%02d %s %c%%',
+                                            date_removed=dt.datetime.now())
             ]))
         self.assertEqual(str(detok.detokenize(b'\xab\xcd\0\0\x02\x03Two\x66')),
                          '01 Two 3%')
@@ -140,7 +113,9 @@
     def test_detokenize_extra_data_is_unsuccessful(self):
         detok = detokenize.Detokenizer(
             tokens.Database([
-                tokens.TokenizedStringEntry(1, 'no args', dt.datetime(1, 1, 1))
+                tokens.TokenizedStringEntry(1,
+                                            'no args',
+                                            date_removed=dt.datetime(1, 1, 1))
             ]))
 
         result = detok.detokenize(b'\x01\0\0\0\x04args')
@@ -154,8 +129,11 @@
 
     def test_detokenize_missing_data_is_unsuccessful(self):
         detok = detokenize.Detokenizer(
-            tokens.Database(
-                [tokens.TokenizedStringEntry(2, '%s', dt.datetime(1, 1, 1))]))
+            tokens.Database([
+                tokens.TokenizedStringEntry(2,
+                                            '%s',
+                                            date_removed=dt.datetime(1, 1, 1))
+            ]))
 
         result = detok.detokenize(b'\x02\0\0\0')
         string, args, remaining = result.failures[0]
@@ -166,8 +144,11 @@
         self.assertEqual('%s', str(result))
 
     def test_detokenize_missing_data_with_errors_is_unsuccessful(self):
-        detok = detokenize.Detokenizer(tokens.Database(
-            [tokens.TokenizedStringEntry(2, '%s', dt.datetime(1, 1, 1))]),
+        detok = detokenize.Detokenizer(tokens.Database([
+            tokens.TokenizedStringEntry(2,
+                                        '%s',
+                                        date_removed=dt.datetime(1, 1, 1))
+        ]),
                                        show_errors=True)
 
         result = detok.detokenize(b'\x02\0\0\0')
@@ -181,8 +162,10 @@
     def test_unparsed_data(self):
         detok = detokenize.Detokenizer(
             tokens.Database([
-                tokens.TokenizedStringEntry(1, 'no args',
-                                            dt.datetime(100, 1, 1)),
+                tokens.TokenizedStringEntry(1,
+                                            'no args',
+                                            date_removed=dt.datetime(
+                                                100, 1, 1)),
             ]))
         result = detok.detokenize(b'\x01\0\0\0o_o')
         self.assertFalse(result.ok())
@@ -258,12 +241,15 @@
         detok = detokenize.Detokenizer(io.BytesIO(ELF_WITH_TOKENIZER_SECTIONS))
         expected_tokens = frozenset(detok.database.token_to_entries.keys())
 
-        with tempfile.NamedTemporaryFile() as elf:
+        elf = tempfile.NamedTemporaryFile('wb', delete=False)
+        try:
             elf.write(ELF_WITH_TOKENIZER_SECTIONS)
-            elf.seek(0)
+            elf.close()
 
             # Open ELF by file object
-            detok = detokenize.Detokenizer(elf)
+            with open(elf.name, 'rb') as fd:
+                detok = detokenize.Detokenizer(fd)
+
             self.assertEqual(expected_tokens,
                              frozenset(detok.database.token_to_entries.keys()))
 
@@ -273,21 +259,25 @@
                              frozenset(detok.database.token_to_entries.keys()))
 
             # Open ELF by elf_reader.Elf
-            elf.seek(0)
-            detok = detokenize.Detokenizer(elf_reader.Elf(elf))
+            with open(elf.name, 'rb') as fd:
+                detok = detokenize.Detokenizer(elf_reader.Elf(fd))
+
             self.assertEqual(expected_tokens,
                              frozenset(detok.database.token_to_entries.keys()))
+        finally:
+            os.unlink(elf.name)
 
     def test_decode_from_csv_file(self):
         detok = detokenize.Detokenizer(io.BytesIO(ELF_WITH_TOKENIZER_SECTIONS))
         expected_tokens = frozenset(detok.database.token_to_entries.keys())
 
         csv_database = str(detok.database)
-        self.assertEqual(len(csv_database.splitlines()), 17)
+        self.assertEqual(len(csv_database.splitlines()), TOKENS_IN_ELF)
 
-        with tempfile.NamedTemporaryFile('r+') as csv_file:
+        csv_file = tempfile.NamedTemporaryFile('w', delete=False)
+        try:
             csv_file.write(csv_database)
-            csv_file.seek(0)
+            csv_file.close()
 
             # Open CSV by path
             detok = detokenize.Detokenizer(csv_file.name)
@@ -295,9 +285,13 @@
                              frozenset(detok.database.token_to_entries.keys()))
 
             # Open CSV by file object
-            detok = detokenize.Detokenizer(csv_file)
+            with open(csv_file.name) as fd:
+                detok = detokenize.Detokenizer(fd)
+
             self.assertEqual(expected_tokens,
                              frozenset(detok.database.token_to_entries.keys()))
+        finally:
+            os.unlink(csv_file.name)
 
     def test_create_detokenizer_with_token_database(self):
         detok = detokenize.Detokenizer(io.BytesIO(ELF_WITH_TOKENIZER_SECTIONS))
@@ -316,10 +310,13 @@
 
         # Database with several conflicting tokens.
         self.detok = detokenize.Detokenizer(tokens.Database([
-            tokens.TokenizedStringEntry(token, 'REMOVED', dt.datetime(9, 1, 1)),
+            tokens.TokenizedStringEntry(
+                token, 'REMOVED', date_removed=dt.datetime(9, 1, 1)),
             tokens.TokenizedStringEntry(token, 'newer'),
-            tokens.TokenizedStringEntry(token, 'A: %d', dt.datetime(30, 5, 9)),
-            tokens.TokenizedStringEntry(token, 'B: %c', dt.datetime(30, 5, 10)),
+            tokens.TokenizedStringEntry(
+                token, 'A: %d', date_removed=dt.datetime(30, 5, 9)),
+            tokens.TokenizedStringEntry(
+                token, 'B: %c', date_removed=dt.datetime(30, 5, 10)),
             tokens.TokenizedStringEntry(token, 'C: %s'),
             tokens.TokenizedStringEntry(token, '%d%u'),
             tokens.TokenizedStringEntry(token, '%s%u %d'),
@@ -385,9 +382,11 @@
 class AutoUpdatingDetokenizerTest(unittest.TestCase):
     """Tests the AutoUpdatingDetokenizer class."""
     def test_update(self, mock_getmtime):
+        """Tests the update command."""
+
         db = database.load_token_database(
             io.BytesIO(ELF_WITH_TOKENIZER_SECTIONS))
-        self.assertEqual(len(db), 17)
+        self.assertEqual(len(db), TOKENS_IN_ELF)
 
         the_time = [100]
 
@@ -400,15 +399,20 @@
 
         mock_getmtime.side_effect = move_back_time_if_file_exists
 
-        with tempfile.NamedTemporaryFile('wb', delete=True) as fd:
-            detok = detokenize.AutoUpdatingDetokenizer(fd.name,
+        file = tempfile.NamedTemporaryFile('wb', delete=False)
+        try:
+            file.close()
+
+            detok = detokenize.AutoUpdatingDetokenizer(file.name,
                                                        min_poll_period_s=0)
             self.assertFalse(detok.detokenize(JELLO_WORLD_TOKEN).ok())
 
-            tokens.write_binary(db, fd)
-            fd.flush()
+            with open(file.name, 'wb') as fd:
+                tokens.write_binary(db, fd)
 
             self.assertTrue(detok.detokenize(JELLO_WORLD_TOKEN).ok())
+        finally:
+            os.unlink(file.name)
 
         # The database stays around if the file is deleted.
         self.assertTrue(detok.detokenize(JELLO_WORLD_TOKEN).ok())
@@ -416,27 +420,32 @@
     def test_no_update_if_time_is_same(self, mock_getmtime):
         mock_getmtime.return_value = 100
 
-        with tempfile.NamedTemporaryFile('wb', delete=True) as fd:
+        file = tempfile.NamedTemporaryFile('wb', delete=False)
+        try:
             tokens.write_csv(
                 database.load_token_database(
-                    io.BytesIO(ELF_WITH_TOKENIZER_SECTIONS)), fd)
-            fd.flush()
+                    io.BytesIO(ELF_WITH_TOKENIZER_SECTIONS)), file)
+            file.close()
 
-            detok = detokenize.AutoUpdatingDetokenizer(fd, min_poll_period_s=0)
+            detok = detokenize.AutoUpdatingDetokenizer(file,
+                                                       min_poll_period_s=0)
             self.assertTrue(detok.detokenize(JELLO_WORLD_TOKEN).ok())
 
-            # Empty the database, but keep the modified time the same.
-            fd.truncate(0)
-            fd.flush()
+            # Empty the database, but keep the mock modified time the same.
+            with open(file.name, 'wb'):
+                pass
+
             self.assertTrue(detok.detokenize(JELLO_WORLD_TOKEN).ok())
             self.assertTrue(detok.detokenize(JELLO_WORLD_TOKEN).ok())
 
             # Move back time so the now-empty file is reloaded.
             mock_getmtime.return_value = 50
             self.assertFalse(detok.detokenize(JELLO_WORLD_TOKEN).ok())
+        finally:
+            os.unlink(file.name)
 
 
-def _next_char(message):
+def _next_char(message: bytes) -> bytes:
     return bytes(b + 1 for b in message)
 
 
@@ -483,11 +492,17 @@
 
     TEST_CASES = (
         (b'', b''),
+        (b'nothing here', b'nothing here'),
         (JELLO, b'Jello, world!'),
+        (JELLO + b'a', b'Jello, world!a'),
+        (JELLO + b'abc', b'Jello, world!abc'),
+        (JELLO + b'abc=', b'Jello, world!abc='),
+        (b'$a' + JELLO + b'a', b'$aJello, world!a'),
         (b'Hello ' + JELLO + b'?', b'Hello Jello, world!?'),
         (b'$' + JELLO, b'$Jello, world!'),
         (JELLO + JELLO, b'Jello, world!Jello, world!'),
         (JELLO + b'$' + JELLO, b'Jello, world!$Jello, world!'),
+        (JELLO + b'$a' + JELLO + b'bcd', b'Jello, world!$aJello, world!bcd'),
         (b'$3141', b'$3141'),
         (JELLO + b'$3141', b'Jello, world!$3141'),
         (RECURSION, b'The secret message is "Jello, world!"'),
@@ -499,7 +514,9 @@
         super().setUp()
         db = database.load_token_database(
             io.BytesIO(ELF_WITH_TOKENIZER_SECTIONS))
-        db.add([self.RECURSION_STRING, self.RECURSION_STRING_2])
+        db.add(
+            tokens.TokenizedStringEntry(tokens.default_hash(s), s)
+            for s in [self.RECURSION_STRING, self.RECURSION_STRING_2])
         self.detok = detokenize.Detokenizer(db)
 
     def test_detokenize_base64_live(self):
diff --git a/pw_tokenizer/py/elf_reader_test.py b/pw_tokenizer/py/elf_reader_test.py
index a65587f..2473182 100755
--- a/pw_tokenizer/py/elf_reader_test.py
+++ b/pw_tokenizer/py/elf_reader_test.py
@@ -125,9 +125,9 @@
             self.assertEqual(section.size, size)
 
     def test_dump_single_section(self):
-        self.assertEqual(self._elf.dump_sections(r'\.test_section_1'),
+        self.assertEqual(self._elf.dump_section_contents(r'\.test_section_1'),
                          b'You cannot pass\0')
-        self.assertEqual(self._elf.dump_sections(r'\.test_section_2'),
+        self.assertEqual(self._elf.dump_section_contents(r'\.test_section_2'),
                          b'\xef\xbe\xed\xfe')
 
     def test_dump_multiple_sections(self):
@@ -137,7 +137,8 @@
         else:
             contents = b'\xef\xbe\xed\xfeYou cannot pass\0'
 
-        self.assertIn(self._elf.dump_sections(r'.test_section_\d'), contents)
+        self.assertIn(self._elf.dump_section_contents(r'.test_section_\d'),
+                      contents)
 
     def test_read_values(self):
         address = self._section('.test_section_1').address
@@ -247,9 +248,9 @@
 
     def test_elf_reader_dump_single_section(self):
         elf = elf_reader.Elf(self._archive)
-        self.assertEqual(elf.dump_sections(r'\.test_section_1'),
+        self.assertEqual(elf.dump_section_contents(r'\.test_section_1'),
                          b'You cannot pass\0')
-        self.assertEqual(elf.dump_sections(r'\.test_section_2'),
+        self.assertEqual(elf.dump_section_contents(r'\.test_section_2'),
                          b'\xef\xbe\xed\xfe')
 
     def test_elf_reader_read_values(self):
diff --git a/pw_tokenizer/py/encode_test.py b/pw_tokenizer/py/encode_test.py
index 7a8c4a5..0dfc22a 100755
--- a/pw_tokenizer/py/encode_test.py
+++ b/pw_tokenizer/py/encode_test.py
@@ -16,9 +16,10 @@
 
 import unittest
 
-from pw_tokenizer.encode import encode_token_and_args
 import varint_test_data
 
+from pw_tokenizer.encode import encode_token_and_args
+
 
 class TestEncodeTokenized(unittest.TestCase):
     """Tests encoding tokenized strings with various arguments."""
diff --git a/pw_tokenizer/py/example_binary_with_tokenized_strings.elf b/pw_tokenizer/py/example_binary_with_tokenized_strings.elf
old mode 100644
new mode 100755
index 7313906..118c05a
--- a/pw_tokenizer/py/example_binary_with_tokenized_strings.elf
+++ b/pw_tokenizer/py/example_binary_with_tokenized_strings.elf
Binary files differ
diff --git a/pw_tokenizer/py/example_legacy_binary_with_tokenized_strings.elf b/pw_tokenizer/py/example_legacy_binary_with_tokenized_strings.elf
new file mode 100755
index 0000000..0fe2e60
--- /dev/null
+++ b/pw_tokenizer/py/example_legacy_binary_with_tokenized_strings.elf
Binary files differ
diff --git a/pw_tokenizer/py/generate_argument_types_macro.py b/pw_tokenizer/py/generate_argument_types_macro.py
index 3808a38..306fc4d 100755
--- a/pw_tokenizer/py/generate_argument_types_macro.py
+++ b/pw_tokenizer/py/generate_argument_types_macro.py
@@ -14,8 +14,6 @@
 # the License.
 """Generates macros for encoding tokenizer argument types."""
 
-from __future__ import print_function
-
 import datetime
 import os
 
diff --git a/pw_tokenizer/py/generate_hash_macro.py b/pw_tokenizer/py/generate_hash_macro.py
index 929d562..4c144e1 100755
--- a/pw_tokenizer/py/generate_hash_macro.py
+++ b/pw_tokenizer/py/generate_hash_macro.py
@@ -14,8 +14,6 @@
 # the License.
 """Generates a C macro for the PW tokenizer 65599 fixed length hash."""
 
-from __future__ import print_function
-
 import datetime
 import os
 
diff --git a/pw_tokenizer/py/pw_tokenizer/__main__.py b/pw_tokenizer/py/pw_tokenizer/__main__.py
index 3ed2835..4a7bdf2 100644
--- a/pw_tokenizer/py/pw_tokenizer/__main__.py
+++ b/pw_tokenizer/py/pw_tokenizer/__main__.py
@@ -15,4 +15,4 @@
 
 from pw_tokenizer import detokenize
 
-detokenize._main(detokenize._parse_args())  # pylint: disable=protected-access
+detokenize.main()
diff --git a/pw_tokenizer/py/pw_tokenizer/database.py b/pw_tokenizer/py/pw_tokenizer/database.py
index 10563a1..cf21481 100755
--- a/pw_tokenizer/py/pw_tokenizer/database.py
+++ b/pw_tokenizer/py/pw_tokenizer/database.py
@@ -21,13 +21,15 @@
 import argparse
 from datetime import datetime
 import glob
+import json
 import logging
 import os
 from pathlib import Path
 import re
 import struct
 import sys
-from typing import Callable, Dict, Iterable, List, Set
+from typing import (Callable, Dict, Iterable, Iterator, List, Pattern, Set,
+                    TextIO, Tuple, Union)
 
 try:
     from pw_tokenizer import elf_reader, tokens
@@ -40,36 +42,117 @@
 
 _LOG = logging.getLogger('pw_tokenizer')
 
-DEFAULT_DOMAIN = 'default'
-
 
 def _elf_reader(elf) -> elf_reader.Elf:
     return elf if isinstance(elf, elf_reader.Elf) else elf_reader.Elf(elf)
 
 
-def _read_strings_from_elf(elf, domain: str) -> Iterable[str]:
+# Magic number used to indicate the beginning of a tokenized string entry. This
+# value MUST match the value of _PW_TOKENIZER_ENTRY_MAGIC in
+# pw_tokenizer/public/pw_tokenizer/internal/tokenize_string.h.
+_TOKENIZED_ENTRY_MAGIC = 0xBAA98DEE
+_ENTRY = struct.Struct('<4I')
+_TOKENIZED_ENTRY_SECTIONS = re.compile(
+    r'^\.pw_tokenizer.entries(?:\.[_\d]+)?$')
+
+_LEGACY_STRING_SECTIONS = re.compile(
+    r'^\.pw_tokenized\.(?P<domain>[^.]+)(?:\.\d+)?$')
+
+_ERROR_HANDLER = 'surrogateescape'  # How to deal with UTF-8 decoding errors
+
+
+class Error(Exception):
+    """Failed to extract token entries from an ELF file."""
+
+
+def _read_tokenized_entries(
+        data: bytes,
+        domain: Pattern[str]) -> Iterator[tokens.TokenizedStringEntry]:
+    index = 0
+
+    while index + _ENTRY.size <= len(data):
+        magic, token, domain_len, string_len = _ENTRY.unpack_from(data, index)
+
+        if magic != _TOKENIZED_ENTRY_MAGIC:
+            raise Error(
+                f'Expected magic number 0x{_TOKENIZED_ENTRY_MAGIC:08x}, '
+                f'found 0x{magic:08x}')
+
+        start = index + _ENTRY.size
+        index = start + domain_len + string_len
+
+        # Create the entries, trimming null terminators.
+        entry = tokens.TokenizedStringEntry(
+            token,
+            data[start + domain_len:index - 1].decode(errors=_ERROR_HANDLER),
+            data[start:start + domain_len - 1].decode(errors=_ERROR_HANDLER),
+        )
+
+        if data[start + domain_len - 1] != 0:
+            raise Error(
+                f'Domain {entry.domain} for {entry.string} not null terminated'
+            )
+
+        if data[index - 1] != 0:
+            raise Error(f'String {entry.string} is not null terminated')
+
+        if domain.fullmatch(entry.domain):
+            yield entry
+
+
+def _read_tokenized_strings(sections: Dict[str, bytes],
+                            domain: Pattern[str]) -> Iterator[tokens.Database]:
+    # Legacy ELF files used "default" as the default domain instead of "". Remap
+    # the default if necessary.
+    if domain.pattern == tokens.DEFAULT_DOMAIN:
+        domain = re.compile('default')
+
+    for section, data in sections.items():
+        match = _LEGACY_STRING_SECTIONS.match(section)
+        if match and domain.match(match.group('domain')):
+            yield tokens.Database.from_strings(
+                (s.decode(errors=_ERROR_HANDLER) for s in data.split(b'\0')),
+                match.group('domain'))
+
+
+def _database_from_elf(elf, domain: Pattern[str]) -> tokens.Database:
     """Reads the tokenized strings from an elf_reader.Elf or ELF file object."""
     _LOG.debug('Reading tokenized strings in domain "%s" from %s', domain, elf)
 
-    sections = _elf_reader(elf).dump_sections(
-        rf'^\.pw_tokenized\.{domain}(?:\.\d+)?$')
-    if sections is not None:
-        for string in sections.split(b'\0'):
-            yield string.decode()
+    reader = _elf_reader(elf)
+
+    # Read tokenized string entries.
+    section_data = reader.dump_section_contents(_TOKENIZED_ENTRY_SECTIONS)
+    if section_data is not None:
+        return tokens.Database(_read_tokenized_entries(section_data, domain))
+
+    # Read legacy null-terminated string entries.
+    sections = reader.dump_sections(_LEGACY_STRING_SECTIONS)
+    if sections:
+        return tokens.Database.merged(
+            *_read_tokenized_strings(sections, domain))
+
+    return tokens.Database([])
 
 
-def tokenization_domains(elf) -> Iterable[str]:
+def tokenization_domains(elf) -> Iterator[str]:
     """Lists all tokenization domains in an ELF file."""
-    tokenized_section = re.compile(r'\.pw_tokenized\.(?P<domain>.+)(?:\.\d+)?')
-    for section in _elf_reader(elf).sections:
-        match = tokenized_section.match(section.name)
-        if match:
-            yield match.group('domain')
+    reader = _elf_reader(elf)
+    section_data = reader.dump_section_contents(_TOKENIZED_ENTRY_SECTIONS)
+    if section_data is not None:
+        yield from frozenset(
+            e.domain
+            for e in _read_tokenized_entries(section_data, re.compile('.*')))
+    else:  # Check for the legacy domain sections
+        for section in reader.sections:
+            match = _LEGACY_STRING_SECTIONS.match(section.name)
+            if match:
+                yield match.group('domain')
 
 
 def read_tokenizer_metadata(elf) -> Dict[str, int]:
     """Reads the metadata entries from an ELF."""
-    sections = _elf_reader(elf).dump_sections(r'\.pw_tokenizer_info')
+    sections = _elf_reader(elf).dump_section_contents(r'\.pw_tokenizer\.info')
 
     metadata: Dict[str, int] = {}
     if sections is not None:
@@ -83,7 +166,7 @@
     return metadata
 
 
-def _load_token_database(db, domain: str) -> tokens.Database:
+def _load_token_database(db, domain: Pattern[str]) -> tokens.Database:
     """Loads a Database from a database object, ELF, CSV, or binary database."""
     if db is None:
         return tokens.Database()
@@ -92,7 +175,7 @@
         return db
 
     if isinstance(db, elf_reader.Elf):
-        return tokens.Database.from_strings(_read_strings_from_elf(db, domain))
+        return _database_from_elf(db, domain)
 
     # If it's a str, it might be a path. Check if it's an ELF or CSV.
     if isinstance(db, (str, Path)):
@@ -103,15 +186,14 @@
         # Read the path as an ELF file.
         with open(db, 'rb') as fd:
             if elf_reader.compatible_file(fd):
-                return tokens.Database.from_strings(
-                    _read_strings_from_elf(fd, domain))
+                return _database_from_elf(fd, domain)
 
         # Read the path as a packed binary or CSV file.
         return tokens.DatabaseFile(db)
 
     # Assume that it's a file object and check if it's an ELF.
     if elf_reader.compatible_file(db):
-        return tokens.Database.from_strings(_read_strings_from_elf(db, domain))
+        return _database_from_elf(db, domain)
 
     # Read the database as CSV or packed binary from a file object's path.
     if hasattr(db, 'name') and os.path.exists(db.name):
@@ -121,14 +203,17 @@
     return tokens.Database(tokens.parse_csv(db))
 
 
-def load_token_database(*databases,
-                        domain: str = DEFAULT_DOMAIN) -> tokens.Database:
+def load_token_database(
+    *databases,
+    domain: Union[str,
+                  Pattern[str]] = tokens.DEFAULT_DOMAIN) -> tokens.Database:
     """Loads a Database from database objects, ELFs, CSVs, or binary files."""
+    domain = re.compile(domain)
     return tokens.Database.merged(*(_load_token_database(db, domain)
                                     for db in databases))
 
 
-def generate_report(db: tokens.Database) -> Dict[str, int]:
+def database_summary(db: tokens.Database) -> Dict[str, int]:
     """Returns a simple report of properties of the database."""
     present = [entry for entry in db.entries() if not entry.date_removed]
 
@@ -143,7 +228,33 @@
     }
 
 
-def _handle_create(databases, database, force, output_type, include, exclude):
+_DatabaseReport = Dict[str, Dict[str, Dict[str, int]]]
+
+
+def generate_reports(paths: Iterable[Path]) -> _DatabaseReport:
+    """Returns a dictionary with information about the provided databases."""
+    reports: _DatabaseReport = {}
+
+    for path in paths:
+        with path.open('rb') as file:
+            if elf_reader.compatible_file(file):
+                domains = list(tokenization_domains(file))
+            else:
+                domains = ['']
+
+        domain_reports = {}
+
+        for domain in domains:
+            domain_reports[domain] = database_summary(
+                load_token_database(path, domain=domain))
+
+        reports[str(path)] = domain_reports
+
+    return reports
+
+
+def _handle_create(databases, database, force, output_type, include, exclude,
+                   replace):
     """Creates a token database file from one or more ELF files."""
 
     if database == '-':
@@ -156,7 +267,7 @@
         fd = open(database, 'wb')
 
     database = tokens.Database.merged(*databases)
-    database.filter(include, exclude)
+    database.filter(include, exclude, replace)
 
     with fd:
         if output_type == 'csv':
@@ -174,7 +285,7 @@
     initial = len(token_database)
 
     for source in databases:
-        token_database.add((entry.string for entry in source.entries()))
+        token_database.add(source.entries())
 
     token_database.write_to_file()
 
@@ -184,8 +295,7 @@
 
 def _handle_mark_removals(token_database, databases, date):
     marked_removed = token_database.mark_removals(
-        (entry.string
-         for entry in tokens.Database.merged(*databases).entries()
+        (entry for entry in tokens.Database.merged(*databases).entries()
          if not entry.date_removed), date)
 
     token_database.write_to_file()
@@ -201,27 +311,9 @@
     _LOG.info('Removed %d entries from %s', len(purged), token_database.path)
 
 
-def _handle_report(token_database_or_elf, output):
-    for path in token_database_or_elf:
-        with path.open('rb') as file:
-            if elf_reader.compatible_file(file):
-                domains = list(tokenization_domains(file))
-            else:
-                domains = [path.name]
-
-        for domain in domains:
-            output.write(
-                '[{name}]\n'
-                '                 Domain: {domain}\n'
-                '        Entries present: {present_entries}\n'
-                '        Size of strings: {present_size_bytes} B\n'
-                '          Total entries: {total_entries}\n'
-                '  Total size of strings: {total_size_bytes} B\n'
-                '             Collisions: {collisions} tokens\n'.format(
-                    name=path,
-                    domain=domain,
-                    **generate_report(load_token_database(path,
-                                                          domain=domain))))
+def _handle_report(token_database_or_elf: List[Path], output: TextIO) -> None:
+    json.dump(generate_reports(token_database_or_elf), output, indent=2)
+    output.write('\n')
 
 
 def expand_paths_or_globs(*paths_or_globs: str) -> Iterable[Path]:
@@ -249,19 +341,23 @@
         setattr(namespace, self.dest, list(expand_paths_or_globs(*values)))
 
 
-def _read_elf_with_domain(elf: str, domain: str) -> Iterable[tokens.Database]:
+def _read_elf_with_domain(elf: str,
+                          domain: Pattern[str]) -> Iterable[tokens.Database]:
     for path in expand_paths_or_globs(elf):
         with path.open('rb') as file:
             if not elf_reader.compatible_file(file):
                 raise ValueError(f'{elf} is not an ELF file, '
                                  f'but the "{domain}" domain was specified')
 
-            yield tokens.Database.from_strings(
-                _read_strings_from_elf(file, domain))
+            yield _database_from_elf(file, domain)
 
 
-class _LoadTokenDatabases(argparse.Action):
-    """Argparse action that reads tokenize databases from paths or globs."""
+class LoadTokenDatabases(argparse.Action):
+    """Argparse action that reads tokenize databases from paths or globs.
+
+    ELF files may have #domain appended to them to specify a tokenization domain
+    other than the default.
+    """
     def __call__(self, parser, namespace, values, option_string=None):
         databases: List[tokens.Database] = []
         paths: Set[Path] = set()
@@ -269,23 +365,30 @@
         try:
             for value in values:
                 if value.count('#') == 1:
-                    databases.extend(_read_elf_with_domain(*value.split('#')))
+                    path, domain = value.split('#')
+                    domain = re.compile(domain)
+                    databases.extend(_read_elf_with_domain(path, domain))
                 else:
                     paths.update(expand_paths_or_globs(value))
 
             for path in paths:
-                try:
-                    databases.append(load_token_database(path))
-                except:
-                    _LOG.exception('Failed to load token database %s', path)
-                    raise
-        except (FileNotFoundError, ValueError) as err:
+                databases.append(load_token_database(path))
+        except tokens.DatabaseFormatError as err:
+            parser.error(
+                f'argument elf_or_token_database: {path} is not a supported '
+                'token database file. Only ELF files or token databases (CSV '
+                f'or binary format) are supported. {err}. ')
+        except FileNotFoundError as err:
             parser.error(f'argument elf_or_token_database: {err}')
+        except:  # pylint: disable=bare-except
+            _LOG.exception('Failed to load token database %s', path)
+            parser.error('argument elf_or_token_database: '
+                         f'Error occurred while loading token database {path}')
 
         setattr(namespace, self.dest, databases)
 
 
-def token_databases_parser() -> argparse.ArgumentParser:
+def token_databases_parser(nargs: str = '+') -> argparse.ArgumentParser:
     """Returns an argument parser for reading token databases.
 
     These arguments can be added to another parser using the parents arg.
@@ -294,14 +397,14 @@
     parser.add_argument(
         'databases',
         metavar='elf_or_token_database',
-        nargs='+',
-        action=_LoadTokenDatabases,
+        nargs=nargs,
+        action=LoadTokenDatabases,
         help=('ELF or token database files from which to read strings and '
               'tokens. For ELF files, the tokenization domain to read from '
               'may specified after the path as #domain_name (e.g. '
               'foo.elf#TEST_DOMAIN). Unless specified, only the default '
-              'domain is read from ELF files; .* reads all domains. Globs are '
-              'expanded to compatible database files.'))
+              'domain ("") is read from ELF files; .* reads all domains. '
+              'Globs are expanded to compatible database files.'))
     return parser
 
 
@@ -324,7 +427,7 @@
                            required=True,
                            help='The database file to update.')
 
-    option_tokens = token_databases_parser()
+    option_tokens = token_databases_parser('*')
 
     # Top-level argument parser.
     parser = argparse.ArgumentParser(
@@ -362,18 +465,47 @@
         '-i',
         '--include',
         type=re.compile,
+        default=[],
         action='append',
-        help=(
-            'If provided, at least one of these regular expressions must match '
-            'for a string to be included in the database.'))
+        help=('If provided, at least one of these regular expressions must '
+              'match for a string to be included in the database.'))
     subparser.add_argument(
         '-e',
         '--exclude',
         type=re.compile,
+        default=[],
         action='append',
         help=('If provided, none of these regular expressions may match for a '
               'string to be included in the database.'))
 
+    unescaped_slash = re.compile(r'(?<!\\)/')
+
+    def replacement(value: str) -> Tuple[Pattern, 'str']:
+        try:
+            find, sub = unescaped_slash.split(value, 1)
+        except ValueError as err:
+            raise argparse.ArgumentTypeError(
+                'replacements must be specified as "search_regex/replacement"')
+
+        try:
+            return re.compile(find.replace(r'\/', '/')), sub
+        except re.error as err:
+            raise argparse.ArgumentTypeError(
+                f'"{value}" is not a valid regular expression: {err}')
+
+    subparser.add_argument(
+        '--replace',
+        type=replacement,
+        default=[],
+        action='append',
+        help=('If provided, replaces text that matches a regular expression. '
+              'This can be used to replace sensitive terms in a token '
+              'database that will be distributed publicly. The expression and '
+              'replacement are specified as "search_regex/replacement". '
+              'Plain slash characters in the regex must be escaped with a '
+              r'backslash (\/). The replacement text may include '
+              'backreferences for captured groups in the regex.'))
+
     # The 'add' command adds strings to a database from a set of ELFs.
     subparser = subparsers.add_parser(
         'add',
diff --git a/pw_tokenizer/py/pw_tokenizer/detokenize.py b/pw_tokenizer/py/pw_tokenizer/detokenize.py
index 2ec0369..26f5d52 100755
--- a/pw_tokenizer/py/pw_tokenizer/detokenize.py
+++ b/pw_tokenizer/py/pw_tokenizer/detokenize.py
@@ -31,8 +31,6 @@
 messages from a file or stdin.
 """
 
-from __future__ import division
-
 import argparse
 import base64
 import binascii
@@ -40,12 +38,14 @@
 import io
 import logging
 import os
+from pathlib import Path
 import re
 import string
 import struct
 import sys
 import time
-from typing import Dict, List, Iterable, NamedTuple, Optional, Tuple
+from typing import (BinaryIO, Callable, Dict, List, Iterable, Iterator, Match,
+                    NamedTuple, Optional, Pattern, Tuple, Union)
 
 try:
     from pw_tokenizer import database, decode, tokens
@@ -209,10 +209,9 @@
 class AutoUpdatingDetokenizer:
     """Loads and updates a detokenizer from database paths."""
     class _DatabasePath:
-        """Tracks the modified time of a path."""
+        """Tracks the modified time of a path or file object."""
         def __init__(self, path):
-            self.path = path if isinstance(path,
-                                           (str, os.PathLike)) else path.name
+            self.path = path if isinstance(path, (str, Path)) else path.name
             self._modified_time: Optional[float] = self._last_modified_time()
 
         def updated(self) -> bool:
@@ -236,7 +235,9 @@
             except FileNotFoundError:
                 return database.load_token_database()
 
-    def __init__(self, *paths_or_files, min_poll_period_s: float = 1.0):
+    def __init__(self,
+                 *paths_or_files,
+                 min_poll_period_s: float = 1.0) -> None:
         self.paths = tuple(self._DatabasePath(path) for path in paths_or_files)
         self.min_poll_period_s = min_poll_period_s
         self._last_checked_time: float = time.time()
@@ -255,15 +256,18 @@
         return self._detokenizer.detokenize(data)
 
 
+_Detokenizer = Union[Detokenizer, AutoUpdatingDetokenizer]
+
+
 class PrefixedMessageDecoder:
     """Parses messages that start with a prefix character from a byte stream."""
-    def __init__(self, prefix, chars):
+    def __init__(self, prefix: Union[str, bytes], chars: Union[str, bytes]):
         """Parses prefixed messages.
 
-    Args:
-      prefix: str or bytes; one character that signifies the start of a message
-      chars: str or bytes; characters allowed in a message
-    """
+        Args:
+          prefix: one character that signifies the start of a message
+          chars: characters allowed in a message
+        """
         self._prefix = prefix.encode() if isinstance(prefix, str) else prefix
 
         if isinstance(chars, str):
@@ -281,14 +285,15 @@
 
         self.data = bytearray()
 
-    def _read_next(self, fd):
+    def _read_next(self, fd: BinaryIO) -> Tuple[bytes, int]:
         """Returns the next character and its index."""
         char = fd.read(1)
         index = len(self.data)
         self.data += char
         return char, index
 
-    def read_messages(self, binary_fd):
+    def read_messages(self,
+                      binary_fd: BinaryIO) -> Iterator[Tuple[bool, bytes]]:
         """Parses prefixed messages; yields (is_message, contents) chunks."""
         message_start = None
 
@@ -314,21 +319,26 @@
             else:
                 yield False, char
 
-    def transform(self, binary_fd, transform):
+    def transform(self, binary_fd: BinaryIO,
+                  transform: Callable[[bytes], bytes]) -> Iterator[bytes]:
         """Yields the file with a transformation applied to the messages."""
         for is_message, chunk in self.read_messages(binary_fd):
             yield transform(chunk) if is_message else chunk
 
 
-def _detokenize_prefixed_base64(detokenizer, prefix, recursion):
+def _detokenize_prefixed_base64(
+        detokenizer: _Detokenizer, prefix: bytes,
+        recursion: int) -> Callable[[Match[bytes]], bytes]:
     """Returns a function that decodes prefixed Base64 with the detokenizer."""
-    def decode_and_detokenize(original):
+    def decode_and_detokenize(match: Match[bytes]) -> bytes:
         """Decodes prefixed base64 with the provided detokenizer."""
+        original = match.group(0)
+
         try:
-            result = detokenizer.detokenize(
+            detokenized_string = detokenizer.detokenize(
                 base64.b64decode(original[1:], validate=True))
-            if result.matches():
-                result = str(result).encode()
+            if detokenized_string.matches():
+                result = str(detokenized_string).encode()
 
                 if recursion > 0 and original != result:
                     result = detokenize_base64(detokenizer, result, prefix,
@@ -343,16 +353,35 @@
     return decode_and_detokenize
 
 
+BASE64_PREFIX = b'$'
 DEFAULT_RECURSION = 9
 
 
-def detokenize_base64_live(detokenizer,
-                           input_file,
-                           output,
-                           prefix=b'$',
-                           recursion=DEFAULT_RECURSION):
+def _base64_message_regex(prefix: bytes) -> Pattern[bytes]:
+    """Returns a regular expression for prefixed base64 tokenized strings."""
+    return re.compile(
+        # Base64 tokenized strings start with the prefix character ($)
+        re.escape(prefix) + (
+            # Tokenized strings contain 0 or more blocks of four Base64 chars.
+            br'(?:[A-Za-z0-9+/\-_]{4})*'
+            # The last block of 4 chars may have one or two padding chars (=).
+            br'(?:[A-Za-z0-9+/\-_]{3}=|[A-Za-z0-9+/\-_]{2}==)?'))
+
+
+def detokenize_base64_live(detokenizer: _Detokenizer,
+                           input_file: BinaryIO,
+                           output: BinaryIO,
+                           prefix: Union[str, bytes] = BASE64_PREFIX,
+                           recursion: int = DEFAULT_RECURSION) -> None:
     """Reads chars one-at-a-time and decodes messages; SLOW for big files."""
-    transform = _detokenize_prefixed_base64(detokenizer, prefix, recursion)
+    prefix_bytes = prefix.encode() if isinstance(prefix, str) else prefix
+
+    base64_message = _base64_message_regex(prefix_bytes)
+
+    def transform(data: bytes) -> bytes:
+        return base64_message.sub(
+            _detokenize_prefixed_base64(detokenizer, prefix_bytes, recursion),
+            data)
 
     for message in PrefixedMessageDecoder(
             prefix, string.ascii_letters + string.digits + '+/-_=').transform(
@@ -364,50 +393,59 @@
             output.flush()
 
 
-def detokenize_base64_to_file(detokenizer,
-                              data,
-                              output,
-                              prefix=b'$',
-                              recursion=DEFAULT_RECURSION):
+def detokenize_base64_to_file(detokenizer: _Detokenizer,
+                              data: bytes,
+                              output: BinaryIO,
+                              prefix: Union[str, bytes] = BASE64_PREFIX,
+                              recursion: int = DEFAULT_RECURSION) -> None:
     """Decodes prefixed Base64 messages in data; decodes to an output file."""
-    transform = _detokenize_prefixed_base64(detokenizer, prefix, recursion)
-
-    messages = re.compile(
-        re.escape(prefix.encode() if isinstance(prefix, str) else prefix) +
-        (br'(?:[A-Za-z0-9+/\-_]{4})*'
-         br'(?:[A-Za-z0-9+/\-_]{3}=|[A-Za-z0-9+/\-_]{2}==)?'))
-
-    index = 0
-
-    for match in messages.finditer(data):
-        output.write(data[index:match.start()])
-        output.write(transform(match.group(0)))
-        index = match.end()
-
-    output.write(data[index:])
+    prefix = prefix.encode() if isinstance(prefix, str) else prefix
+    output.write(
+        _base64_message_regex(prefix).sub(
+            _detokenize_prefixed_base64(detokenizer, prefix, recursion), data))
 
 
-def detokenize_base64(detokenizer,
-                      data,
-                      prefix=b'$',
-                      recursion=DEFAULT_RECURSION):
+def detokenize_base64(detokenizer: _Detokenizer,
+                      data: bytes,
+                      prefix: Union[str, bytes] = BASE64_PREFIX,
+                      recursion: int = DEFAULT_RECURSION) -> bytes:
     """Decodes and replaces prefixed Base64 messages in the provided data.
 
-  Args:
-    detokenizer: the detokenizer with which to decode messages
-    data: the binary data to decode
-    prefix: one-character byte string that signals the start of a message
-    recursion: how many levels to recursively decode
+    Args:
+      detokenizer: the detokenizer with which to decode messages
+      data: the binary data to decode
+      prefix: one-character byte string that signals the start of a message
+      recursion: how many levels to recursively decode
 
-  Returns:
-    copy of the data with all recognized tokens decoded
-  """
+    Returns:
+      copy of the data with all recognized tokens decoded
+    """
     output = io.BytesIO()
     detokenize_base64_to_file(detokenizer, data, output, prefix, recursion)
     return output.getvalue()
 
 
-def _handle_base64(databases, input_file, output, prefix, show_errors):
+def _follow_and_detokenize_file(detokenizer: _Detokenizer,
+                                file: BinaryIO,
+                                output: BinaryIO,
+                                prefix: Union[str, bytes],
+                                poll_period_s: float = 0.01) -> None:
+    """Polls a file to detokenize it and any appended data."""
+
+    try:
+        while True:
+            data = file.read()
+            if data:
+                detokenize_base64_to_file(detokenizer, data, output, prefix)
+                output.flush()
+            else:
+                time.sleep(poll_period_s)
+    except KeyboardInterrupt:
+        pass
+
+
+def _handle_base64(databases, input_file: BinaryIO, output: BinaryIO,
+                   prefix: str, show_errors: bool, follow: bool) -> None:
     """Handles the base64 command line option."""
     # argparse.FileType doesn't correctly handle - for binary files.
     if input_file is sys.stdin:
@@ -419,16 +457,19 @@
     detokenizer = Detokenizer(tokens.Database.merged(*databases),
                               show_errors=show_errors)
 
-    # If the input is seekable, process it all at once, which is MUCH faster.
-    if input_file.seekable():
+    if follow:
+        _follow_and_detokenize_file(detokenizer, input_file, output, prefix)
+    elif input_file.seekable():
+        # Process seekable files all at once, which is MUCH faster.
         detokenize_base64_to_file(detokenizer, input_file.read(), output,
                                   prefix)
     else:
+        # For non-seekable inputs (e.g. pipes), read one character at a time.
         detokenize_base64_live(detokenizer, input_file, output, prefix)
 
 
-def _parse_args():
-    """Parse and return command line arguments."""
+def _parse_args() -> argparse.Namespace:
+    """Parses and returns command line arguments."""
 
     parser = argparse.ArgumentParser(
         description=__doc__,
@@ -451,6 +492,12 @@
         type=argparse.FileType('rb'),
         default=sys.stdin.buffer,
         help='The file from which to read; provide - or omit for stdin.')
+    subparser.add_argument(
+        '-f',
+        '--follow',
+        action='store_true',
+        help=('Detokenize data appended to input_file as it grows; similar to '
+              'tail -f.'))
     subparser.add_argument('-o',
                            '--output',
                            type=argparse.FileType('wb'),
@@ -460,7 +507,7 @@
     subparser.add_argument(
         '-p',
         '--prefix',
-        default='$',
+        default=BASE64_PREFIX,
         help=('The one-character prefix that signals the start of a '
               'Base64-encoded message. (default: $)'))
     subparser.add_argument(
@@ -473,14 +520,17 @@
     return parser.parse_args()
 
 
-def _main(args):
+def main() -> int:
+    args = _parse_args()
+
     handler = args.handler
     del args.handler
 
     handler(**vars(args))
+    return 0
 
 
 if __name__ == '__main__':
     if sys.version_info[0] < 3:
         sys.exit('ERROR: The detokenizer command line tools require Python 3.')
-    _main(_parse_args())
+    sys.exit(main())
diff --git a/pw_tokenizer/py/pw_tokenizer/elf_reader.py b/pw_tokenizer/py/pw_tokenizer/elf_reader.py
index 2a3ac3b..a917c5b 100755
--- a/pw_tokenizer/py/pw_tokenizer/elf_reader.py
+++ b/pw_tokenizer/py/pw_tokenizer/elf_reader.py
@@ -304,17 +304,23 @@
 
         return self._elf.read(size)
 
-    def dump_sections(self, name: Union[str, Pattern[str]]) -> Optional[bytes]:
+    def dump_sections(self, name: Union[str,
+                                        Pattern[str]]) -> Dict[str, bytes]:
         """Dumps a binary string containing the sections matching the regex."""
         name_regex = re.compile(name)
 
-        sections = []
+        sections: Dict[str, bytes] = {}
         for section in self.sections:
             if name_regex.match(section.name):
                 self._elf.seek(section.file_offset + section.offset)
-                sections.append(self._elf.read(section.size))
+                sections[section.name] = self._elf.read(section.size)
 
-        return b''.join(sections) if sections else None
+        return sections
+
+    def dump_section_contents(
+            self, name: Union[str, Pattern[str]]) -> Optional[bytes]:
+        sections = self.dump_sections(name)
+        return b''.join(sections.values()) if sections else None
 
     def summary(self) -> str:
         return '\n'.join(
@@ -342,7 +348,7 @@
         return
 
     for section_pattern in sections:
-        output(elf.dump_sections(section_pattern))
+        output(elf.dump_section_contents(section_pattern))
 
 
 def _parse_args() -> argparse.Namespace:
diff --git a/pw_tokenizer/py/pw_tokenizer/py.typed b/pw_tokenizer/py/pw_tokenizer/py.typed
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pw_tokenizer/py/pw_tokenizer/py.typed
diff --git a/pw_tokenizer/py/pw_tokenizer/serial_detokenizer.py b/pw_tokenizer/py/pw_tokenizer/serial_detokenizer.py
new file mode 100644
index 0000000..e010225
--- /dev/null
+++ b/pw_tokenizer/py/pw_tokenizer/serial_detokenizer.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python3
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Decodes and detokenizes Base64-encoded strings in serial output.
+
+The output is printed or saved to a file. Input is not supported.
+"""
+
+import argparse
+import sys
+from typing import BinaryIO, Iterable
+
+import serial  # type: ignore
+from pw_tokenizer import database, detokenize, tokens
+
+
+def _parse_args():
+    """Parses and returns command line arguments."""
+
+    parser = argparse.ArgumentParser(
+        description=__doc__,
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+        parents=[database.token_databases_parser()])
+    parser.add_argument('-d',
+                        '--device',
+                        required=True,
+                        help='The serial device from which to read')
+    parser.add_argument('-b',
+                        '--baudrate',
+                        type=int,
+                        default=115200,
+                        help='The baud rate for the serial device')
+    parser.add_argument('-o',
+                        '--output',
+                        type=argparse.FileType('wb'),
+                        default=sys.stdout.buffer,
+                        help=('The file to which to write the output; '
+                              'provide - or omit for stdout.'))
+    parser.add_argument(
+        '-p',
+        '--prefix',
+        default=detokenize.BASE64_PREFIX,
+        help=('The one-character prefix that signals the start of a '
+              'Base64-encoded message. (default: $)'))
+    parser.add_argument(
+        '-s',
+        '--show_errors',
+        action='store_true',
+        help=('Show error messages instead of conversion specifiers when '
+              'arguments cannot be decoded.'))
+
+    return parser.parse_args()
+
+
+def _detokenize_serial(databases: Iterable, device: serial.Serial,
+                       baudrate: int, show_errors: bool, output: BinaryIO,
+                       prefix: str) -> None:
+    if output is sys.stdout:
+        output = sys.stdout.buffer
+
+    detokenizer = detokenize.Detokenizer(tokens.Database.merged(*databases),
+                                         show_errors=show_errors)
+    serial_device = serial.Serial(port=device, baudrate=baudrate)
+
+    try:
+        detokenize.detokenize_base64_live(detokenizer, serial_device, output,
+                                          prefix)
+    except KeyboardInterrupt:
+        output.flush()
+
+
+def main():
+    _detokenize_serial(**vars(_parse_args()))
+    return 0
+
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/pw_tokenizer/py/pw_tokenizer/tokens.py b/pw_tokenizer/py/pw_tokenizer/tokens.py
index 4a416bf..a45f4b0 100644
--- a/pw_tokenizer/py/pw_tokenizer/tokens.py
+++ b/pw_tokenizer/py/pw_tokenizer/tokens.py
@@ -15,6 +15,7 @@
 
 import collections
 import csv
+from dataclasses import dataclass
 from datetime import datetime
 import io
 import logging
@@ -22,11 +23,19 @@
 import re
 import struct
 from typing import BinaryIO, Callable, Dict, Iterable, List, NamedTuple
-from typing import Optional, Tuple, Union, ValuesView
+from typing import Optional, Pattern, Tuple, Union, ValuesView
 
 DATE_FORMAT = '%Y-%m-%d'
+DEFAULT_DOMAIN = ''
 
-DEFAULT_HASH_LENGTH = 96
+# The default hash length to use. This value only applies when hashing strings
+# from a legacy-style ELF with plain strings. New tokenized string entries
+# include the token alongside the string.
+#
+# This MUST match the default value of PW_TOKENIZER_CFG_C_HASH_LENGTH in
+# pw_tokenizer/public/pw_tokenizer/config.h.
+DEFAULT_C_HASH_LENGTH = 128
+
 TOKENIZER_HASH_CONSTANT = 65599
 
 _LOG = logging.getLogger('pw_tokenizer')
@@ -38,7 +47,11 @@
 
 def pw_tokenizer_65599_fixed_length_hash(string: Union[str, bytes],
                                          hash_length: int) -> int:
-    """Hashes the provided string."""
+    """Hashes the provided string.
+
+    This hash function is only used when adding tokens from legacy-style
+    tokenized strings in an ELF, which do not include the token.
+    """
     hash_value = len(string)
     coefficient = TOKENIZER_HASH_CONSTANT
 
@@ -50,25 +63,26 @@
 
 
 def default_hash(string: Union[str, bytes]) -> int:
-    return pw_tokenizer_65599_fixed_length_hash(string, DEFAULT_HASH_LENGTH)
+    return pw_tokenizer_65599_fixed_length_hash(string, DEFAULT_C_HASH_LENGTH)
 
 
-_EntryKey = Tuple[int, str]  # Key for uniquely referring to an entry
+class _EntryKey(NamedTuple):
+    """Uniquely refers to an entry."""
+    token: int
+    string: str
 
 
+@dataclass(eq=True, order=False)
 class TokenizedStringEntry:
     """A tokenized string with its metadata."""
-    def __init__(self,
-                 token: int,
-                 string: str,
-                 date_removed: Optional[datetime] = None):
-        self.token = token
-        self.string = string
-        self.date_removed = date_removed
+    token: int
+    string: str
+    domain: str = DEFAULT_DOMAIN
+    date_removed: Optional[datetime] = None
 
     def key(self) -> _EntryKey:
         """The key determines uniqueness for a tokenized string."""
-        return self.token, self.string
+        return _EntryKey(self.token, self.string)
 
     def update_date_removed(self,
                             new_date_removed: Optional[datetime]) -> None:
@@ -96,22 +110,16 @@
     def __str__(self) -> str:
         return self.string
 
-    def __repr__(self) -> str:
-        return '{}({!r})'.format(type(self).__name__, self.string)
-
 
 class Database:
     """Database of tokenized strings stored as TokenizedStringEntry objects."""
-    def __init__(self,
-                 entries: Iterable[TokenizedStringEntry] = (),
-                 tokenize: Callable[[str], int] = default_hash):
+    def __init__(self, entries: Iterable[TokenizedStringEntry] = ()):
         """Creates a token database."""
         # The database dict stores each unique (token, string) entry.
         self._database: Dict[_EntryKey, TokenizedStringEntry] = {
             entry.key(): entry
             for entry in entries
         }
-        self.tokenize = tokenize
 
         # This is a cache for fast token lookup that is built as needed.
         self._cache: Optional[Dict[int, List[TokenizedStringEntry]]] = None
@@ -120,10 +128,11 @@
     def from_strings(
             cls,
             strings: Iterable[str],
+            domain: str = DEFAULT_DOMAIN,
             tokenize: Callable[[str], int] = default_hash) -> 'Database':
         """Creates a Database from an iterable of strings."""
-        return cls((TokenizedStringEntry(tokenize(string), string)
-                    for string in strings), tokenize)
+        return cls((TokenizedStringEntry(tokenize(string), string, domain)
+                    for string in strings))
 
     @classmethod
     def merged(cls, *databases: 'Database') -> 'Database':
@@ -154,19 +163,19 @@
 
     def mark_removals(
             self,
-            all_strings: Iterable[str],
+            all_entries: Iterable[TokenizedStringEntry],
             removal_date: Optional[datetime] = None
     ) -> List[TokenizedStringEntry]:
-        """Marks strings missing from all_strings as having been removed.
+        """Marks entries missing from all_entries as having been removed.
 
-        The strings are assumed to represent the complete set of strings for the
-        database. Strings currently in the database not present in the provided
-        strings are marked with a removal date but remain in the database.
-        Strings in all_strings missing from the database are NOT added; call the
-        add function to add these strings.
+        The entries are assumed to represent the complete set of entries for the
+        database. Entries currently in the database not present in the provided
+        entries are marked with a removal date but remain in the database.
+        Entries in all_entries missing from the database are NOT added; call the
+        add function to add these.
 
         Args:
-          all_strings: the complete set of strings present in the database
+          all_entries: the complete set of entries present in the database
           removal_date: the datetime for removed entries; today by default
 
         Returns:
@@ -177,13 +186,12 @@
         if removal_date is None:
             removal_date = datetime.now()
 
-        all_strings = frozenset(all_strings)  # for faster lookup
+        all_keys = frozenset(entry.key() for entry in all_entries)
 
         removed = []
 
-        # Mark this entry as having been removed from the ELF.
         for entry in self._database.values():
-            if (entry.string not in all_strings
+            if (entry.key() not in all_keys
                     and (entry.date_removed is None
                          or removal_date < entry.date_removed)):
                 # Add a removal date, or update it to the oldest date.
@@ -192,20 +200,19 @@
 
         return removed
 
-    def add(self, strings: Iterable[str]) -> None:
-        """Adds new strings to the database."""
+    def add(self, entries: Iterable[TokenizedStringEntry]) -> None:
+        """Adds new entries and updates date_removed for existing entries."""
         self._cache = None
 
-        # Add new and update previously removed entries.
-        for string in strings:
-            key = self.tokenize(string), string
-
+        for new_entry in entries:
+            # Update an existing entry or create a new one.
             try:
-                entry = self._database[key]
-                if entry.date_removed:
-                    entry.date_removed = None
+                entry = self._database[new_entry.key()]
+                entry.domain = new_entry.domain
+                entry.date_removed = None
             except KeyError:
-                self._database[key] = TokenizedStringEntry(key[0], string)
+                self._database[new_entry.key()] = TokenizedStringEntry(
+                    new_entry.token, new_entry.string, new_entry.domain)
 
     def purge(
         self,
@@ -240,13 +247,19 @@
                 else:
                     self._database[key] = entry
 
-    def filter(self, include: Iterable = (), exclude: Iterable = ()) -> None:
+    def filter(
+        self,
+        include: Iterable[Union[str, Pattern[str]]] = (),
+        exclude: Iterable[Union[str, Pattern[str]]] = (),
+        replace: Iterable[Tuple[Union[str, Pattern[str]], str]] = ()
+    ) -> None:
         """Filters the database using regular expressions (strings or compiled).
 
-    Args:
-      include: iterable of regexes; only entries matching at least one are kept
-      exclude: iterable of regexes; entries matching any of these are removed
-    """
+        Args:
+          include: regexes; only entries matching at least one are kept
+          exclude: regexes; entries matching any of these are removed
+          replace: (regex, str) tuples; replaces matching terms in all entries
+        """
         self._cache = None
 
         to_delete: List[_EntryKey] = []
@@ -259,13 +272,18 @@
 
         if exclude:
             exclude_re = [re.compile(pattern) for pattern in exclude]
-            to_delete.extend(key for key, val in self._database.items()  #
-                             if any(
-                                 rgx.search(val.string) for rgx in exclude_re))
+            to_delete.extend(key for key, val in self._database.items() if any(
+                rgx.search(val.string) for rgx in exclude_re))
 
         for key in to_delete:
             del self._database[key]
 
+        for search, replacement in replace:
+            search = re.compile(search)
+
+            for value in self._database.values():
+                value.string = search.sub(replacement, value.string)
+
     def __len__(self) -> int:
         """Returns the number of entries in the database."""
         return len(self.entries())
@@ -287,7 +305,8 @@
             date = (datetime.strptime(date_str, DATE_FORMAT)
                     if date_str.strip() else None)
 
-            yield TokenizedStringEntry(token, string_literal, date)
+            yield TokenizedStringEntry(token, string_literal, DEFAULT_DOMAIN,
+                                       date)
         except (ValueError, UnicodeDecodeError) as err:
             _LOG.error('Failed to parse tokenized string entry %s: %s', line,
                        err)
@@ -315,6 +334,10 @@
 BINARY_FORMAT = _BinaryFileFormat()
 
 
+class DatabaseFormatError(Exception):
+    """Failed to parse a token database file."""
+
+
 def file_is_binary_database(fd: BinaryIO) -> bool:
     """True if the file starts with the binary token database magic string."""
     try:
@@ -326,15 +349,37 @@
         return False
 
 
+def _check_that_file_is_csv_database(path: Path) -> None:
+    """Raises an error unless the path appears to be a CSV token database."""
+    try:
+        with path.open('rb') as fd:
+            data = fd.read(8)  # Read 8 bytes, which should be the first token.
+
+        if not data:
+            return  # File is empty, which is valid CSV.
+
+        if len(data) != 8:
+            raise DatabaseFormatError(
+                f'Attempted to read {path} as a CSV token database, but the '
+                f'file is too short ({len(data)} B)')
+
+        # Make sure the first 8 chars are a valid hexadecimal number.
+        _ = int(data.decode(), 16)
+    except (IOError, UnicodeDecodeError, ValueError) as err:
+        raise DatabaseFormatError(
+            f'Encountered error while reading {path} as a CSV token database'
+        ) from err
+
+
 def parse_binary(fd: BinaryIO) -> Iterable[TokenizedStringEntry]:
     """Parses TokenizedStringEntries from a binary token database file."""
     magic, entry_count = BINARY_FORMAT.header.unpack(
         fd.read(BINARY_FORMAT.header.size))
 
     if magic != BINARY_FORMAT.magic:
-        raise ValueError(
-            'Magic number mismatch (found {!r}, expected {!r})'.format(
-                magic, BINARY_FORMAT.magic))
+        raise DatabaseFormatError(
+            f'Binary token database magic number mismatch (found {magic!r}, '
+            f'expected {BINARY_FORMAT.magic!r}) while reading from {fd}')
 
     entries = []
 
@@ -360,7 +405,7 @@
     offset = 0
     for token, removed in entries:
         string, offset = read_string(offset)
-        yield TokenizedStringEntry(token, string, removed)
+        yield TokenizedStringEntry(token, string, DEFAULT_DOMAIN, removed)
 
 
 def write_binary(database: Database, fd: BinaryIO) -> None:
@@ -397,9 +442,9 @@
 class DatabaseFile(Database):
     """A token database that is associated with a particular file.
 
-  This class adds the write_to_file() method that writes to file from which it
-  was created in the correct format (CSV or binary).
-  """
+    This class adds the write_to_file() method that writes to the file from
+    which it was created in the correct format (CSV or binary).
+    """
     def __init__(self, path: Union[Path, str]):
         self.path = Path(path)
 
@@ -411,6 +456,7 @@
                 return
 
         # Read the path as a CSV file.
+        _check_that_file_is_csv_database(self.path)
         with self.path.open('r', newline='') as file:
             super().__init__(parse_csv(file))
             self._export = write_csv
diff --git a/pw_tokenizer/py/setup.py b/pw_tokenizer/py/setup.py
index 3b3dec2..04680f1 100644
--- a/pw_tokenizer/py/setup.py
+++ b/pw_tokenizer/py/setup.py
@@ -13,7 +13,7 @@
 # the License.
 """The pw_tokenizer package."""
 
-import setuptools
+import setuptools  # type: ignore
 
 setuptools.setup(
     name='pw_tokenizer',
@@ -22,5 +22,8 @@
     author_email='pigweed-developers@googlegroups.com',
     description='Tools for decoding tokenized strings',
     packages=setuptools.find_packages(),
+    package_data={'pw_tokenizer': ['py.typed']},
+    zip_safe=False,
     test_suite='setup.test_suite',
+    install_requires=['pyserial'],
 )
diff --git a/pw_tokenizer/py/tokens_test.py b/pw_tokenizer/py/tokens_test.py
index c80a9f7..8c71a3b 100755
--- a/pw_tokenizer/py/tokens_test.py
+++ b/pw_tokenizer/py/tokens_test.py
@@ -17,10 +17,13 @@
 import datetime
 import io
 import logging
+from pathlib import Path
+import tempfile
+from typing import Iterator
 import unittest
 
 from pw_tokenizer import tokens
-from pw_tokenizer.tokens import _LOG
+from pw_tokenizer.tokens import default_hash, _LOG
 
 CSV_DATABASE = '''\
 00000000,2019-06-10,""
@@ -87,11 +90,16 @@
 """
 
 
-def read_db_from_csv(csv_str):
+def read_db_from_csv(csv_str: str) -> tokens.Database:
     with io.StringIO(csv_str) as csv_db:
         return tokens.Database(tokens.parse_csv(csv_db))
 
 
+def _entries(*strings: str) -> Iterator[tokens.TokenizedStringEntry]:
+    for string in strings:
+        yield tokens.TokenizedStringEntry(default_hash(string), string)
+
+
 class TokenDatabaseTest(unittest.TestCase):
     """Tests the token database class."""
     def test_csv(self):
@@ -192,8 +200,10 @@
         # Test basic merging into an empty database.
         db.merge(
             tokens.Database([
-                tokens.TokenizedStringEntry(1, 'one', datetime.datetime.min),
-                tokens.TokenizedStringEntry(2, 'two', datetime.datetime.min),
+                tokens.TokenizedStringEntry(
+                    1, 'one', date_removed=datetime.datetime.min),
+                tokens.TokenizedStringEntry(
+                    2, 'two', date_removed=datetime.datetime.min),
             ]))
         self.assertEqual({str(e) for e in db.entries()}, {'one', 'two'})
         self.assertEqual(db.token_to_entries[1][0].date_removed,
@@ -205,7 +215,8 @@
         db.merge(
             tokens.Database([
                 tokens.TokenizedStringEntry(3, 'three'),
-                tokens.TokenizedStringEntry(4, 'four', datetime.datetime.min),
+                tokens.TokenizedStringEntry(
+                    4, 'four', date_removed=datetime.datetime.min),
             ]))
         self.assertEqual({str(e)
                           for e in db.entries()},
@@ -228,8 +239,10 @@
         # Merge in repeated entries different removal dates.
         db.merge(
             tokens.Database([
-                tokens.TokenizedStringEntry(4, 'four', datetime.datetime.max),
-                tokens.TokenizedStringEntry(5, 'five', datetime.datetime.max),
+                tokens.TokenizedStringEntry(
+                    4, 'four', date_removed=datetime.datetime.max),
+                tokens.TokenizedStringEntry(
+                    5, 'five', date_removed=datetime.datetime.max),
             ]))
         self.assertEqual(len(db.entries()), 5)
         self.assertEqual({str(e)
@@ -258,28 +271,41 @@
                           for e in db.entries()},
                          {'one', 'two', 'three', 'four', 'five'})
 
-    def test_merge_multiple(self):
+    def test_merge_multiple_databases_in_one_call(self):
+        """Tests the merge and merged methods with multiple databases."""
         db = tokens.Database.merged(
-            tokens.Database(
-                [tokens.TokenizedStringEntry(1, 'one',
-                                             datetime.datetime.max)]),
-            tokens.Database(
-                [tokens.TokenizedStringEntry(2, 'two',
-                                             datetime.datetime.min)]),
-            tokens.Database(
-                [tokens.TokenizedStringEntry(1, 'one',
-                                             datetime.datetime.min)]))
+            tokens.Database([
+                tokens.TokenizedStringEntry(1,
+                                            'one',
+                                            date_removed=datetime.datetime.max)
+            ]),
+            tokens.Database([
+                tokens.TokenizedStringEntry(2,
+                                            'two',
+                                            date_removed=datetime.datetime.min)
+            ]),
+            tokens.Database([
+                tokens.TokenizedStringEntry(1,
+                                            'one',
+                                            date_removed=datetime.datetime.min)
+            ]))
         self.assertEqual({str(e) for e in db.entries()}, {'one', 'two'})
 
         db.merge(
             tokens.Database([
-                tokens.TokenizedStringEntry(4, 'four', datetime.datetime.max)
+                tokens.TokenizedStringEntry(4,
+                                            'four',
+                                            date_removed=datetime.datetime.max)
             ]),
-            tokens.Database(
-                [tokens.TokenizedStringEntry(2, 'two',
-                                             datetime.datetime.max)]),
             tokens.Database([
-                tokens.TokenizedStringEntry(3, 'three', datetime.datetime.min)
+                tokens.TokenizedStringEntry(2,
+                                            'two',
+                                            date_removed=datetime.datetime.max)
+            ]),
+            tokens.Database([
+                tokens.TokenizedStringEntry(3,
+                                            'three',
+                                            date_removed=datetime.datetime.min)
             ]))
         self.assertEqual({str(e)
                           for e in db.entries()},
@@ -293,12 +319,13 @@
         self.assertEqual(len(db.token_to_entries), 16)
 
         # Add two strings with the same hash.
-        db.add(['o000', '0Q1Q'])
+        db.add(_entries('o000', '0Q1Q'))
 
         self.assertEqual(len(db.entries()), 18)
         self.assertEqual(len(db.token_to_entries), 17)
 
     def test_mark_removals(self):
+        """Tests that date_removed field is set by mark_removals."""
         db = tokens.Database.from_strings(
             ['MILK', 'apples', 'oranges', 'CHEESE', 'pears'])
 
@@ -306,42 +333,44 @@
             all(entry.date_removed is None for entry in db.entries()))
         date_1 = datetime.datetime(1, 2, 3)
 
-        db.mark_removals(['apples', 'oranges', 'pears'], date_1)
+        db.mark_removals(_entries('apples', 'oranges', 'pears'), date_1)
 
         self.assertEqual(
-            db.token_to_entries[db.tokenize('MILK')][0].date_removed, date_1)
+            db.token_to_entries[default_hash('MILK')][0].date_removed, date_1)
         self.assertEqual(
-            db.token_to_entries[db.tokenize('CHEESE')][0].date_removed, date_1)
+            db.token_to_entries[default_hash('CHEESE')][0].date_removed,
+            date_1)
 
         now = datetime.datetime.now()
-        db.mark_removals(['MILK', 'CHEESE', 'pears'])
+        db.mark_removals(_entries('MILK', 'CHEESE', 'pears'))
 
         # New strings are not added or re-added in mark_removed().
         self.assertGreaterEqual(
-            db.token_to_entries[db.tokenize('MILK')][0].date_removed, date_1)
+            db.token_to_entries[default_hash('MILK')][0].date_removed, date_1)
         self.assertGreaterEqual(
-            db.token_to_entries[db.tokenize('CHEESE')][0].date_removed, date_1)
+            db.token_to_entries[default_hash('CHEESE')][0].date_removed,
+            date_1)
 
         # These strings were removed.
         self.assertGreaterEqual(
-            db.token_to_entries[db.tokenize('apples')][0].date_removed, now)
+            db.token_to_entries[default_hash('apples')][0].date_removed, now)
         self.assertGreaterEqual(
-            db.token_to_entries[db.tokenize('oranges')][0].date_removed, now)
+            db.token_to_entries[default_hash('oranges')][0].date_removed, now)
         self.assertIsNone(
-            db.token_to_entries[db.tokenize('pears')][0].date_removed)
+            db.token_to_entries[default_hash('pears')][0].date_removed)
 
     def test_add(self):
         db = tokens.Database()
-        db.add(['MILK', 'apples'])
+        db.add(_entries('MILK', 'apples'))
         self.assertEqual({e.string for e in db.entries()}, {'MILK', 'apples'})
 
-        db.add(['oranges', 'CHEESE', 'pears'])
+        db.add(_entries('oranges', 'CHEESE', 'pears'))
         self.assertEqual(len(db.entries()), 5)
 
-        db.add(['MILK', 'apples', 'only this one is new'])
+        db.add(_entries('MILK', 'apples', 'only this one is new'))
         self.assertEqual(len(db.entries()), 6)
 
-        db.add(['MILK'])
+        db.add(_entries('MILK'))
         self.assertEqual({e.string
                           for e in db.entries()}, {
                               'MILK', 'apples', 'oranges', 'CHEESE', 'pears',
@@ -364,11 +393,50 @@
         self.assertEqual(str(db), CSV_DATABASE)
 
 
+class TestDatabaseFile(unittest.TestCase):
+    """Tests the DatabaseFile class."""
+    def setUp(self):
+        file = tempfile.NamedTemporaryFile(delete=False)
+        file.close()
+        self._path = Path(file.name)
+
+    def tearDown(self):
+        self._path.unlink()
+
+    def test_update_csv_file(self):
+        self._path.write_text(CSV_DATABASE)
+        db = tokens.DatabaseFile(self._path)
+        self.assertEqual(str(db), CSV_DATABASE)
+
+        db.add([tokens.TokenizedStringEntry(0xffffffff, 'New entry!')])
+
+        db.write_to_file()
+
+        self.assertEqual(self._path.read_text(),
+                         CSV_DATABASE + 'ffffffff,          ,"New entry!"\n')
+
+    def test_csv_file_too_short_raises_exception(self):
+        self._path.write_text('1234')
+
+        with self.assertRaises(tokens.DatabaseFormatError):
+            tokens.DatabaseFile(self._path)
+
+    def test_csv_invalid_format_raises_exception(self):
+        self._path.write_text('MK34567890')
+
+        with self.assertRaises(tokens.DatabaseFormatError):
+            tokens.DatabaseFile(self._path)
+
+    def test_csv_not_utf8(self):
+        self._path.write_bytes(b'\x80' * 20)
+
+        with self.assertRaises(tokens.DatabaseFormatError):
+            tokens.DatabaseFile(self._path)
+
+
 class TestFilter(unittest.TestCase):
     """Tests the filtering functionality."""
     def setUp(self):
-        super().setUp()
-
         self.db = tokens.Database([
             tokens.TokenizedStringEntry(1, 'Luke'),
             tokens.TokenizedStringEntry(2, 'Leia'),
diff --git a/pw_tokenizer/simple_tokenize_test.cc b/pw_tokenizer/simple_tokenize_test.cc
index ac00c91..4112cef 100644
--- a/pw_tokenizer/simple_tokenize_test.cc
+++ b/pw_tokenizer/simple_tokenize_test.cc
@@ -36,7 +36,7 @@
   uint32_t coefficient = k65599HashConstant;
 
   size_t length =
-      std::min(static_cast<size_t>(PW_TOKENIZER_CFG_HASH_LENGTH), kSize - 1);
+      std::min(static_cast<size_t>(PW_TOKENIZER_CFG_C_HASH_LENGTH), kSize - 1);
 
   // Hash all of the characters in the string as unsigned ints.
   // The coefficient calculation is done modulo 0x100000000, so the unsigned
@@ -50,7 +50,7 @@
 }
 
 TEST(TokenizeStringLiteral, EmptyString_IsZero) {
-  constexpr pw_TokenizerStringToken token = PW_TOKENIZE_STRING("");
+  constexpr pw_tokenizer_Token token = PW_TOKENIZE_STRING("");
   EXPECT_TRUE(0u == token);
 }
 
@@ -139,15 +139,15 @@
   EXPECT_TRUE(std::memcmp(expected.data(), message_, expected.size()) == 0);
 }
 
-extern "C" void pw_TokenizerHandleEncodedMessage(const uint8_t* encoded_message,
-                                                 size_t size_bytes) {
+extern "C" void pw_tokenizer_HandleEncodedMessage(
+    const uint8_t* encoded_message, size_t size_bytes) {
   TokenizeToGlobalHandler::SetMessage(encoded_message, size_bytes);
 }
 
 class TokenizeToGlobalHandlerWithPayload
     : public GlobalMessage<TokenizeToGlobalHandlerWithPayload> {
  public:
-  static void SetPayload(pw_TokenizerPayload payload) {
+  static void SetPayload(pw_tokenizer_Payload payload) {
     payload_ = static_cast<intptr_t>(payload);
   }
 
@@ -166,13 +166,18 @@
       ExpectedData<0, 0, 0x00, 0x00, 0x00, 0x80, 0>("%x%lld%1.2f%s");
 
   PW_TOKENIZE_TO_GLOBAL_HANDLER_WITH_PAYLOAD(
-      static_cast<pw_TokenizerPayload>(123), "%x%lld%1.2f%s", 0, 0ll, -0.0, "");
+      static_cast<pw_tokenizer_Payload>(123),
+      "%x%lld%1.2f%s",
+      0,
+      0ll,
+      -0.0,
+      "");
   ASSERT_TRUE(expected.size() == message_size_bytes_);
   EXPECT_TRUE(std::memcmp(expected.data(), message_, expected.size()) == 0);
   EXPECT_TRUE(payload_ == 123);
 
   PW_TOKENIZE_TO_GLOBAL_HANDLER_WITH_PAYLOAD(
-      static_cast<pw_TokenizerPayload>(-543),
+      static_cast<pw_tokenizer_Payload>(-543),
       "%x%lld%1.2f%s",
       0,
       0ll,
@@ -183,8 +188,8 @@
   EXPECT_TRUE(payload_ == -543);
 }
 
-extern "C" void pw_TokenizerHandleEncodedMessageWithPayload(
-    pw_TokenizerPayload payload,
+extern "C" void pw_tokenizer_HandleEncodedMessageWithPayload(
+    pw_tokenizer_Payload payload,
     const uint8_t* encoded_message,
     size_t size_bytes) {
   TokenizeToGlobalHandlerWithPayload::SetMessage(encoded_message, size_bytes);
diff --git a/pw_tokenizer/tokenize.cc b/pw_tokenizer/tokenize.cc
index c7e3699..16703bb 100644
--- a/pw_tokenizer/tokenize.cc
+++ b/pw_tokenizer/tokenize.cc
@@ -20,17 +20,54 @@
 
 #include <cstring>
 
-#include "pw_polyfill/language_features.h"  // static_assert
 #include "pw_tokenizer_private/encode_args.h"
 
 namespace pw {
 namespace tokenizer {
+namespace {
 
-extern "C" void _pw_TokenizeToBuffer(void* buffer,
-                                     size_t* buffer_size_bytes,
-                                     pw_TokenizerStringToken token,
-                                     pw_TokenizerArgTypes types,
-                                     ...) {
+// Store metadata about this compilation's string tokenization in the ELF.
+//
+// The tokenizer metadata will not go into the on-device executable binary code.
+// This metadata will be present in the ELF file's .pw_tokenizer.info section,
+// from which the host-side tooling (Python, Java, etc.) can understand how to
+// decode tokenized strings for the given binary. Only attributes that affect
+// the decoding process are recorded.
+//
+// Tokenizer metadata is stored in an array of key-value pairs. Each Metadata
+// object is 32 bytes: a 24-byte string and an 8-byte value. Metadata structs
+// may be parsed in Python with the struct format '24s<Q'.
+PW_PACKED(struct) Metadata {
+  char name[24];   // name of the metadata field
+  uint64_t value;  // value of the field
+};
+
+static_assert(sizeof(Metadata) == 32);
+
+// Store tokenization metadata in its own section. Mach-O files are not
+// supported by pw_tokenizer, but a short, Mach-O compatible section name is
+// used on macOS so that this file can at least compile.
+#ifdef __APPLE__
+#define PW_TOKENIZER_INFO_SECTION PW_KEEP_IN_SECTION(".pw_tokenizer")
+#else
+#define PW_TOKENIZER_INFO_SECTION PW_KEEP_IN_SECTION(".pw_tokenizer.info")
+#endif  // __APPLE__
+
+constexpr Metadata metadata[] PW_TOKENIZER_INFO_SECTION = {
+    {"hash_length_bytes", PW_TOKENIZER_CFG_C_HASH_LENGTH},
+    {"sizeof_long", sizeof(long)},            // %l conversion specifier
+    {"sizeof_intmax_t", sizeof(intmax_t)},    // %j conversion specifier
+    {"sizeof_size_t", sizeof(size_t)},        // %z conversion specifier
+    {"sizeof_ptrdiff_t", sizeof(ptrdiff_t)},  // %t conversion specifier
+};
+
+}  // namespace
+
+extern "C" void _pw_tokenizer_ToBuffer(void* buffer,
+                                       size_t* buffer_size_bytes,
+                                       Token token,
+                                       _pw_tokenizer_ArgTypes types,
+                                       ...) {
   if (*buffer_size_bytes < sizeof(token)) {
     *buffer_size_bytes = 0;
     return;
@@ -50,10 +87,10 @@
   *buffer_size_bytes = sizeof(token) + encoded_bytes;
 }
 
-extern "C" void _pw_TokenizeToCallback(
+extern "C" void _pw_tokenizer_ToCallback(
     void (*callback)(const uint8_t* encoded_message, size_t size_bytes),
-    pw_TokenizerStringToken token,
-    pw_TokenizerArgTypes types,
+    Token token,
+    _pw_tokenizer_ArgTypes types,
     ...) {
   EncodedMessage encoded;
   encoded.token = token;
diff --git a/pw_tokenizer/tokenize_test.cc b/pw_tokenizer/tokenize_test.cc
index 18987fb..01c6bc5 100644
--- a/pw_tokenizer/tokenize_test.cc
+++ b/pw_tokenizer/tokenize_test.cc
@@ -20,29 +20,17 @@
 #include <iterator>
 
 #include "gtest/gtest.h"
-#include "pw_tokenizer/pw_tokenizer_65599_fixed_length_hash.h"
+#include "pw_tokenizer/hash.h"
 #include "pw_tokenizer_private/tokenize_test.h"
 #include "pw_varint/varint.h"
 
 namespace pw::tokenizer {
 namespace {
 
-// The hash to use for this test. This makes sure the strings are shorter than
-// the configured max length to ensure this test works with any reasonable
-// configuration.
-template <size_t kSize>
-constexpr uint32_t TestHash(const char (&string)[kSize]) {
-  constexpr unsigned kTestHashLength = 48;
-  static_assert(kTestHashLength <= PW_TOKENIZER_CFG_HASH_LENGTH);
-  static_assert(kSize <= kTestHashLength + 1);
-  return PwTokenizer65599FixedLengthHash(std::string_view(string, kSize - 1),
-                                         kTestHashLength);
-}
-
 // Constructs an array with the hashed string followed by the provided bytes.
 template <uint8_t... kData, size_t kSize>
 constexpr auto ExpectedData(const char (&format)[kSize]) {
-  const uint32_t value = TestHash(format);
+  const uint32_t value = Hash(format);
   return std::array<uint8_t, sizeof(uint32_t) + sizeof...(kData)>{
       static_cast<uint8_t>(value & 0xff),
       static_cast<uint8_t>(value >> 8 & 0xff),
@@ -51,20 +39,92 @@
       kData...};
 }
 
-TEST(TokenizeStringLiteral, EmptyString_IsZero) {
-  constexpr pw_TokenizerStringToken token = PW_TOKENIZE_STRING("");
+TEST(TokenizeString, EmptyString_IsZero) {
+  constexpr pw_tokenizer_Token token = PW_TOKENIZE_STRING("");
   EXPECT_EQ(0u, token);
 }
 
-TEST(TokenizeStringLiteral, String_MatchesHash) {
+TEST(TokenizeString, String_MatchesHash) {
   constexpr uint32_t token = PW_TOKENIZE_STRING("[:-)");
-  EXPECT_EQ(TestHash("[:-)"), token);
+  EXPECT_EQ(Hash("[:-)"), token);
 }
 
 constexpr uint32_t kGlobalToken = PW_TOKENIZE_STRING(">:-[]");
 
-TEST(TokenizeStringLiteral, GlobalVariable_MatchesHash) {
-  EXPECT_EQ(TestHash(">:-[]"), kGlobalToken);
+TEST(TokenizeString, GlobalVariable_MatchesHash) {
+  EXPECT_EQ(Hash(">:-[]"), kGlobalToken);
+}
+
+struct TokenizedWithinClass {
+  static constexpr uint32_t kThisToken = PW_TOKENIZE_STRING("???");
+};
+
+static_assert(Hash("???") == TokenizedWithinClass::kThisToken);
+
+TEST(TokenizeString, ClassMember_MatchesHash) {
+  EXPECT_EQ(Hash("???"), TokenizedWithinClass().kThisToken);
+}
+
+// Use a function with a shorter name to test tokenizing __func__ and
+// __PRETTY_FUNCTION__.
+//
+// WARNING: This function might cause errors for compilers other than GCC and
+// clang. It relies on two GCC/clang extensions:
+//
+//   1 - The __PRETTY_FUNCTION__ C++ function name variable.
+//   2 - __func__ as a static constexpr array instead of static const. See
+//       https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66639 for background.
+//
+void TestName() {
+  constexpr uint32_t function_hash = PW_TOKENIZE_STRING(__func__);
+  EXPECT_EQ(pw::tokenizer::Hash(__func__), function_hash);
+
+  // Check the non-standard __PRETTY_FUNCTION__ name.
+  constexpr uint32_t pretty_function = PW_TOKENIZE_STRING(__PRETTY_FUNCTION__);
+  EXPECT_EQ(pw::tokenizer::Hash(__PRETTY_FUNCTION__), pretty_function);
+}
+
+TEST(TokenizeString, FunctionName) { TestName(); }
+
+TEST(TokenizeString, Array) {
+  constexpr char array[] = "won-won-won-wonderful";
+
+  const uint32_t array_hash = PW_TOKENIZE_STRING(array);
+  EXPECT_EQ(Hash(array), array_hash);
+}
+
+TEST(TokenizeString, NullInString) {
+  // Use PW_TOKENIZER_STRING_TOKEN to avoid emitting strings with NUL into the
+  // ELF file. The CSV database format does not support NUL.
+  constexpr char nulls[32] = {};
+  static_assert(Hash(nulls) == PW_TOKENIZER_STRING_TOKEN(nulls));
+  static_assert(PW_TOKENIZER_STRING_TOKEN(nulls) != 0u);
+
+  static_assert(PW_TOKENIZER_STRING_TOKEN("\0") == Hash("\0"));
+  static_assert(PW_TOKENIZER_STRING_TOKEN("\0") != Hash(""));
+
+  static_assert(PW_TOKENIZER_STRING_TOKEN("abc\0def") == Hash("abc\0def"));
+
+  static_assert(Hash("abc\0def") != Hash("abc\0def\0"));
+}
+
+// Verify that we can tokenize multiple strings from one source line.
+#define THREE_FOR_ONE(first, second, third)             \
+  [[maybe_unused]] constexpr uint32_t token_1 =         \
+      PW_TOKENIZE_STRING_DOMAIN("TEST_DOMAIN", first);  \
+  [[maybe_unused]] constexpr uint32_t token_2 =         \
+      PW_TOKENIZE_STRING_DOMAIN("TEST_DOMAIN", second); \
+  [[maybe_unused]] constexpr uint32_t token_3 =         \
+      PW_TOKENIZE_STRING_DOMAIN("TEST_DOMAIN", third);
+
+TEST(TokenizeString, MultipleTokenizationsInOneMacroExpansion) {
+  // This verifies that we can safely tokenize multiple times in a single macro
+  // expansion. This can be useful when for example a name and description are
+  // both tokenized after being passed into a macro.
+  //
+  // This test only verifies that this compiles correctly; it does not test
+  // that the tokenizations make it to the final token database.
+  THREE_FOR_ONE("hello", "yes", "something");
 }
 
 class TokenizeToBuffer : public ::testing::Test {
@@ -221,6 +281,16 @@
   EXPECT_EQ(std::memcmp(empty.data(), buffer_, empty.size()), 0);
 }
 
+TEST_F(TokenizeToBuffer, Array) {
+  static constexpr char array[] = "1234";
+  size_t message_size = 4;
+  PW_TOKENIZE_TO_BUFFER(buffer_, &message_size, array);
+
+  constexpr std::array<uint8_t, 4> result = ExpectedData<>("1234");
+  ASSERT_EQ(result.size(), message_size);
+  EXPECT_EQ(std::memcmp(result.data(), buffer_, result.size()), 0);
+}
+
 TEST_F(TokenizeToBuffer, NullptrString_EncodesNull) {
   char* string = nullptr;
   size_t message_size = 9;
@@ -297,7 +367,7 @@
 
 TEST_F(TokenizeToBuffer, C_StringShortFloat) {
   size_t size = sizeof(buffer_);
-  pw_TokenizeToBufferTest_StringShortFloat(buffer_, &size);
+  pw_tokenizer_ToBufferTest_StringShortFloat(buffer_, &size);
   constexpr std::array<uint8_t, 11> expected =  // clang-format off
       ExpectedData<1, '1',                 // string '1'
                    3,                      // -2 (zig-zag encoded)
@@ -309,7 +379,7 @@
 
 TEST_F(TokenizeToBuffer, C_SequentialZigZag) {
   size_t size = sizeof(buffer_);
-  pw_TokenizeToBufferTest_SequentialZigZag(buffer_, &size);
+  pw_tokenizer_ToBufferTest_SequentialZigZag(buffer_, &size);
   constexpr std::array<uint8_t, 18> expected =
       ExpectedData<0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13>(
           TEST_FORMAT_SEQUENTIAL_ZIG_ZAG);
@@ -323,7 +393,7 @@
 
   {
     size_t size = 7;
-    pw_TokenizeToBufferTest_Requires8(buffer_, &size);
+    pw_tokenizer_ToBufferTest_Requires8(buffer_, &size);
     constexpr std::array<uint8_t, 7> expected =
         ExpectedData<2, 'h', 'i'>(TEST_FORMAT_REQUIRES_8);
     ASSERT_EQ(expected.size(), size);
@@ -333,7 +403,7 @@
 
   {
     size_t size = 8;
-    pw_TokenizeToBufferTest_Requires8(buffer_, &size);
+    pw_tokenizer_ToBufferTest_Requires8(buffer_, &size);
     constexpr std::array<uint8_t, 8> expected =
         ExpectedData<2, 'h', 'i', 13>(TEST_FORMAT_REQUIRES_8);
     ASSERT_EQ(expected.size(), size);
@@ -403,7 +473,7 @@
 }
 
 TEST_F(TokenizeToCallback, C_SequentialZigZag) {
-  pw_TokenizeToCallbackTest_SequentialZigZag(SetMessage);
+  pw_tokenizer_ToCallbackTest_SequentialZigZag(SetMessage);
 
   constexpr std::array<uint8_t, 18> expected =
       ExpectedData<0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13>(
@@ -412,11 +482,10 @@
   EXPECT_EQ(std::memcmp(expected.data(), message_, expected.size()), 0);
 }
 
-// Hijack the PW_TOKENIZE_STRING_DOMAIN macro to capture the domain name.
-#undef PW_TOKENIZE_STRING_DOMAIN
-#define PW_TOKENIZE_STRING_DOMAIN(domain, string)                 \
-  /* assigned to a variable */ PW_TOKENIZER_STRING_TOKEN(string); \
-  tokenizer_domain = domain;                                      \
+// Hijack an internal macro to capture the tokenizer domain.
+#undef _PW_TOKENIZER_RECORD_ORIGINAL_STRING
+#define _PW_TOKENIZER_RECORD_ORIGINAL_STRING(token, domain, string) \
+  tokenizer_domain = domain;                                        \
   string_literal = string
 
 TEST_F(TokenizeToBuffer, Domain_Default) {
diff --git a/pw_tokenizer/tokenize_test_c.c b/pw_tokenizer/tokenize_test_c.c
index f012cb8..bf8877a 100644
--- a/pw_tokenizer/tokenize_test_c.c
+++ b/pw_tokenizer/tokenize_test_c.c
@@ -22,8 +22,8 @@
 #error "This is a test of C code and must be compiled as C, not C++."
 #endif  // __cplusplus
 
-void pw_TokenizeToBufferTest_StringShortFloat(void* buffer,
-                                              size_t* buffer_size) {
+void pw_tokenizer_ToBufferTest_StringShortFloat(void* buffer,
+                                                size_t* buffer_size) {
   char str[] = "1";
   PW_TOKENIZE_TO_BUFFER(
       buffer, buffer_size, TEST_FORMAT_STRING_SHORT_FLOAT, str, (short)-2, 3.0);
@@ -32,8 +32,8 @@
 // This test invokes the tokenization API with a variety of types. To simplify
 // validating the encoded data, numbers that are sequential when zig-zag encoded
 // are used as arguments.
-void pw_TokenizeToBufferTest_SequentialZigZag(void* buffer,
-                                              size_t* buffer_size) {
+void pw_tokenizer_ToBufferTest_SequentialZigZag(void* buffer,
+                                                size_t* buffer_size) {
   PW_TOKENIZE_TO_BUFFER(buffer,
                         buffer_size,
                         TEST_FORMAT_SEQUENTIAL_ZIG_ZAG,
@@ -53,7 +53,7 @@
                         (signed char)-7);
 }
 
-void pw_TokenizeToCallbackTest_SequentialZigZag(
+void pw_tokenizer_ToCallbackTest_SequentialZigZag(
     void (*callback)(const uint8_t* buffer, size_t size)) {
   PW_TOKENIZE_TO_CALLBACK(callback,
                           TEST_FORMAT_SEQUENTIAL_ZIG_ZAG,
@@ -73,6 +73,6 @@
                           (signed char)-7);
 }
 
-void pw_TokenizeToBufferTest_Requires8(void* buffer, size_t* buffer_size) {
+void pw_tokenizer_ToBufferTest_Requires8(void* buffer, size_t* buffer_size) {
   PW_TOKENIZE_TO_BUFFER(buffer, buffer_size, TEST_FORMAT_REQUIRES_8, "hi", -7);
 }
diff --git a/pw_tokenizer/tokenize_test_fakes.cc b/pw_tokenizer/tokenize_test_fakes.cc
deleted file mode 100644
index 4dcc9aa..0000000
--- a/pw_tokenizer/tokenize_test_fakes.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2020 The Pigweed Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-//     https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-// This file provide stub implementations for the function projects are expected
-// to provide when PW_TOKENIZER_CFG_ENABLE_TOKENIZE_TO_GLOBAL_HANDLER is set.
-
-#include <cstddef>
-#include <cstdint>
-
-#include "pw_tokenizer/tokenize.h"
-
-#if PW_TOKENIZER_CFG_ENABLE_TOKENIZE_TO_GLOBAL_HANDLER
-
-PW_EXTERN_C void pw_TokenizerHandleEncodedMessage(
-    const uint8_t encoded_message[], size_t size_bytes) {
-  PW_UNUSED(encoded_message[0]);
-  PW_UNUSED(size_bytes);
-}
-
-PW_EXTERN_C void pw_TokenizerHandleEncodedMessageWithPayload(
-    pw_TokenizerPayload payload,
-    const uint8_t encoded_message[],
-    size_t size_bytes) {
-  PW_UNUSED(payload);
-  PW_UNUSED(encoded_message[0]);
-  PW_UNUSED(size_bytes);
-}
-
-#endif  // PW_TOKENIZER_CFG_ENABLE_TOKENIZE_TO_GLOBAL_HANDLER
diff --git a/pw_tokenizer/tokenize_to_global_handler.cc b/pw_tokenizer/tokenize_to_global_handler.cc
index ecca3b4..da36a49 100644
--- a/pw_tokenizer/tokenize_to_global_handler.cc
+++ b/pw_tokenizer/tokenize_to_global_handler.cc
@@ -19,9 +19,9 @@
 namespace pw {
 namespace tokenizer {
 
-extern "C" void _pw_TokenizeToGlobalHandler(pw_TokenizerStringToken token,
-                                            pw_TokenizerArgTypes types,
-                                            ...) {
+extern "C" void _pw_tokenizer_ToGlobalHandler(pw_tokenizer_Token token,
+                                              _pw_tokenizer_ArgTypes types,
+                                              ...) {
   EncodedMessage encoded;
   encoded.token = token;
 
@@ -30,8 +30,8 @@
   const size_t encoded_bytes = EncodeArgs(types, args, encoded.args);
   va_end(args);
 
-  pw_TokenizerHandleEncodedMessage(reinterpret_cast<const uint8_t*>(&encoded),
-                                   sizeof(encoded.token) + encoded_bytes);
+  pw_tokenizer_HandleEncodedMessage(reinterpret_cast<const uint8_t*>(&encoded),
+                                    sizeof(encoded.token) + encoded_bytes);
 }
 
 }  // namespace tokenizer
diff --git a/pw_tokenizer/tokenize_to_global_handler_with_payload.cc b/pw_tokenizer/tokenize_to_global_handler_with_payload.cc
index b04e549..56b6520 100644
--- a/pw_tokenizer/tokenize_to_global_handler_with_payload.cc
+++ b/pw_tokenizer/tokenize_to_global_handler_with_payload.cc
@@ -19,10 +19,10 @@
 namespace pw {
 namespace tokenizer {
 
-extern "C" void _pw_TokenizeToGlobalHandlerWithPayload(
-    const pw_TokenizerPayload payload,
-    pw_TokenizerStringToken token,
-    pw_TokenizerArgTypes types,
+extern "C" void _pw_tokenizer_ToGlobalHandlerWithPayload(
+    const pw_tokenizer_Payload payload,
+    pw_tokenizer_Token token,
+    _pw_tokenizer_ArgTypes types,
     ...) {
   EncodedMessage encoded;
   encoded.token = token;
@@ -32,7 +32,7 @@
   const size_t encoded_bytes = EncodeArgs(types, args, encoded.args);
   va_end(args);
 
-  pw_TokenizerHandleEncodedMessageWithPayload(
+  pw_tokenizer_HandleEncodedMessageWithPayload(
       payload,
       reinterpret_cast<const uint8_t*>(&encoded),
       sizeof(encoded.token) + encoded_bytes);
diff --git a/pw_toolchain/BUILD.gn b/pw_toolchain/BUILD.gn
index 314f335..0ac53a8 100644
--- a/pw_toolchain/BUILD.gn
+++ b/pw_toolchain/BUILD.gn
@@ -12,7 +12,6 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_docgen/docs.gni")
diff --git a/pw_toolchain/arm_gcc/BUILD.gn b/pw_toolchain/arm_gcc/BUILD.gn
index 1fafdf9..852f06a 100644
--- a/pw_toolchain/arm_gcc/BUILD.gn
+++ b/pw_toolchain/arm_gcc/BUILD.gn
@@ -12,7 +12,6 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 # Disable obnoxious ABI warning.
diff --git a/pw_toolchain/arm_gcc/toolchains.gni b/pw_toolchain/arm_gcc/toolchains.gni
index bf7dc25..68bcdc1 100644
--- a/pw_toolchain/arm_gcc/toolchains.gni
+++ b/pw_toolchain/arm_gcc/toolchains.gni
@@ -12,7 +12,6 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 # Specifies the tools used by ARM GCC toolchains.
diff --git a/pw_toolchain/docs.rst b/pw_toolchain/docs.rst
index 68706a5..07e27fb 100644
--- a/pw_toolchain/docs.rst
+++ b/pw_toolchain/docs.rst
@@ -1,8 +1,4 @@
-.. _chapter-pw-toolchain:
-
-.. default-domain:: cpp
-
-.. highlight:: cpp
+.. _module-pw_toolchain:
 
 ------------
 pw_toolchain
diff --git a/pw_toolchain/dummy/BUILD.gn b/pw_toolchain/dummy/BUILD.gn
index 5d4cf74..aaf43d3 100644
--- a/pw_toolchain/dummy/BUILD.gn
+++ b/pw_toolchain/dummy/BUILD.gn
@@ -12,16 +12,19 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_toolchain/universal_tools.gni")
+
 # A dummy toolchain which is set as the default for Pigweed. This is never used;
 # the top-level BUILD.gn enumerates the toolchains for each build.
 toolchain("dummy") {
   tool("stamp") {
-    if (host_os == "win") {
-      command = "cmd /c type nul > \"{{output}}\""
-    } else {
-      command = "touch {{output}}"
-    }
-    description = "stamp {{output}}"
+    forward_variables_from(pw_universal_stamp, "*")
+  }
+
+  tool("copy") {
+    forward_variables_from(pw_universal_copy, "*")
   }
 
   # If the user tries to build a target with the default toolchain, run a script
diff --git a/pw_toolchain/generate_toolchain.gni b/pw_toolchain/generate_toolchain.gni
index 0f9185c..9a74cdf 100644
--- a/pw_toolchain/generate_toolchain.gni
+++ b/pw_toolchain/generate_toolchain.gni
@@ -12,13 +12,20 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_toolchain/universal_tools.gni")
+
 declare_args() {
   # Scope defining the current toolchain. Contains all of the arguments required
   # by the generate_toolchain template.
   pw_toolchain_SCOPE = {
   }
+
+  # Prefix for compilation commands (e.g. the path to a Goma or CCache compiler
+  # launcher). Example for ccache:
+  #   gn gen out --args='pw_command_launcher="ccache"'
+  pw_command_launcher = ""
 }
 
 # Creates a toolchain target.
@@ -38,15 +45,40 @@
 #   defaults: (required) A scope setting defaults to apply to GN
 #     targets in this toolchain, as described in pw_vars_default.gni
 #
+# The defaults scope should contain values for builtin GN arguments:
+#   current_cpu: The CPU of the toolchain.
+#     Well known values include "arm", "arm64", "x64", "x86", and "mips".
+#   current_os: The OS of the toolchain. Defaults to "".
+#     Well known values include "win", "mac", "linux", "android", and "ios".
+#
 template("generate_toolchain") {
+  assert(defined(invoker.defaults), "toolchain is missing 'defaults'")
+
   # In multi-toolchain builds from the top level, we run into issues where
   # toolchains defined with this template are re-generated each time. To avoid
   # collisions, the actual toolchain is only generated for the default (dummy)
   # toolchain, and an unused target is created otherwise.
   if (current_toolchain == default_toolchain) {
+    invoker_toolchain_args = invoker.defaults
+
+    # These values should always be set as they influence toolchain
+    # behavior, but allow them to be unset as a transitional measure.
+    if (!defined(invoker_toolchain_args.current_cpu)) {
+      invoker_toolchain_args.current_cpu = ""
+    }
+    if (!defined(invoker_toolchain_args.current_os)) {
+      invoker_toolchain_args.current_os = ""
+    }
+
+    # Determine OS of toolchain, which is the builtin argument "current_os".
+    toolchain_os = invoker_toolchain_args.current_os
+
     toolchain(target_name) {
       assert(defined(invoker.cc), "toolchain is missing 'cc'")
       tool("asm") {
+        if (pw_command_launcher != "") {
+          command_launcher = pw_command_launcher
+        }
         depfile = "{{output}}.d"
         command = string_join(" ",
                               [
@@ -70,6 +102,9 @@
       }
 
       tool("cc") {
+        if (pw_command_launcher != "") {
+          command_launcher = pw_command_launcher
+        }
         depfile = "{{output}}.d"
         command = string_join(" ",
                               [
@@ -91,6 +126,9 @@
 
       assert(defined(invoker.cxx), "toolchain is missing 'cxx'")
       tool("cxx") {
+        if (pw_command_launcher != "") {
+          command_launcher = pw_command_launcher
+        }
         depfile = "{{output}}.d"
         command = string_join(" ",
                               [
@@ -111,6 +149,9 @@
       }
 
       tool("objc") {
+        if (pw_command_launcher != "") {
+          command_launcher = pw_command_launcher
+        }
         depfile = "{{output}}.d"
         command =
             string_join(" ",
@@ -133,6 +174,9 @@
       }
 
       tool("objcxx") {
+        if (pw_command_launcher != "") {
+          command_launcher = pw_command_launcher
+        }
         depfile = "{{output}}.d"
         command =
             string_join(" ",
@@ -175,9 +219,7 @@
         "{{ldflags}}",
       ]
 
-      is_host_toolchain =
-          defined(invoker.is_host_toolchain) && invoker.is_host_toolchain
-      if (is_host_toolchain && host_os == "mac") {
+      if (toolchain_os == "mac" || toolchain_os == "ios") {
         _link_flags += [
           # Output a map file that shows symbols and their location.
           "-Wl,-map,$_link_mapfile",
@@ -189,19 +231,13 @@
         _link_flags += [
           # Output a map file that shows symbols and their location.
           "-Wl,-Map,$_link_mapfile",
-        ]
 
-        # TODO(hepler): Re-add gc-sections to host when supported by tokenizer.
-        if (!is_host_toolchain) {
-          _link_flags += [
-            # Delete unreferenced sections. Helpful with -ffunction-sections.
-            "-Wl,--gc-sections",
-          ]
-        }
+          # Delete unreferenced sections. Helpful with -ffunction-sections.
+          "-Wl,--gc-sections",
+        ]
       }
 
-      _link_group =
-          defined(invoker.link_group) && invoker.link_group && host_os != "mac"
+      _link_group = defined(invoker.link_group) && invoker.link_group
       if (_link_group) {
         _link_flags += [ "-Wl,--start-group" ]
       }
@@ -241,7 +277,7 @@
 
         if (defined(invoker.final_binary_extension)) {
           default_output_extension = invoker.final_binary_extension
-        } else if (is_host_toolchain && host_os == "win") {
+        } else if (toolchain_os == "win") {
           default_output_extension = ".exe"
         } else {
           default_output_extension = ""
@@ -257,40 +293,17 @@
       }
 
       tool("stamp") {
-        if (host_os == "win") {
-          command = "cmd /c type nul > \"{{output}}\""
-        } else {
-          command = "touch {{output}}"
-        }
-        description = "stamp {{output}}"
+        # GN-ism: GN gets mad if you directly forward the contents of
+        # pw_universal_stamp.
+        _stamp = pw_universal_stamp
+        forward_variables_from(_stamp, "*")
       }
 
       tool("copy") {
-        if (host_os != "win") {
-          # Use a hard link if possible as this is faster. Also, Mac doesn't
-          # preserve timestamps properly with cp -af.
-          fallback_command = string_join(" ",
-                                         [
-                                           "rm -rf",
-                                           "{{output}}",
-                                           "&&",
-                                           "cp -af",
-                                           "{{source}}",
-                                           "{{output}}",
-                                         ])
-          command = string_join(" ",
-                                [
-                                  "ln -f",
-                                  "{{source}}",
-                                  "{{output}}",
-                                  "2>/dev/null",
-                                  "||",
-                                  "($fallback_command)",
-                                ])
-        } else {
-          command = "cp -af {{source}} {{output}}"
-        }
-        description = "cp {{source}} {{output}}"
+        # GN-ism: GN gets mad if you directly forward the contents of
+        # pw_universal_copy.
+        _copy = pw_universal_copy
+        forward_variables_from(_copy, "*")
       }
 
       # Build arguments to be overridden when compiling cross-toolchain:
@@ -302,14 +315,13 @@
       #   pw_toolchain_SCOPE: A copy of the invoker scope that defines the
       #     toolchain. Used for generating derivative toolchains.
       #
-      assert(defined(invoker.defaults), "toolchain is missing 'defaults'")
       toolchain_args = {
         pw_toolchain_SCOPE = {
         }
         pw_toolchain_SCOPE = {
           forward_variables_from(invoker, "*")
         }
-        forward_variables_from(invoker.defaults, "*")
+        forward_variables_from(invoker_toolchain_args, "*")
       }
     }
   } else {
diff --git a/pw_toolchain/host_clang/BUILD.gn b/pw_toolchain/host_clang/BUILD.gn
index 529b6e1..4e4ddb7 100644
--- a/pw_toolchain/host_clang/BUILD.gn
+++ b/pw_toolchain/host_clang/BUILD.gn
@@ -12,7 +12,6 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 # See https://github.com/google/sanitizers
@@ -33,7 +32,7 @@
 
 # Locate XCode's sysroot for Clang.
 config("xcode_sysroot") {
-  if (host_os == "mac") {
+  if (current_os == "mac") {
     _xcode_sysroot = exec_script("$dir_pw_build/py/pw_build/exec.py",
                                  [
                                    "--",
@@ -53,7 +52,7 @@
 #
 # Pull the appropriate pathd from our Pigweed env setup.
 config("no_system_libcpp") {
-  if (host_os == "mac") {
+  if (current_os == "mac") {
     install_dir = getenv("PW_PIGWEED_CIPD_INSTALL_DIR")
     assert(install_dir != "",
            "You forgot to activate the Pigweed environment; " +
diff --git a/pw_protobuf_compiler/nanopb.gni b/pw_toolchain/host_clang/toolchain.cmake
similarity index 71%
copy from pw_protobuf_compiler/nanopb.gni
copy to pw_toolchain/host_clang/toolchain.cmake
index 3c3be32..2628ee6 100644
--- a/pw_protobuf_compiler/nanopb.gni
+++ b/pw_toolchain/host_clang/toolchain.cmake
@@ -12,8 +12,11 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-declare_args() {
-  # If compiling protos for nanopb, this variable is set to the path to the
-  # nanopb installation.
-  dir_pw_third_party_nanopb = ""
-}
+include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+
+pw_set_backend(pw_log pw_log_basic)
+pw_set_backend(pw_assert pw_assert_log)
+pw_set_backend(pw_sys_io pw_sys_io_stdio)
+
+set(CMAKE_C_COMPILER clang)
+set(CMAKE_CXX_COMPILER clang++)
diff --git a/pw_toolchain/host_clang/toolchains.gni b/pw_toolchain/host_clang/toolchains.gni
index 8976118..b4569ab 100644
--- a/pw_toolchain/host_clang/toolchains.gni
+++ b/pw_toolchain/host_clang/toolchains.gni
@@ -12,8 +12,8 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
+
 declare_args() {
   # Sets the sanitizer to pass to clang. Valid values are those for "-fsanitize"
   # listed in https://clang.llvm.org/docs/UsersManual.html#id9.
@@ -50,7 +50,7 @@
     "$dir_pw_toolchain/host_clang:xcode_sysroot",
   ]
   if (pw_toolchain_SANITIZER != "") {
-    configs +=
+    default_configs +=
         [ "$dir_pw_toolchain/host_clang:sanitize_$pw_toolchain_SANITIZER" ]
   }
   if (pw_toolchain_OSS_FUZZ_ENABLED) {
diff --git a/pw_toolchain/host_gcc/BUILD.gn b/pw_toolchain/host_gcc/BUILD.gn
index 230f3fd..e0cdbf1 100644
--- a/pw_toolchain/host_gcc/BUILD.gn
+++ b/pw_toolchain/host_gcc/BUILD.gn
@@ -12,7 +12,6 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 # Disable obnoxious ABI warning.
diff --git a/pw_protobuf_compiler/nanopb.gni b/pw_toolchain/host_gcc/toolchain.cmake
similarity index 71%
copy from pw_protobuf_compiler/nanopb.gni
copy to pw_toolchain/host_gcc/toolchain.cmake
index 3c3be32..0826543 100644
--- a/pw_protobuf_compiler/nanopb.gni
+++ b/pw_toolchain/host_gcc/toolchain.cmake
@@ -12,8 +12,11 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-declare_args() {
-  # If compiling protos for nanopb, this variable is set to the path to the
-  # nanopb installation.
-  dir_pw_third_party_nanopb = ""
-}
+include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+
+pw_set_backend(pw_log pw_log_basic)
+pw_set_backend(pw_assert pw_assert_log)
+pw_set_backend(pw_sys_io pw_sys_io_stdio)
+
+set(CMAKE_C_COMPILER gcc)
+set(CMAKE_CXX_COMPILER g++)
diff --git a/pw_toolchain/host_gcc/toolchains.gni b/pw_toolchain/host_gcc/toolchains.gni
index 27042f0..66144bb 100644
--- a/pw_toolchain/host_gcc/toolchains.gni
+++ b/pw_toolchain/host_gcc/toolchains.gni
@@ -12,7 +12,6 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 # Specifies the tools used by host GCC toolchains.
diff --git a/pw_toolchain/universal_tools.gni b/pw_toolchain/universal_tools.gni
new file mode 100644
index 0000000..8fcc062
--- /dev/null
+++ b/pw_toolchain/universal_tools.gni
@@ -0,0 +1,50 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+pw_universal_copy = {
+  if (host_os == "win") {
+    command = "cp -af {{source}} {{output}}"
+  } else {
+    # Use a hard link if possible as this is faster. Also, Mac doesn't
+    # preserve timestamps properly with cp -af.
+    fallback_command = string_join(" ",
+                                   [
+                                     "rm -rf",
+                                     "{{output}}",
+                                     "&&",
+                                     "cp -af",
+                                     "{{source}}",
+                                     "{{output}}",
+                                   ])
+    command = string_join(" ",
+                          [
+                            "ln -f",
+                            "{{source}}",
+                            "{{output}}",
+                            "2>/dev/null",
+                            "||",
+                            "($fallback_command)",
+                          ])
+  }
+  description = "cp {{source}} {{output}}"
+}
+
+pw_universal_stamp = {
+  if (host_os == "win") {
+    command = "cmd /c type nul > \"{{output}}\""
+  } else {
+    command = "touch {{output}}"
+  }
+  description = "stamp {{output}}"
+}
diff --git a/pw_trace/BUILD.gn b/pw_trace/BUILD.gn
index b71fb82..25111d2 100644
--- a/pw_trace/BUILD.gn
+++ b/pw_trace/BUILD.gn
@@ -12,12 +12,12 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/facade.gni")
 import("$dir_pw_docgen/docs.gni")
 import("$dir_pw_unit_test/test.gni")
+
 declare_args() {
   # Backend for the pw_trace module.
   pw_trace_BACKEND = ""
diff --git a/pw_trace/CMakeLists.txt b/pw_trace/CMakeLists.txt
index 50f67ba..7a4fe84 100644
--- a/pw_trace/CMakeLists.txt
+++ b/pw_trace/CMakeLists.txt
@@ -12,6 +12,8 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+
 pw_add_facade(pw_trace
   PUBLIC_DEPS
     pw_preprocessor
diff --git a/pw_trace/docs.rst b/pw_trace/docs.rst
index 37e7d89..60aa359 100644
--- a/pw_trace/docs.rst
+++ b/pw_trace/docs.rst
@@ -1,8 +1,4 @@
-.. _chapter-pw-trace:
-
-.. default-domain:: cpp
-
-.. highlight:: cpp
+.. _module-pw_trace:
 
 ========
 pw_trace
diff --git a/pw_trace/public/pw_trace/internal/trace_internal.h b/pw_trace/public/pw_trace/internal/trace_internal.h
index c21b7ef..f3604db 100644
--- a/pw_trace/public/pw_trace/internal/trace_internal.h
+++ b/pw_trace/public/pw_trace/internal/trace_internal.h
@@ -16,7 +16,7 @@
 
 #pragma once
 
-#include "pw_preprocessor/macro_arg_count.h"
+#include "pw_preprocessor/arguments.h"
 #include "pw_trace_backend/trace_backend.h"
 
 // Default: Flag value if none set
diff --git a/pw_protobuf_compiler/nanopb.gni b/pw_trace/py/BUILD.gn
similarity index 71%
copy from pw_protobuf_compiler/nanopb.gni
copy to pw_trace/py/BUILD.gn
index 3c3be32..caf8bc5 100644
--- a/pw_protobuf_compiler/nanopb.gni
+++ b/pw_trace/py/BUILD.gn
@@ -12,8 +12,15 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-declare_args() {
-  # If compiling protos for nanopb, this variable is set to the path to the
-  # nanopb installation.
-  dir_pw_third_party_nanopb = ""
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/python.gni")
+
+pw_python_package("py") {
+  setup = [ "setup.py" ]
+  sources = [
+    "pw_trace/__init__.py",
+    "pw_trace/trace.py",
+  ]
+  tests = [ "trace_test.py" ]
 }
diff --git a/pw_trace/py/pw_trace/py.typed b/pw_trace/py/pw_trace/py.typed
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pw_trace/py/pw_trace/py.typed
diff --git a/pw_trace/py/pw_trace/trace.py b/pw_trace/py/pw_trace/trace.py
index f30ba2f..c63d931 100755
--- a/pw_trace/py/pw_trace/trace.py
+++ b/pw_trace/py/pw_trace/trace.py
@@ -47,7 +47,7 @@
     event_type: TraceType
     module: str
     label: str
-    timestamp: int
+    timestamp_us: int
     group: str = ""
     trace_id: int = 0
     flags: int = 0
@@ -58,8 +58,9 @@
 
 def event_has_trace_id(event_type):
     return event_type in {
-        "kPwTraceEvent_AsyncStart", "kPwTraceEvent_AsyncStep",
-        "kPwTraceEvent_AsyncEnd"
+        "PW_TRACE_EVENT_TYPE_ASYNC_START",
+        "PW_TRACE_EVENT_TYPE_ASYNC_STEP",
+        "PW_TRACE_EVENT_TYPE_ASYNC_END",
     }
 
 
@@ -67,7 +68,7 @@
     """Generates a list of JSON lines from provided trace events."""
     json_lines = []
     for event in events:
-        if event.module is None or event.timestamp is None or \
+        if event.module is None or event.timestamp_us is None or \
            event.event_type is None or event.label is None:
             _LOG.error("Invalid sample")
             continue
@@ -75,7 +76,7 @@
         line = {
             "pid": event.module,
             "name": (event.label),
-            "ts": event.timestamp
+            "ts": event.timestamp_us
         }
         if event.event_type == TraceType.DurationStart:
             line["ph"] = "B"
@@ -139,6 +140,8 @@
                 for i, item in enumerate(items):
                     args["data_" + str(i)] = item
                 line["args"] = args
+            else:
+                line["args"] = {"data": event.data.hex()}
 
         # Encode as JSON
         json_lines.append(json.dumps(line))
diff --git a/pw_trace/py/setup.py b/pw_trace/py/setup.py
index 65e6126..9fa3379 100644
--- a/pw_trace/py/setup.py
+++ b/pw_trace/py/setup.py
@@ -13,7 +13,7 @@
 # the License.
 """The pw_trace package."""
 
-import setuptools
+import setuptools  # type: ignore
 
 setuptools.setup(
     name='pw_trace',
@@ -22,5 +22,6 @@
     author_email='pigweed-developers@googlegroups.com',
     description='Tools for dealing with trace data',
     packages=setuptools.find_packages(),
-    test_suite='setup.test_suite',
+    package_data={'pw_trace': ['py.typed']},
+    zip_safe=False,
 )
diff --git a/pw_trace/py/trace_test.py b/pw_trace/py/trace_test.py
index f9bab22..889a4b1 100755
--- a/pw_trace/py/trace_test.py
+++ b/pw_trace/py/trace_test.py
@@ -54,7 +54,7 @@
         event = trace.TraceEvent(event_type=trace.TraceType.Instantaneous,
                                  module="module",
                                  label="label",
-                                 timestamp=10)
+                                 timestamp_us=10)
         json_lines = trace.generate_trace_json([event])
         self.assertEqual(1, len(json_lines))
         self.assertEqual(json.loads(json_lines[0]), {
@@ -76,7 +76,7 @@
             event_type=trace.TraceType.Instantaneous,
             module="module",
             label="",  # Is replaced by data string
-            timestamp=10,
+            timestamp_us=10,
             has_data=True,
             data_fmt="@pw_arg_label",
             data=bytes("arg", "utf-8"))
@@ -94,7 +94,7 @@
         event = trace.TraceEvent(event_type=trace.TraceType.InstantaneousGroup,
                                  module="module",
                                  label="label",
-                                 timestamp=10,
+                                 timestamp_us=10,
                                  has_data=True,
                                  data_fmt="@pw_arg_group",
                                  data=bytes("arg", "utf-8"))
@@ -114,7 +114,7 @@
         event = trace.TraceEvent(event_type=trace.TraceType.Instantaneous,
                                  module="module",
                                  label="counter",
-                                 timestamp=10,
+                                 timestamp_us=10,
                                  has_data=True,
                                  data_fmt="@pw_arg_counter",
                                  data=(5).to_bytes(4, byteorder="little"))
@@ -136,7 +136,7 @@
         event = trace.TraceEvent(event_type=trace.TraceType.Instantaneous,
                                  module="module",
                                  label="counter",
-                                 timestamp=10,
+                                 timestamp_us=10,
                                  has_data=True,
                                  data_fmt="@pw_py_struct_fmt:H",
                                  data=(5).to_bytes(2, byteorder="little"))
@@ -158,7 +158,7 @@
         event = trace.TraceEvent(event_type=trace.TraceType.Instantaneous,
                                  module="module",
                                  label="counter",
-                                 timestamp=10,
+                                 timestamp_us=10,
                                  has_data=True,
                                  data_fmt="@pw_py_struct_fmt:Hl",
                                  data=struct.pack("Hl", 5, 2))
diff --git a/pw_trace_tokenized/BUILD b/pw_trace_tokenized/BUILD
index 6b853ea..4c3175f 100644
--- a/pw_trace_tokenized/BUILD
+++ b/pw_trace_tokenized/BUILD
@@ -28,6 +28,7 @@
 pw_cc_library(
     name = "headers",
     hdrs = [
+        "public/pw_trace_tokenized/config.h",
         "public/pw_trace_tokenized/internal/trace_tokenized_internal.h",
         "public/pw_trace_tokenized/trace_callback.h",
         "public/pw_trace_tokenized/trace_tokenized.h",
@@ -87,6 +88,22 @@
 )
 
 pw_cc_library(
+    name = "pw_trace_tokenized_buffer_log",
+    hdrs = [
+        "public/pw_trace_tokenized/trace_buffer_log.h",
+    ],
+    srcs = [
+        "trace_buffer_log.cc",
+    ],
+    deps = [
+        ":trace_buffer_headers",
+	"//pw_base64",
+	"//pw_log",
+	"//pw_string",
+    ],
+)
+
+pw_cc_library(
     name = "pw_trace_tokenized_fake_time",
     srcs = [
         "fake_trace_time.cc",
@@ -128,6 +145,20 @@
     ],
 )
 
+pw_cc_test(
+    name = "trace_tokenized_buffer_log_test",
+    srcs = [
+        "trace_buffer_log_test.cc",
+    ],
+    deps = [
+        ":backend",
+        ":facade",
+        ":pw_trace_log",
+        "//pw_preprocessor",
+        "//pw_unit_test",
+    ],
+)
+
 pw_cc_library(
     name = "pw_trace_host_trace_time",
     includes = [ "example/public" ],
diff --git a/pw_trace_tokenized/BUILD.gn b/pw_trace_tokenized/BUILD.gn
index 74843b2..c031e89 100644
--- a/pw_trace_tokenized/BUILD.gn
+++ b/pw_trace_tokenized/BUILD.gn
@@ -12,47 +12,63 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
+import("$dir_pw_build/module_config.gni")
 import("$dir_pw_docgen/docs.gni")
 import("$dir_pw_unit_test/test.gni")
+
 declare_args() {
+  # The build target that overrides the default configuration options for this
+  # module. This should point to a source set that provides defines through a
+  # public config (which may -include a file or add defines directly).
+  pw_trace_CONFIG = pw_build_DEFAULT_MODULE_CONFIG
+
   # Tokenizer trace time, gets included if provided
   pw_trace_tokenizer_time = ""
+
+  # Trace buffer size in bytes. Set to 0 to disable.
+  pw_trace_tokenized_BUFFER_SIZE = 256
 }
 
-config("default_config") {
+config("public_include_path") {
   include_dirs = [ "public" ]
+  visibility = [ ":*" ]
 }
 
 config("backend_config") {
   include_dirs = [ "public_overrides" ]
 }
 
+pw_source_set("config") {
+  public_deps = [ pw_trace_CONFIG ]
+  public_configs = [ ":public_include_path" ]
+  public = [ "public/pw_trace_tokenized/config.h" ]
+}
+
 pw_test_group("tests") {
   tests = [
     ":trace_tokenized_test",
     ":tokenized_trace_buffer_test",
+    ":tokenized_trace_buffer_log_test",
   ]
 }
 
 pw_source_set("pw_trace_tokenized") {
   public_configs = [
     ":backend_config",
-    ":default_config",
+    ":public_include_path",
   ]
   public_deps = [
+    ":config",
     ":pw_trace_tokenized_core",
     "$dir_pw_tokenizer",
   ]
-
   if (pw_trace_tokenizer_time != "") {
     deps = [ "$pw_trace_tokenizer_time" ]
   }
 
   public = [ "public_overrides/pw_trace_backend/trace_backend.h" ]
-  sources = public
 }
 
 pw_test("trace_tokenized_test") {
@@ -65,13 +81,23 @@
   sources = [ "trace_test.cc" ]
 }
 
+config("trace_buffer_size") {
+  defines = [ "PW_TRACE_BUFFER_SIZE_BYTES=${pw_trace_tokenized_BUFFER_SIZE}" ]
+}
+
 pw_source_set("tokenized_trace_buffer") {
   deps = [ ":pw_trace_tokenized_core" ]
   public_deps = [
+    ":config",
     "$dir_pw_ring_buffer",
+    "$dir_pw_tokenizer",
     "$dir_pw_varint",
   ]
   sources = [ "trace_buffer.cc" ]
+  public_configs = [
+    ":public_include_path",
+    ":trace_buffer_size",
+  ]
   public = [ "public/pw_trace_tokenized/trace_buffer.h" ]
 }
 
@@ -85,6 +111,26 @@
   sources = [ "trace_buffer_test.cc" ]
 }
 
+pw_source_set("tokenized_trace_buffer_log") {
+  deps = [
+    "$dir_pw_base64",
+    "$dir_pw_log",
+    "$dir_pw_string",
+  ]
+  public_deps = [ ":tokenized_trace_buffer" ]
+  sources = [ "trace_buffer_log.cc" ]
+  public = [ "public/pw_trace_tokenized/trace_buffer_log.h" ]
+}
+
+pw_test("tokenized_trace_buffer_log_test") {
+  enable_if = pw_trace_tokenizer_time != ""
+  deps = [
+    ":tokenized_trace_buffer_log",
+    "$dir_pw_trace",
+  ]
+  sources = [ "trace_buffer_log_test.cc" ]
+}
+
 pw_source_set("fake_trace_time") {
   deps = [ ":pw_trace_tokenized_core" ]
   sources = [ "fake_trace_time.cc" ]
@@ -98,13 +144,16 @@
 pw_source_set("pw_trace_tokenized_core") {
   public_configs = [
     ":backend_config",
-    ":default_config",
+    ":public_include_path",
   ]
-  public_deps = [ "$dir_pw_tokenizer" ]
+  public_deps = [
+    "$dir_pw_status",
+    "$dir_pw_tokenizer",
+  ]
   deps = [
+    ":config",
     "$dir_pw_assert",
     "$dir_pw_ring_buffer",
-    "$dir_pw_status",
     "$dir_pw_trace:facade",
     "$dir_pw_varint",
   ]
@@ -113,7 +162,7 @@
     "public/pw_trace_tokenized/trace_callback.h",
     "public/pw_trace_tokenized/trace_tokenized.h",
   ]
-  sources = public + [ "trace.cc" ]
+  sources = [ "trace.cc" ]
 }
 
 pw_doc_group("docs") {
diff --git a/pw_trace_tokenized/CMakeLists.txt b/pw_trace_tokenized/CMakeLists.txt
index c981dcc..e4bd46a 100644
--- a/pw_trace_tokenized/CMakeLists.txt
+++ b/pw_trace_tokenized/CMakeLists.txt
@@ -12,6 +12,8 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+
 pw_auto_add_simple_module(pw_trace_tokenized
   IMPLEMENTS_FACADE
     pw_trace
@@ -24,5 +26,3 @@
     pw_trace:facade
     pw_varint
 )
-
-target_include_directories(pw_trace_tokenized PUBLIC public_overrides)
diff --git a/pw_trace_tokenized/docs.rst b/pw_trace_tokenized/docs.rst
index c588106..c1c27ac 100644
--- a/pw_trace_tokenized/docs.rst
+++ b/pw_trace_tokenized/docs.rst
@@ -1,8 +1,4 @@
-.. _chapter-pw-trace_tokenized:
-
-.. default-domain:: cpp
-
-.. highlight:: cpp
+.. _module-pw_trace_tokenized:
 
 ==================
 pw_trace_tokenized
@@ -182,6 +178,31 @@
 ``pw_varint``
 
 
+-------
+Logging
+-------
+The optional trace buffer logging adds support to dump trace buffers to the log.
+Buffers are converted to base64-encoding then split across log lines. Trace logs
+are surrounded by 'begin' and 'end' tags.
+
+Ex. Invoking PW_TRACE_INSTANT with 'test1' and 'test2', then calling this
+function would produce this in the output logs:
+
+.. code:: sh
+
+  [TRACE] begin
+  [TRACE] data: BWdDMRoABWj52YMB
+  [TRACE] end
+
+Added dependencies
+------------------
+``pw_base64``
+``pw_log``
+``pw_ring_buffer``
+``pw_string``
+``pw_tokenizer``
+``pw_varint``
+
 --------
 Examples
 --------
diff --git a/pw_trace_tokenized/fake_trace_time.cc b/pw_trace_tokenized/fake_trace_time.cc
index 531beda..353d33f 100644
--- a/pw_trace_tokenized/fake_trace_time.cc
+++ b/pw_trace_tokenized/fake_trace_time.cc
@@ -28,4 +28,4 @@
 // Return 1 for ticks per second, as it doesn't apply to fake timer.
 size_t pw_trace_GetTraceTimeTicksPerSecond() { return 1; }
 
-void pw_trace_ResetFakeTraceTimer() { time_counter = 0; }
\ No newline at end of file
+void pw_trace_ResetFakeTraceTimer() { time_counter = 0; }
diff --git a/pw_trace_tokenized/host_trace_time.cc b/pw_trace_tokenized/host_trace_time.cc
index b9135b4..ed004dc 100644
--- a/pw_trace_tokenized/host_trace_time.cc
+++ b/pw_trace_tokenized/host_trace_time.cc
@@ -22,13 +22,13 @@
 
 namespace {
 
-auto start = system_clock::now();
+auto start = steady_clock::now();
 
 }  // namespace
 
 // Define trace time as a counter for tests.
 PW_TRACE_TIME_TYPE pw_trace_GetTraceTime() {
-  auto delta = system_clock::now() - start;
+  auto delta = steady_clock::now() - start;
   return duration_cast<microseconds>(delta).count();
 }
 
diff --git a/pw_trace_tokenized/public/pw_trace_tokenized/config.h b/pw_trace_tokenized/public/pw_trace_tokenized/config.h
new file mode 100644
index 0000000..db77230
--- /dev/null
+++ b/pw_trace_tokenized/public/pw_trace_tokenized/config.h
@@ -0,0 +1,149 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Configurable options for the tokenized trace module.
+#pragma once
+
+// Since not all strings are tokenizeable, labels can be passed as arguments.
+// PW_TRACE_CONFIG_ARG_LABEL_SIZE_BYTES configures the maximum number of
+// characters to include, if more are provided the string will be clipped.
+#ifndef PW_TRACE_CONFIG_ARG_LABEL_SIZE_BYTES
+#define PW_TRACE_CONFIG_ARG_LABEL_SIZE_BYTES 20
+#endif  // PW_TRACE_CONFIG_ARG_LABEL_SIZE_BYTES
+
+// PW_TRACE_QUEUE_SIZE_EVENTS configures the number of events which can be
+// queued up internally. This is needed to support concurrent trace events.
+#ifndef PW_TRACE_QUEUE_SIZE_EVENTS
+#define PW_TRACE_QUEUE_SIZE_EVENTS 5
+#endif  // PW_TRACE_QUEUE_SIZE_EVENTS
+
+// --- Config options for time source ----
+
+// PW_TRACE_TIME_TYPE sets the type for trace time.
+#ifndef PW_TRACE_TIME_TYPE
+#define PW_TRACE_TIME_TYPE uint32_t
+#endif  // PW_TRACE_TIME_TYPE
+
+// PW_TRACE_GET_TIME is the macro which is called to get the current time for a
+// trace event. It's default is to use pw_trace_GetTraceTime() which needs to be
+// provided by the platform.
+#ifndef PW_TRACE_GET_TIME
+#define PW_TRACE_GET_TIME() pw_trace_GetTraceTime()
+extern PW_TRACE_TIME_TYPE pw_trace_GetTraceTime();
+#endif  // PW_TRACE_GET_TIME
+
+// PW_TRACE_GET_TIME_TICKS_PER_SECOND is the macro which is called to determine
+// the unit of the trace time. It's default is to use
+// pw_trace_GetTraceTimeTicksPerSecond() which needs to be provided by the
+// platform.
+#ifndef PW_TRACE_GET_TIME_TICKS_PER_SECOND
+#define PW_TRACE_GET_TIME_TICKS_PER_SECOND() \
+  pw_trace_GetTraceTimeTicksPerSecond()
+extern size_t pw_trace_GetTraceTimeTicksPerSecond();
+#endif  // PW_TRACE_GET_TIME_TICKS_PER_SECOND
+
+// PW_TRACE_GET_TIME_DELTA is te macro which is called to determine
+// the delta between two PW_TRACE_TIME_TYPE variables. It should return a
+// delta of the two times, in the same type.
+// The default implementation just subtracts the two, which is suitable if
+// values either never wrap, or are unsigned and do not wrap multiple times
+// between trace events. If either of these are not the case a different
+// implemention should be used.
+#ifndef PW_TRACE_GET_TIME_DELTA
+#define PW_TRACE_GET_TIME_DELTA(last_time, current_time) \
+  ((current_time) - (last_time))
+#ifdef __cplusplus
+static_assert(
+    std::is_unsigned<PW_TRACE_TIME_TYPE>::value,
+    "Default time delta implementation only works for unsigned time types.");
+#endif  // __cplusplus
+#endif  // PW_TRACE_GET_TIME_DELTA
+
+// --- Config options for callbacks ----
+
+// PW_TRACE_CONFIG_MAX_EVENT_CALLBACKS is the maximum number of event callbacks
+// which can be registered at a time.
+#ifndef PW_TRACE_CONFIG_MAX_EVENT_CALLBACKS
+#define PW_TRACE_CONFIG_MAX_EVENT_CALLBACKS 2
+#endif  // PW_TRACE_CONFIG_MAX_EVENT_CALLBACKS
+
+// PW_TRACE_CONFIG_MAX_SINKS is the maximum number of encoded event sinks which
+// can be registered at a time.
+#ifndef PW_TRACE_CONFIG_MAX_SINKS
+#define PW_TRACE_CONFIG_MAX_SINKS 2
+#endif  // PW_TRACE_CONFIG_MAX_SINKS
+
+// --- Config options for locks ---
+
+// PW_TRACE_LOCK  Is is also called when registering and unregistering callbacks
+// and sinks.
+#ifndef PW_TRACE_LOCK
+#define PW_TRACE_LOCK()
+#endif  // PW_TRACE_LOCK
+
+// PW_TRACE_TRY_LOCK is is called when events need to be emptied from the queue,
+// if multiple trace events happened at the same time only one task needs to get
+// this lock and will empty the queue for all tasks, therefore there is no need
+// to block in trace events.
+// This should lock the same object as PW_TRACE_LOCK, and be unlocked using
+// PW_TRACE_UNLOCK
+// Returns true if lock was acquired and false if the lock is currently held and
+// could not be aquired.
+#ifndef PW_TRACE_TRY_LOCK
+#define PW_TRACE_TRY_LOCK() (true)  // Returns true if lock successful
+#endif  // PW_TRACE_TRY_LOCK
+
+#ifndef PW_TRACE_UNLOCK
+#define PW_TRACE_UNLOCK()
+#endif  // PW_TRACE_UNLOCK
+
+// PW_TRACE_QUEUE_* is used to lock while queueing an event, this is a quick
+// copy operation and was designed to be suitable in a critical section to
+// avoid unneccessary blocking and task switches.
+#ifndef PW_TRACE_QUEUE_LOCK
+#define PW_TRACE_QUEUE_LOCK()
+#endif  // PW_TRACE_QUEUE_LOCK
+
+#ifndef PW_TRACE_QUEUE_UNLOCK
+#define PW_TRACE_QUEUE_UNLOCK()
+#endif  // PW_TRACE_QUEUE_UNLOCK
+
+// --- Config options for optional trace buffer ---
+
+// PW_TRACE_BUFFER_SIZE_BYTES is the size in bytes of the optional trace buffer.
+// The buffer is automatically registered at boot if the buffer size is not 0.
+#ifndef PW_TRACE_BUFFER_SIZE_BYTES
+#define PW_TRACE_BUFFER_SIZE_BYTES 256
+#endif  // PW_TRACE_BUFFER_SIZE_BYTES
+
+// PW_TRACE_BUFFER_MAX_BLOCK_SIZE_BYTES indicates the maximum size any
+// individual encoded trace event could be. This is used internally to buffer up
+// a sample before saving into the buffer.
+#ifndef PW_TRACE_BUFFER_MAX_BLOCK_SIZE_BYTES
+// The below calaculation is provided to help determine a suitable value, using
+// the max data size bytes.
+#ifndef PW_TRACE_BUFFER_MAX_DATA_SIZE_BYTES
+#define PW_TRACE_BUFFER_MAX_DATA_SIZE_BYTES (32)
+#endif  // PW_TRACE_BUFFER_MAX_BLOCK_SIZE_BYTES
+
+#ifndef PW_TRACE_BUFFER_MAX_HEADER_SIZE_BYTES
+#define PW_TRACE_BUFFER_MAX_HEADER_SIZE_BYTES                                  \
+  (pw::varint::kMaxVarint64SizeBytes) +     /* worst case delta time varint */ \
+      (sizeof(uint32_t)) +                  /* trace token size */             \
+      (pw::varint::kMaxVarint64SizeBytes) + /* worst case trace id varint */
+#endif  // PW_TRACE_BUFFER_MAX_HEADER_SIZE_BYTES
+
+#define PW_TRACE_BUFFER_MAX_BLOCK_SIZE_BYTES \
+  PW_TRACE_BUFFER_MAX_HEADER_SIZE_BYTES + PW_TRACE_BUFFER_MAX_DATA_SIZE_BYTES
+#endif  // PW_TRACE_BUFFER_MAX_BLOCK_SIZE_BYTES
diff --git a/pw_trace_tokenized/public/pw_trace_tokenized/internal/trace_tokenized_internal.h b/pw_trace_tokenized/public/pw_trace_tokenized/internal/trace_tokenized_internal.h
index 1ab8536..5038887 100644
--- a/pw_trace_tokenized/public/pw_trace_tokenized/internal/trace_tokenized_internal.h
+++ b/pw_trace_tokenized/public/pw_trace_tokenized/internal/trace_tokenized_internal.h
@@ -19,7 +19,7 @@
 #include <stdbool.h>
 #include <stdint.h>
 
-#include "pw_preprocessor/macro_arg_count.h"
+#include "pw_preprocessor/arguments.h"
 
 // Because __FUNCTION__ is not a string literal to the preprocessor it can't be
 // tokenized. So this backend redefines the implementation to instead use the
diff --git a/pw_trace_tokenized/public/pw_trace_tokenized/trace_buffer.h b/pw_trace_tokenized/public/pw_trace_tokenized/trace_buffer.h
index beff405..66572f1 100644
--- a/pw_trace_tokenized/public/pw_trace_tokenized/trace_buffer.h
+++ b/pw_trace_tokenized/public/pw_trace_tokenized/trace_buffer.h
@@ -17,32 +17,10 @@
 #pragma once
 
 #include "pw_ring_buffer/prefixed_entry_ring_buffer.h"
+#include "pw_trace_tokenized/config.h"
 #include "pw_trace_tokenized/trace_tokenized.h"
 #include "pw_varint/varint.h"
 
-// Configurable options
-// The buffer is automatically registered at boot if the buffer size is not 0.
-#ifndef PW_TRACE_BUFFER_SIZE_BYTES
-#define PW_TRACE_BUFFER_SIZE_BYTES 256
-#endif  // PW_TRACE_BUFFER_SIZE_BYTES
-
-// PW_TRACE_BUFFER_MAX_BLOCK_SIZE_BYTES indicates the maximum size any
-// individual encoded trace event could be. This is used internally to buffer up
-// a sample before saving into the buffer.
-#ifndef PW_TRACE_BUFFER_MAX_BLOCK_SIZE_BYTES
-// The below calaculation is provided to help determine a suitable value, using
-// the max data size bytes.
-#ifndef PW_TRACE_BUFFER_MAX_DATA_SIZE_BYTES
-#define PW_TRACE_BUFFER_MAX_DATA_SIZE_BYTES (32)
-#endif  // PW_TRACE_BUFFER_MAX_BLOCK_SIZE_BYTES
-
-#define PW_TRACE_BUFFER_MAX_BLOCK_SIZE_BYTES                                 \
-  (pw::varint::kMaxVarintSizeBytes) +     /* worst case delta time varint */ \
-      (sizeof(uint32_t)) +                /* trace token size */             \
-      (pw::varint::kMaxVarintSizeBytes) + /* worst case trace id varint */   \
-      PW_TRACE_BUFFER_MAX_DATA_SIZE_BYTES
-#endif  // PW_TRACE_BUFFER_MAX_BLOCK_SIZE_BYTES
-
 namespace pw {
 namespace trace {
 
diff --git a/pw_trace_tokenized/public/pw_trace_tokenized/trace_buffer_log.h b/pw_trace_tokenized/public/pw_trace_tokenized/trace_buffer_log.h
new file mode 100644
index 0000000..d6aa4f0
--- /dev/null
+++ b/pw_trace_tokenized/public/pw_trace_tokenized/trace_buffer_log.h
@@ -0,0 +1,37 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+//==============================================================================
+//
+// This files provides support to dump the trace buffer to the logging module.
+#pragma once
+
+#include "pw_status/status.h"
+
+namespace pw {
+namespace trace {
+
+// Dumps the trace buffer to the log. The output format to the log is the
+// base64-encoded buffer, split into lines of an implementation-defined length.
+// The trace logs are surrounded by 'begin' and 'end' tags.
+//
+// Ex. Invoking PW_TRACE_INSTANT with 'test1' and 'test2', then calling this
+// function would produce this in the output logs:
+//
+// [TRACE] begin
+// [TRACE] data: BWdDMRoABWj52YMB
+// [TRACE] end
+pw::Status DumpTraceBufferToLog();
+
+}  // namespace trace
+}  // namespace pw
diff --git a/pw_trace_tokenized/public/pw_trace_tokenized/trace_callback.h b/pw_trace_tokenized/public/pw_trace_tokenized/trace_callback.h
index bfa3d8b..426820c 100644
--- a/pw_trace_tokenized/public/pw_trace_tokenized/trace_callback.h
+++ b/pw_trace_tokenized/public/pw_trace_tokenized/trace_callback.h
@@ -25,21 +25,9 @@
 #include <span>
 
 #include "pw_status/status.h"
+#include "pw_trace_tokenized/config.h"
 #include "pw_trace_tokenized/trace_tokenized.h"
 
-// Config options
-// PW_TRACE_CONFIG_MAX_EVENT_CALLBACKS is the maximum number of event callbacks
-// which can be registered at a time.
-#ifndef PW_TRACE_CONFIG_MAX_EVENT_CALLBACKS
-#define PW_TRACE_CONFIG_MAX_EVENT_CALLBACKS 2
-#endif  // PW_TRACE_CONFIG_MAX_EVENT_CALLBACKS
-
-// PW_TRACE_CONFIG_MAX_SINKS is the maximum number of encoded event sinks which
-// can be registered at a time.
-#ifndef PW_TRACE_CONFIG_MAX_SINKS
-#define PW_TRACE_CONFIG_MAX_SINKS 2
-#endif  // PW_TRACE_CONFIG_MAX_SINKS
-
 PW_EXTERN_C_START
 // The pw_trace_EventCallback is called before the sample is encoded or sent
 // to the sinks. Bits in the return argument can be set to change the behaviour
@@ -101,7 +89,7 @@
 // start, allowing buffers to allocate the required amount at the start when
 // necessary.
 //
-// If Status::OK is not returned from Start, the events bytes will be skipped.
+// If Status::Ok() is not returned from Start, the events bytes will be skipped.
 //
 // NOTE: Called while tracing is locked (which might be a critical section
 // depending on application), so quick/simple operations only. One trace event
diff --git a/pw_trace_tokenized/public/pw_trace_tokenized/trace_tokenized.h b/pw_trace_tokenized/public/pw_trace_tokenized/trace_tokenized.h
index cb0b8b4..a7e5ca4 100644
--- a/pw_trace_tokenized/public/pw_trace_tokenized/trace_tokenized.h
+++ b/pw_trace_tokenized/public/pw_trace_tokenized/trace_tokenized.h
@@ -28,79 +28,99 @@
 #endif  // __cplusplus
 #endif  // PW_TRACE_GET_TIME_DELTA
 
+#include "pw_status/status.h"
 #include "pw_tokenizer/tokenize.h"
+#include "pw_trace_tokenized/config.h"
 #include "pw_trace_tokenized/internal/trace_tokenized_internal.h"
 
-// Configurable options
-
-// Since not all strings are tokenizeable, labels can be passed as arguments.
-// PW_TRACE_CONFIG_ARG_LABEL_SIZE_BYTES configures the maximum number of
-// characters to include, if more are provided the string will be clipped.
-#ifndef PW_TRACE_CONFIG_ARG_LABEL_SIZE_BYTES
-#define PW_TRACE_CONFIG_ARG_LABEL_SIZE_BYTES 20
-#endif  // PW_TRACE_CONFIG_ARG_LABEL_SIZE_BYTES
-
-// PW_TRACE_TIME_TYPE sets the type for trace time.
-#ifndef PW_TRACE_TIME_TYPE
-#define PW_TRACE_TIME_TYPE uint32_t
-#endif  // PW_TRACE_TIME_TYPE
-
-// PW_TRACE_GET_TIME is the macro which is called to get the current time for a
-// trace event. It's default is to use pw_trace_GetTraceTime() which needs to be
-// provided by the platform.
-#ifndef PW_TRACE_GET_TIME
-#define PW_TRACE_GET_TIME() pw_trace_GetTraceTime()
-extern PW_TRACE_TIME_TYPE pw_trace_GetTraceTime();
-#endif  // PW_TRACE_GET_TIME
-
-// PW_TRACE_GET_TIME_TICKS_PER_SECOND is the macro which is called to determine
-// the unit of the trace time. It's default is to use
-// pw_trace_GetTraceTimeTicksPerSecond() which needs to be provided by the
-// platform.
-#ifndef PW_TRACE_GET_TIME_TICKS_PER_SECOND
-#define PW_TRACE_GET_TIME_TICKS_PER_SECOND() \
-  pw_trace_GetTraceTimeTicksPerSecond()
-extern size_t pw_trace_GetTraceTimeTicksPerSecond();
-#endif  // PW_TRACE_GET_TIME_TICKS_PER_SECOND
-
-// PW_TRACE_GET_TIME_DELTA is te macro which is called to determine
-// the delta between two PW_TRACE_TIME_TYPE variables. It should return a
-// delta of the two times, in the same type.
-// The default implementation just subtracts the two, which is suitable if
-// values either never wrap, or are unsigned and do not wrap multiple times
-// between trace events. If either of these are not the case a different
-// implemention should be used.
-#ifndef PW_TRACE_GET_TIME_DELTA
-#define PW_TRACE_GET_TIME_DELTA(last_time, current_time) \
-  ((current_time) - (last_time))
-#ifdef __cplusplus
-static_assert(
-    std::is_unsigned<PW_TRACE_TIME_TYPE>::value,
-    "Default time delta implementation only works for unsigned time types.");
-#endif  // __cplusplus
-#endif  // PW_TRACE_GET_TIME_DELTA
-
-// PW_TRACE_LOCK is called when a new event is being processed to ensure only
-// one event is sent to the sinks at a time. Is is also called when registering
-// and unregistering callbacks and sinks.
-#ifndef PW_TRACE_LOCK
-#define PW_TRACE_LOCK()
-#endif  // PW_TRACE_LOCK
-
-// PW_TRACE_UNLOCK is called after sending the data to all the sinks.
-#ifndef PW_TRACE_UNLOCK
-#define PW_TRACE_UNLOCK()
-#endif  // PW_TRACE_UNLOCK
-
 #ifdef __cplusplus
 namespace pw {
 namespace trace {
 
 using EventType = pw_trace_EventType;
 
+namespace internal {
+
+// Simple ring buffer which is suitable for use in a critical section.
+template <size_t kSize>
+class TraceQueue {
+ public:
+  struct QueueEventBlock {
+    uint32_t trace_token;
+    EventType event_type;
+    const char* module;
+    uint32_t trace_id;
+    uint8_t flags;
+    size_t data_size;
+    std::byte data_buffer[PW_TRACE_BUFFER_MAX_DATA_SIZE_BYTES];
+  };
+
+  pw::Status TryPushBack(uint32_t trace_token,
+                         EventType event_type,
+                         const char* module,
+                         uint32_t trace_id,
+                         uint8_t flags,
+                         const void* data_buffer,
+                         size_t data_size) {
+    if (IsFull()) {
+      return pw::Status::RESOURCE_EXHAUSTED;
+    }
+    event_queue_[head_].trace_token = trace_token;
+    event_queue_[head_].event_type = event_type;
+    event_queue_[head_].module = module;
+    event_queue_[head_].trace_id = trace_id;
+    event_queue_[head_].flags = flags;
+    event_queue_[head_].data_size = data_size;
+    for (size_t i = 0; i < data_size; i++) {
+      event_queue_[head_].data_buffer[i] =
+          reinterpret_cast<const std::byte*>(data_buffer)[i];
+    }
+    head_ = (head_ + 1) % kSize;
+    is_empty_ = false;
+    return pw::Status::OK;
+  }
+
+  const volatile QueueEventBlock* PeekFront() const {
+    if (IsEmpty()) {
+      return nullptr;
+    }
+    return &event_queue_[tail_];
+  }
+
+  void PopFront() {
+    if (!IsEmpty()) {
+      tail_ = (tail_ + 1) % kSize;
+      is_empty_ = (tail_ == head_);
+    }
+  }
+
+  void Clear() {
+    head_ = 0;
+    tail_ = 0;
+    is_empty_ = true;
+  }
+
+  bool IsEmpty() const { return is_empty_; }
+  bool IsFull() const { return !is_empty_ && (head_ == tail_); }
+
+ private:
+  std::array<volatile QueueEventBlock, kSize> event_queue_;
+  volatile size_t head_ = 0;  // Next write
+  volatile size_t tail_ = 0;  // Next read
+  volatile bool is_empty_ =
+      true;  // Used to distinquish if head==tail is empty or full
+};
+
+}  // namespace internal
+
 class TokenizedTraceImpl {
  public:
-  void Enable(bool enable) { enabled_ = enable; }
+  void Enable(bool enable) {
+    if (enable != enabled_ && enable) {
+      event_queue_.Clear();
+    }
+    enabled_ = enable;
+  }
   bool IsEnabled() const { return enabled_; }
 
   void HandleTraceEvent(uint32_t trace_token,
@@ -112,8 +132,13 @@
                         size_t data_size);
 
  private:
+  using TraceQueue = internal::TraceQueue<PW_TRACE_QUEUE_SIZE_EVENTS>;
   PW_TRACE_TIME_TYPE last_trace_time_ = 0;
   bool enabled_ = false;
+  TraceQueue event_queue_;
+
+  void HandleNextItemInQueue(
+      const volatile TraceQueue::QueueEventBlock* event_block);
 };
 
 // A singleton object of the TokenizedTraceImpl class which can be used to
@@ -167,10 +192,13 @@
 //    "1|2|test_module|group|label|%d"
 // The trace_id, and data value are runtime values and not included in the
 // token string.
-#define PW_TRACE_REF(event_type, module, label, flags, group)   \
-  PW_TOKENIZE_STRING(PW_STRINGIFY(event_type) "|" PW_STRINGIFY( \
-      flags) "|" module "|" group "|" label)
+#define PW_TRACE_REF(event_type, module, label, flags, group)          \
+  PW_TOKENIZE_STRING_DOMAIN("trace",                                   \
+                            PW_STRINGIFY(event_type) "|" PW_STRINGIFY( \
+                                flags) "|" module "|" group "|" label)
 
-#define PW_TRACE_REF_DATA(event_type, module, label, flags, group, type) \
-  PW_TOKENIZE_STRING(PW_STRINGIFY(event_type) "|" PW_STRINGIFY(          \
-      flags) "|" module "|" group "|" label "|" type)
+#define PW_TRACE_REF_DATA(event_type, module, label, flags, group, type)    \
+  PW_TOKENIZE_STRING_DOMAIN(                                                \
+      "trace",                                                              \
+      PW_STRINGIFY(event_type) "|" PW_STRINGIFY(flags) "|" module "|" group \
+                                                       "|" label "|" type)
diff --git a/pw_protobuf_compiler/nanopb.gni b/pw_trace_tokenized/py/BUILD.gn
similarity index 71%
copy from pw_protobuf_compiler/nanopb.gni
copy to pw_trace_tokenized/py/BUILD.gn
index 3c3be32..bae2cde 100644
--- a/pw_protobuf_compiler/nanopb.gni
+++ b/pw_trace_tokenized/py/BUILD.gn
@@ -12,8 +12,14 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-declare_args() {
-  # If compiling protos for nanopb, this variable is set to the path to the
-  # nanopb installation.
-  dir_pw_third_party_nanopb = ""
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/python.gni")
+
+pw_python_package("py") {
+  setup = [ "setup.py" ]
+  sources = [
+    "pw_trace_tokenized/__init__.py",
+    "pw_trace_tokenized/trace_tokenized.py",
+  ]
 }
diff --git a/pw_trace_tokenized/py/pw_trace_tokenized/__init__.py b/pw_trace_tokenized/py/pw_trace_tokenized/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pw_trace_tokenized/py/pw_trace_tokenized/__init__.py
diff --git a/pw_trace_tokenized/py/pw_trace_tokenized/py.typed b/pw_trace_tokenized/py/pw_trace_tokenized/py.typed
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pw_trace_tokenized/py/pw_trace_tokenized/py.typed
diff --git a/pw_trace_tokenized/py/trace_tokenized.py b/pw_trace_tokenized/py/pw_trace_tokenized/trace_tokenized.py
similarity index 92%
rename from pw_trace_tokenized/py/trace_tokenized.py
rename to pw_trace_tokenized/py/pw_trace_tokenized/trace_tokenized.py
index 917c06f..3e88a8a 100755
--- a/pw_trace_tokenized/py/trace_tokenized.py
+++ b/pw_trace_tokenized/py/pw_trace_tokenized/trace_tokenized.py
@@ -90,13 +90,13 @@
     return len(token_values) > TokenIdx.data_fmt
 
 
-def create_trace_event(token_string, timestamp, trace_id, data):
+def create_trace_event(token_string, timestamp_us, trace_id, data):
     token_values = token_string.split("|")
     return trace.TraceEvent(event_type=get_trace_type(
         token_values[TokenIdx.EventType]),
                             module=token_values[TokenIdx.Module],
                             label=token_values[TokenIdx.Label],
-                            timestamp=timestamp,
+                            timestamp_us=timestamp_us,
                             group=token_values[TokenIdx.Group],
                             trace_id=trace_id,
                             flags=token_values[TokenIdx.Flag],
@@ -106,7 +106,8 @@
                             data=data if has_data(token_string) else b'')
 
 
-def parse_trace_event(buffer, db, last_time):
+def parse_trace_event(buffer, db, last_time, ticks_per_second=1000):
+    us_per_tick = 1000000 / ticks_per_second
     idx = 0
     # Read token
     token = struct.unpack('I', buffer[idx:idx + 4])[0]
@@ -114,12 +115,12 @@
 
     # Decode token
     if len(db.token_to_entries[token]) == 0:
-        _LOG.error("token not found")
+        _LOG.error("token not found: %08x", token)
     token_string = str(db.token_to_entries[token][0])
 
     # Read time
     time_delta, time_bytes = varint_decode(buffer[idx:])
-    timestamp = last_time + time_delta
+    timestamp_us = last_time + us_per_tick * time_delta
     idx += time_bytes
 
     # Trace ID
@@ -134,7 +135,7 @@
         data = buffer[idx:]
 
     # Create trace event
-    return create_trace_event(token_string, timestamp, trace_id, data)
+    return create_trace_event(token_string, timestamp_us, trace_id, data)
 
 
 def get_trace_events_from_file(databases, input_file_name):
@@ -156,7 +157,7 @@
 
             event = parse_trace_event(bytes_read[idx + 1:idx + 1 + size], db,
                                       last_timestamp)
-            last_timestamp = event.timestamp
+            last_timestamp = event.timestamp_us
             events.append(event)
             idx = idx + size + 1
     return events
@@ -171,7 +172,7 @@
     parser.add_argument(
         'databases',
         nargs='+',
-        action=database._LoadTokenDatabases,  # pylint: disable=protected-access
+        action=database.LoadTokenDatabases,
         help='Databases (ELF, binary, or CSV) to use to lookup tokens.')
     parser.add_argument(
         '-i',
diff --git a/pw_trace_tokenized/py/setup.py b/pw_trace_tokenized/py/setup.py
new file mode 100644
index 0000000..cea2439
--- /dev/null
+++ b/pw_trace_tokenized/py/setup.py
@@ -0,0 +1,27 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""The pw_trace_tokenized package."""
+
+import setuptools  # type: ignore
+
+setuptools.setup(
+    name='pw_trace_tokenized',
+    version='0.0.1',
+    author='Pigweed Authors',
+    author_email='pigweed-developers@googlegroups.com',
+    description='pw_trace backend to tokenize trace events',
+    packages=setuptools.find_packages(),
+    package_data={'pw_trace_tokenized': ['py.typed']},
+    zip_safe=False,
+)
diff --git a/pw_trace_tokenized/trace.cc b/pw_trace_tokenized/trace.cc
index 229214d..3769ca2 100644
--- a/pw_trace_tokenized/trace.cc
+++ b/pw_trace_tokenized/trace.cc
@@ -40,10 +40,48 @@
     return;
   }
 
-  pw_trace_TraceEventReturnFlags ret_flags = 0;
+  // Create trace event
+  PW_TRACE_QUEUE_LOCK();
+  if (!event_queue_
+           .TryPushBack(trace_token,
+                        event_type,
+                        module,
+                        trace_id,
+                        flags,
+                        data_buffer,
+                        data_size)
+           .ok()) {
+    // Queue full dropping sample
+    // TODO(rgoliver): Allow other strategies, for example: drop oldest, try
+    // empty queue, or block.
+  }
+  PW_TRACE_QUEUE_UNLOCK();
 
-  PW_TRACE_LOCK();
+  // Sample is now in queue (if not dropped), try to empty the queue if not
+  // already being emptied.
+  if (PW_TRACE_TRY_LOCK()) {
+    while (!event_queue_.IsEmpty()) {
+      HandleNextItemInQueue(event_queue_.PeekFront());
+      event_queue_.PopFront();
+    }
+    PW_TRACE_UNLOCK();
+  }
+}
+
+void TokenizedTraceImpl::HandleNextItemInQueue(
+    const volatile TraceQueue::QueueEventBlock* event_block) {
+  // Get next item in queue
+  uint32_t trace_token = event_block->trace_token;
+  EventType event_type = event_block->event_type;
+  const char* module = event_block->module;
+  uint32_t trace_id = event_block->trace_id;
+  uint8_t flags = event_block->flags;
+  const std::byte* data_buffer =
+      const_cast<const std::byte*>(event_block->data_buffer);
+  size_t data_size = event_block->data_size;
+
   // Call any event callback which is registered to receive every event.
+  pw_trace_TraceEventReturnFlags ret_flags = 0;
   ret_flags |=
       Callbacks::Instance().CallEventCallbacks(CallbacksImpl::kCallOnEveryEvent,
                                                trace_token,
@@ -53,11 +91,10 @@
                                                flags);
   // Return if disabled.
   if ((PW_TRACE_EVENT_RETURN_FLAGS_SKIP_EVENT & ret_flags) || !enabled_) {
-    PW_TRACE_UNLOCK();
     return;
   }
 
-  // Call any event callback which is registered to receive every event.
+  // Call any event callback not already called.
   ret_flags |= Callbacks::Instance().CallEventCallbacks(
       CallbacksImpl::kCallOnlyWhenEnabled,
       trace_token,
@@ -68,14 +105,13 @@
   // Return if disabled (from a callback) or if a callback has indicated the
   // sample should be skipped.
   if ((PW_TRACE_EVENT_RETURN_FLAGS_SKIP_EVENT & ret_flags) || !enabled_) {
-    PW_TRACE_UNLOCK();
     return;
   }
 
   // Create header to store trace info
   static constexpr size_t kMaxHeaderSize =
-      sizeof(trace_token) + pw::varint::kMaxVarintSizeBytes +  // time
-      pw::varint::kMaxVarintSizeBytes;                         // trace_id
+      sizeof(trace_token) + pw::varint::kMaxVarint64SizeBytes +  // time
+      pw::varint::kMaxVarint64SizeBytes;                         // trace_id
   std::byte header[kMaxHeaderSize];
   memcpy(header, &trace_token, sizeof(trace_token));
   size_t header_size = sizeof(trace_token);
@@ -108,8 +144,6 @@
   if (PW_TRACE_EVENT_RETURN_FLAGS_DISABLE_AFTER_PROCESSING & ret_flags) {
     enabled_ = false;
   }
-
-  PW_TRACE_UNLOCK();
 }
 
 pw_trace_TraceEventReturnFlags CallbacksImpl::CallEventCallbacks(
@@ -284,12 +318,14 @@
                                 pw_trace_SinkEndBlock end_block_func,
                                 void* user_data,
                                 pw_trace_SinkHandle* handle) {
-  return Callbacks::Instance().RegisterSink(
-      start_func, add_bytes_func, end_block_func, user_data, handle);
+  return Callbacks::Instance()
+      .RegisterSink(
+          start_func, add_bytes_func, end_block_func, user_data, handle)
+      .code();
 }
 
 pw_Status pw_trace_UnregisterSink(pw_trace_EventCallbackHandle handle) {
-  return Callbacks::Instance().UnregisterSink(handle);
+  return Callbacks::Instance().UnregisterSink(handle).code();
 }
 
 pw_Status pw_trace_RegisterEventCallback(
@@ -297,19 +333,21 @@
     pw_trace_ShouldCallOnEveryEvent called_on_every_event,
     void* user_data,
     pw_trace_EventCallbackHandle* handle) {
-  return Callbacks::Instance().RegisterEventCallback(
-      callback,
-      static_cast<CallbacksImpl::CallOnEveryEvent>(called_on_every_event),
-      user_data,
-      handle);
+  return Callbacks::Instance()
+      .RegisterEventCallback(
+          callback,
+          static_cast<CallbacksImpl::CallOnEveryEvent>(called_on_every_event),
+          user_data,
+          handle)
+      .code();
 }
 
 pw_Status pw_trace_UnregisterEventCallback(
     pw_trace_EventCallbackHandle handle) {
-  return Callbacks::Instance().UnregisterEventCallback(handle);
+  return Callbacks::Instance().UnregisterEventCallback(handle).code();
 }
 
 PW_EXTERN_C_END
 
 }  // namespace trace
-}  // namespace pw
\ No newline at end of file
+}  // namespace pw
diff --git a/pw_trace_tokenized/trace_buffer_log.cc b/pw_trace_tokenized/trace_buffer_log.cc
new file mode 100644
index 0000000..3d9e5a6
--- /dev/null
+++ b/pw_trace_tokenized/trace_buffer_log.cc
@@ -0,0 +1,85 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+//==============================================================================
+//
+#include "pw_trace_tokenized/trace_buffer_log.h"
+
+#include <span>
+
+#include "pw_base64/base64.h"
+#include "pw_log/log.h"
+#include "pw_string/string_builder.h"
+#include "pw_trace_tokenized/trace_buffer.h"
+
+namespace pw {
+namespace trace {
+namespace {
+
+constexpr int kMaxEntrySize = PW_TRACE_BUFFER_MAX_BLOCK_SIZE_BYTES;
+constexpr int kMaxEntrySizeBase64 = pw::base64::EncodedSize(kMaxEntrySize);
+constexpr int kLineLength = 80;
+
+class ScopedTracePause {
+ public:
+  ScopedTracePause() : was_enabled_(pw_trace_IsEnabled()) {
+    PW_TRACE_SET_ENABLED(false);
+  }
+  ~ScopedTracePause() { PW_TRACE_SET_ENABLED(was_enabled_); }
+
+ private:
+  bool was_enabled_;
+};
+
+}  // namespace
+
+pw::Status DumpTraceBufferToLog() {
+  std::byte line_buffer[kLineLength] = {};
+  std::byte entry_buffer[kMaxEntrySize + 1] = {};
+  char entry_base64_buffer[kMaxEntrySizeBase64] = {};
+  pw::StringBuilder line_builder(line_buffer);
+  ScopedTracePause pause_trace;
+  pw::ring_buffer::PrefixedEntryRingBuffer* trace_buffer =
+      pw::trace::GetBuffer();
+  size_t bytes_read = 0;
+  PW_LOG_INFO("[TRACE] begin");
+  while (trace_buffer->PeekFront(std::span(entry_buffer).subspan(1),
+                                 &bytes_read) != pw::Status::OutOfRange()) {
+    trace_buffer->PopFront();
+    entry_buffer[0] = static_cast<std::byte>(bytes_read);
+    // The entry buffer is formatted as (size, entry) with an extra byte as
+    // a header to the entry. The calcuation of bytes_read + 1 represents
+    // the extra size header.
+    size_t to_write =
+        pw::base64::Encode(std::span(entry_buffer, bytes_read + 1),
+                           std::span(entry_base64_buffer));
+    size_t space_left = line_builder.max_size() - line_builder.size();
+    size_t written = 0;
+    while (to_write - written >= space_left) {
+      line_builder.append(entry_base64_buffer + written, space_left);
+      PW_LOG_INFO("[TRACE] data: %s", line_builder.c_str());
+      line_builder.clear();
+      written += space_left;
+      space_left = line_builder.max_size();
+    }
+    line_builder.append(entry_base64_buffer + written, to_write - written);
+  }
+  if (line_builder.size() > 0) {
+    PW_LOG_INFO("[TRACE] data: %s", line_builder.c_str());
+  }
+  PW_LOG_INFO("[TRACE] end");
+  return pw::Status::Ok();
+}
+
+}  // namespace trace
+}  // namespace pw
diff --git a/pw_trace_tokenized/trace_buffer_log_test.cc b/pw_trace_tokenized/trace_buffer_log_test.cc
new file mode 100644
index 0000000..e2327e2
--- /dev/null
+++ b/pw_trace_tokenized/trace_buffer_log_test.cc
@@ -0,0 +1,43 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#define PW_TRACE_MODULE_NAME "TST"
+
+#include "pw_trace_tokenized/trace_buffer_log.h"
+
+#include "gtest/gtest.h"
+#include "pw_trace/trace.h"
+
+TEST(TokenizedTrace, DumpSmallBuffer) {
+  // TODO(pwbug/266): This test only verifies that the dump function does not
+  // crash, and requires manual inspection to confirm that the log output is
+  // correct. When there is support to mock and verify the calls to pw_log,
+  // these tests should be improved to validate the output.
+  PW_TRACE_SET_ENABLED(true);
+  PW_TRACE_INSTANT("test1");
+  PW_TRACE_INSTANT("test2");
+  pw::trace::DumpTraceBufferToLog();
+}
+
+TEST(TokenizedTrace, DumpLargeBuffer) {
+  // TODO(pwbug/266): This test only verifies that the dump function does not
+  // crash, and requires manual inspection to confirm that the log output is
+  // correct. When there is support to mock and verify the calls to pw_log,
+  // these tests should be improved to validate the output.
+  PW_TRACE_SET_ENABLED(true);
+  for (int i = 0; i < 100; i++) {
+    PW_TRACE_INSTANT("test");
+  }
+  pw::trace::DumpTraceBufferToLog();
+}
diff --git a/pw_trace_tokenized/trace_buffer_test.cc b/pw_trace_tokenized/trace_buffer_test.cc
index 0133c9e..bbe7dc8 100644
--- a/pw_trace_tokenized/trace_buffer_test.cc
+++ b/pw_trace_tokenized/trace_buffer_test.cc
@@ -84,7 +84,7 @@
   std::byte value[expected_max_bytes_used];
   size_t bytes_read = 0;
   EXPECT_EQ(buf->PeekFront(std::span<std::byte>(value), &bytes_read),
-            pw::Status::OK);
+            pw::Status::Ok());
 
   // read size is minus 1, since doesn't include varint size
   EXPECT_GE(bytes_read, expected_min_bytes_used - 1);
@@ -123,8 +123,8 @@
     std::byte value[PW_TRACE_BUFFER_MAX_BLOCK_SIZE_BYTES];
     size_t bytes_read = 0;
     EXPECT_EQ(buf->PeekFront(std::span<std::byte>(value), &bytes_read),
-              pw::Status::OK);
-    EXPECT_EQ(buf->PopFront(), pw::Status::OK);
+              pw::Status::Ok());
+    EXPECT_EQ(buf->PopFront(), pw::Status::Ok());
     EXPECT_EQ(*reinterpret_cast<size_t*>(&value[bytes_read - sizeof(size_t)]),
               expected_count);
     expected_count++;
diff --git a/pw_trace_tokenized/trace_test.cc b/pw_trace_tokenized/trace_test.cc
index fa8f64d..5e6d2a4 100644
--- a/pw_trace_tokenized/trace_test.cc
+++ b/pw_trace_tokenized/trace_test.cc
@@ -537,3 +537,73 @@
                     "i");  // TODO(rgoliver): check data
   EXPECT_TRUE(test_interface.GetEvents().empty());
 }
+
+// Create some helper macros that generated some test trace data based from a
+// number, and can check that it is correct.
+constexpr std::byte kTestData[] = {
+    std::byte{0}, std::byte{1}, std::byte{2}, std::byte{3}, std::byte{4}};
+#define QUEUE_TESTS_ARGS(num)                               \
+  (num), static_cast<pw_trace_EventType>((num) % 10),       \
+      "module_" PW_STRINGIFY(num), (num), (num), kTestData, \
+      (num) % PW_ARRAY_SIZE(kTestData)
+#define QUEUE_CHECK_RESULT(queue_size, result, num)                            \
+  result && ((result->trace_token) == (num)) &&                                \
+      ((result->event_type) == static_cast<pw_trace_EventType>((num) % 10)) && \
+      (strncmp(result->module,                                                 \
+               "module_" PW_STRINGIFY(num),                                    \
+               strlen("module_" PW_STRINGIFY(num))) == 0) &&                   \
+      ((result->trace_id) == (num)) && ((result->flags) == (num)) &&           \
+      (memcmp(const_cast<const pw::trace::internal::TraceQueue<                \
+                  queue_size>::QueueEventBlock*>(result)                       \
+                  ->data_buffer,                                               \
+              kTestData,                                                       \
+              result->data_size) == 0) &&                                      \
+      (result->data_size == (num) % PW_ARRAY_SIZE(kTestData))
+
+TEST(TokenizedTrace, QueueSimple) {
+  constexpr size_t kQueueSize = 5;
+  pw::trace::internal::TraceQueue<kQueueSize> queue;
+  constexpr size_t kTestNum = 1;
+  queue.TryPushBack(QUEUE_TESTS_ARGS(kTestNum));
+  EXPECT_FALSE(queue.IsEmpty());
+  EXPECT_FALSE(queue.IsFull());
+  EXPECT_TRUE(QUEUE_CHECK_RESULT(kQueueSize, queue.PeekFront(), kTestNum));
+  queue.PopFront();
+  EXPECT_TRUE(queue.IsEmpty());
+  EXPECT_TRUE(queue.PeekFront() == nullptr);
+  EXPECT_FALSE(queue.IsFull());
+}
+
+TEST(TokenizedTrace, QueueFull) {
+  constexpr size_t kQueueSize = 5;
+  pw::trace::internal::TraceQueue<kQueueSize> queue;
+  for (size_t i = 0; i < kQueueSize; i++) {
+    EXPECT_EQ(queue.TryPushBack(QUEUE_TESTS_ARGS(i)), pw::Status::OK);
+  }
+  EXPECT_FALSE(queue.IsEmpty());
+  EXPECT_TRUE(queue.IsFull());
+  EXPECT_EQ(queue.TryPushBack(QUEUE_TESTS_ARGS(1)),
+            pw::Status::RESOURCE_EXHAUSTED);
+
+  for (size_t i = 0; i < kQueueSize; i++) {
+    EXPECT_TRUE(QUEUE_CHECK_RESULT(kQueueSize, queue.PeekFront(), i));
+    queue.PopFront();
+  }
+  EXPECT_TRUE(queue.IsEmpty());
+  EXPECT_TRUE(queue.PeekFront() == nullptr);
+  EXPECT_FALSE(queue.IsFull());
+}
+
+TEST(TokenizedTrace, Clear) {
+  constexpr size_t kQueueSize = 5;
+  pw::trace::internal::TraceQueue<kQueueSize> queue;
+  for (size_t i = 0; i < kQueueSize; i++) {
+    EXPECT_EQ(queue.TryPushBack(QUEUE_TESTS_ARGS(i)), pw::Status::OK);
+  }
+  EXPECT_FALSE(queue.IsEmpty());
+  EXPECT_TRUE(queue.IsFull());
+  queue.Clear();
+  EXPECT_TRUE(queue.IsEmpty());
+  EXPECT_TRUE(queue.PeekFront() == nullptr);
+  EXPECT_FALSE(queue.IsFull());
+}
diff --git a/pw_unit_test/BUILD.gn b/pw_unit_test/BUILD.gn
index 5184919..c4494ae 100644
--- a/pw_unit_test/BUILD.gn
+++ b/pw_unit_test/BUILD.gn
@@ -12,12 +12,12 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/target_types.gni")
 import("$dir_pw_docgen/docs.gni")
 import("$dir_pw_unit_test/test.gni")
+
 config("default_config") {
   include_dirs = [
     "public",
@@ -38,7 +38,7 @@
     "public/pw_unit_test/framework.h",
     "public_overrides/gtest/gtest.h",
   ]
-  sources = [ "framework.cc" ] + public
+  sources = [ "framework.cc" ]
 }
 
 # Library providing an event handler which outputs human-readable text.
@@ -48,7 +48,7 @@
     "$dir_pw_preprocessor",
   ]
   public = [ "public/pw_unit_test/simple_printing_event_handler.h" ]
-  sources = [ "simple_printing_event_handler.cc" ] + public
+  sources = [ "simple_printing_event_handler.cc" ]
 }
 
 # Library providing a standard desktop main function for the pw_unit_test
@@ -75,7 +75,7 @@
     "$dir_pw_preprocessor",
   ]
   public = [ "public/pw_unit_test/logging_event_handler.h" ]
-  sources = [ "logging_event_handler.cc" ] + public
+  sources = [ "logging_event_handler.cc" ]
 }
 
 pw_source_set("logging_main") {
diff --git a/pw_unit_test/CMakeLists.txt b/pw_unit_test/CMakeLists.txt
index 3767b4e..983fe30 100644
--- a/pw_unit_test/CMakeLists.txt
+++ b/pw_unit_test/CMakeLists.txt
@@ -12,6 +12,8 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+
 pw_add_module_library(pw_unit_test
   SOURCES
     framework.cc
diff --git a/pw_unit_test/docs.rst b/pw_unit_test/docs.rst
index 7955f6c..5bec124 100644
--- a/pw_unit_test/docs.rst
+++ b/pw_unit_test/docs.rst
@@ -1,7 +1,4 @@
-.. _chapter-pw-unit-test:
-.. default-domain:: cpp
-
-.. highlight:: sh
+.. _module-pw_unit_test:
 
 ------------
 pw_unit_test
diff --git a/pw_unit_test/framework.cc b/pw_unit_test/framework.cc
index 1a27eda..5b7975f 100644
--- a/pw_unit_test/framework.cc
+++ b/pw_unit_test/framework.cc
@@ -16,7 +16,8 @@
 
 #include <cstring>
 
-namespace pw::unit_test {
+namespace pw {
+namespace unit_test {
 
 void RegisterEventHandler(EventHandler* event_handler) {
   internal::Framework::Get().RegisterEventHandler(event_handler);
@@ -121,4 +122,5 @@
 }
 
 }  // namespace internal
-}  // namespace pw::unit_test
+}  // namespace unit_test
+}  // namespace pw
diff --git a/pw_unit_test/public/pw_unit_test/framework.h b/pw_unit_test/public/pw_unit_test/framework.h
index cee6a70..b9db35b 100644
--- a/pw_unit_test/public/pw_unit_test/framework.h
+++ b/pw_unit_test/public/pw_unit_test/framework.h
@@ -36,11 +36,10 @@
   _PW_TEST(test_suite_name, test_name, ::pw::unit_test::Test)
 
 // TEST() is a pretty generic macro name which could conflict with other code.
-// If PW_TEST_DONT_DEFINE_TEST is set, don't alias PW_TEST to TEST.
-// GTEST_DONT_DEFINE_TEST is also accepted for compatibility.
-#if !PW_TEST_DONT_DEFINE_TEST && !GTEST_DONT_DEFINE_TEST
+// If GTEST_DONT_DEFINE_TEST is set, don't alias PW_TEST to TEST.
+#if !(defined(GTEST_DONT_DEFINE_TEST) && GTEST_DONT_DEFINE_TEST)
 #define TEST PW_TEST
-#endif  // !PW_TEST_DONT_DEFINE_TEST && !GTEST_DONT_DEFINE_TEST
+#endif  // !GTEST_DONT_DEFINE_TEST
 
 #define TEST_F(test_fixture, test_name) \
   _PW_TEST(test_fixture, test_name, test_fixture)
@@ -76,18 +75,18 @@
 
 // Define either macro to 1 to omit the definition of FAIL(), which is a
 // generic name and clashes with some other libraries.
-#if !PW_TEST_DONT_DEFINE_FAIL && !GTEST_DONT_DEFINE_FAIL
+#if !(defined(GTEST_DONT_DEFINE_FAIL) && GTEST_DONT_DEFINE_FAIL)
 #define FAIL() GTEST_FAIL()
-#endif  // !PW_TEST_DONT_DEFINE_FAIL && !GTEST_DONT_DEFINE_FAIL
+#endif  // !GTEST_DONT_DEFINE_FAIL
 
 // Generates a success with a generic message.
 #define GTEST_SUCCEED() _PW_TEST_MESSAGE("(success)", "(success)", true)
 
 // Define either macro to 1 to omit the definition of SUCCEED(), which
 // is a generic name and clashes with some other libraries.
-#if !PW_TEST_DONT_DEFINE_SUCCEED && !GTEST_DONT_DEFINE_SUCCEED
+#if !(defined(GTEST_DONT_DEFINE_SUCCEED) && GTEST_DONT_DEFINE_SUCCEED)
 #define SUCCEED() GTEST_SUCCEED()
-#endif  // !PW_TEST_DONT_DEFINE_SUCCEED && !GTEST_DONT_DEFINE_SUCCEED
+#endif  // !GTEST_DONT_DEFINE_SUCCEED
 
 // pw_unit_test framework entry point. Runs every registered test case and
 // dispatches the results through the event handler. Returns a status of zero
@@ -198,6 +197,10 @@
     Framework& framework = Get();
     framework.StartTest(test_info);
 
+    // Reset the memory pool to a marker value to help detect use of
+    // uninitialized memory.
+    std::memset(&framework.memory_pool_, 0xa5, sizeof(framework.memory_pool_));
+
     // Construct the test object within the static memory pool. The StartTest
     // function has already been called by the TestInfo at this point.
     TestInstance* test_instance = new (&framework.memory_pool_) TestInstance;
@@ -422,9 +425,14 @@
   ::pw::unit_test::internal::Framework::Get().ExpectationResult( \
       expected, actual, __LINE__, success)
 
-#define _PW_TEST_OP(expect_or_assert, lhs, rhs, op) \
-  expect_or_assert(                                 \
-      lhs, rhs, [](const auto& l, const auto& r) { return l op r; }, #op)
+#define _PW_TEST_OP(expect_or_assert, lhs, rhs, op)  \
+  expect_or_assert(                                  \
+      lhs,                                           \
+      rhs,                                           \
+      [](const auto& _pw_lhs, const auto& _pw_rhs) { \
+        return _pw_lhs op _pw_rhs;                   \
+      },                                             \
+      #op)
 
 // Implement boolean expectations in a C++11-compatible way.
 #define _PW_EXPECT_BOOL(expr, value)                             \
@@ -443,18 +451,22 @@
     }                                    \
   } while (0)
 
-#define _PW_TEST_STREQ(expect_or_assert, lhs, rhs)                         \
-  expect_or_assert(                                                        \
-      lhs,                                                                 \
-      rhs,                                                                 \
-      [](const auto& l, const auto& r) { return std::strcmp(l, r) == 0; }, \
+#define _PW_TEST_STREQ(expect_or_assert, lhs, rhs)   \
+  expect_or_assert(                                  \
+      lhs,                                           \
+      rhs,                                           \
+      [](const auto& _pw_lhs, const auto& _pw_rhs) { \
+        return std::strcmp(_pw_lhs, _pw_rhs) == 0;   \
+      },                                             \
       "equals")
 
-#define _PW_TEST_STRNE(expect_or_assert, lhs, rhs)                         \
-  expect_or_assert(                                                        \
-      lhs,                                                                 \
-      rhs,                                                                 \
-      [](const auto& l, const auto& r) { return std::strcmp(l, r) != 0; }, \
+#define _PW_TEST_STRNE(expect_or_assert, lhs, rhs)   \
+  expect_or_assert(                                  \
+      lhs,                                           \
+      rhs,                                           \
+      [](const auto& _pw_lhs, const auto& _pw_rhs) { \
+        return std::strcmp(_pw_lhs, _pw_rhs) != 0;   \
+      },                                             \
       "does not equal")
 
 // Alias Test as ::testing::Test for Googletest compatibility.
diff --git a/pw_protobuf_compiler/nanopb.gni b/pw_unit_test/py/BUILD.gn
similarity index 69%
copy from pw_protobuf_compiler/nanopb.gni
copy to pw_unit_test/py/BUILD.gn
index 3c3be32..8f37417 100644
--- a/pw_protobuf_compiler/nanopb.gni
+++ b/pw_unit_test/py/BUILD.gn
@@ -12,8 +12,15 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-declare_args() {
-  # If compiling protos for nanopb, this variable is set to the path to the
-  # nanopb installation.
-  dir_pw_third_party_nanopb = ""
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/python.gni")
+
+pw_python_package("py") {
+  setup = [ "setup.py" ]
+  sources = [
+    "pw_unit_test/__init__.py",
+    "pw_unit_test/test_runner.py",
+  ]
+  python_deps = [ "$dir_pw_cli/py" ]
 }
diff --git a/pw_unit_test/py/pw_unit_test/__init__.py b/pw_unit_test/py/pw_unit_test/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pw_unit_test/py/pw_unit_test/__init__.py
diff --git a/pw_unit_test/py/pw_unit_test/py.typed b/pw_unit_test/py/pw_unit_test/py.typed
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pw_unit_test/py/pw_unit_test/py.typed
diff --git a/pw_unit_test/py/setup.py b/pw_unit_test/py/setup.py
index bfe6b39..ea467d5 100644
--- a/pw_unit_test/py/setup.py
+++ b/pw_unit_test/py/setup.py
@@ -13,7 +13,7 @@
 # the License.
 """pw_unit_test"""
 
-import setuptools
+import setuptools  # type: ignore
 
 setuptools.setup(
     name='pw_unit_test',
@@ -22,6 +22,8 @@
     author_email='pigweed-developers@googlegroups.com',
     description='Unit tests for Pigweed projects',
     packages=setuptools.find_packages(),
+    package_data={'pw_unit_test': ['py.typed']},
+    zip_safe=False,
     install_requires=[
         'pw_cli',
     ],
diff --git a/pw_unit_test/test.gni b/pw_unit_test/test.gni
index d7d2417..58ae9aa 100644
--- a/pw_unit_test/test.gni
+++ b/pw_unit_test/test.gni
@@ -12,11 +12,11 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
-import("$dir_pw_build/python_script.gni")
+import("$dir_pw_build/python_action.gni")
 import("$dir_pw_build/target_types.gni")
+
 declare_args() {
   # Path to a test runner to automatically run unit tests after they are built.
   #
@@ -50,7 +50,7 @@
     # If the target is disabled, create an empty target in its place. Use an
     # action with the original target's sources as inputs to ensure that
     # the source files exist (even if they don't compile).
-    pw_python_script(target_name) {
+    pw_python_action(target_name) {
       script = "$dir_pw_build/py/pw_build/nop.py"
       stamp = true
 
@@ -161,7 +161,7 @@
       }
     }
 
-    pw_python_script(_test_to_run + "_run") {
+    pw_python_action(_test_to_run + "_run") {
       deps = [ ":$_test_target_name" ]
       inputs = [ pw_unit_test_AUTOMATIC_RUNNER ]
       script = "$dir_pw_unit_test/py/pw_unit_test/test_runner.py"
@@ -205,7 +205,7 @@
         _group_deps_metadata += [
           {
             type = "dep"
-            group = get_path_info(dep, "abspath")
+            group = get_label_info(dep, "label_no_toolchain")
           },
         ]
       }
@@ -220,7 +220,7 @@
         self = [
           {
             type = "self"
-            name = get_path_info(":$_group_target", "abspath")
+            name = get_label_info(":$_group_target", "label_no_toolchain")
           },
         ]
 
diff --git a/pw_varint/BUILD.gn b/pw_varint/BUILD.gn
index 5b55b7b..2f06bac 100644
--- a/pw_varint/BUILD.gn
+++ b/pw_varint/BUILD.gn
@@ -12,12 +12,12 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/target_types.gni")
 import("$dir_pw_docgen/docs.gni")
 import("$dir_pw_unit_test/test.gni")
+
 config("default_config") {
   include_dirs = [ "public" ]
 }
diff --git a/pw_varint/CMakeLists.txt b/pw_varint/CMakeLists.txt
index 6e66eb1..84da984 100644
--- a/pw_varint/CMakeLists.txt
+++ b/pw_varint/CMakeLists.txt
@@ -12,6 +12,8 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+
 pw_auto_add_simple_module(pw_varint
   PUBLIC_DEPS
     pw_preprocessor
diff --git a/pw_varint/docs.rst b/pw_varint/docs.rst
index 49db88e..3327ccd 100644
--- a/pw_varint/docs.rst
+++ b/pw_varint/docs.rst
@@ -1,6 +1,4 @@
-.. default-domain:: cpp
-
-.. highlight:: sh
+.. _module-pw_varint:
 
 ---------
 pw_varint
diff --git a/pw_varint/public/pw_varint/varint.h b/pw_varint/public/pw_varint/varint.h
index b9f6e59..a8db949 100644
--- a/pw_varint/public/pw_varint/varint.h
+++ b/pw_varint/public/pw_varint/varint.h
@@ -43,14 +43,14 @@
 #include <span>
 #include <type_traits>
 
-#include "pw_polyfill/language_features.h"
+#include "pw_polyfill/language_feature_macros.h"
 
 namespace pw {
 namespace varint {
 
-// The maximum number of bytes occupied by an encoded varint. The maximum
-// uint64_t occupies 10 bytes when encoded.
-PW_INLINE_VARIABLE constexpr size_t kMaxVarintSizeBytes = 10;
+// The maximum number of bytes occupied by an encoded varint.
+PW_INLINE_VARIABLE constexpr size_t kMaxVarint32SizeBytes = 5;
+PW_INLINE_VARIABLE constexpr size_t kMaxVarint64SizeBytes = 10;
 
 // ZigZag encodes a signed integer. This maps small negative numbers to small,
 // unsigned positive numbers, which improves their density for LEB128 encoding.
@@ -119,7 +119,7 @@
 //     size_t bytes = Decode(data, &value);
 //
 //     if (bytes == 0u) {
-//       return Status::DATA_LOSS;
+//       return Status::DataLoss();
 //     }
 //     results.push_back(value);
 //     data = data.subspan(bytes)
@@ -134,13 +134,13 @@
 }
 
 // Returns a size of an integer when encoded as a varint.
-inline size_t EncodedSize(uint64_t integer) {
-  return pw_VarintEncodedSize(integer);
+constexpr size_t EncodedSize(uint64_t integer) {
+  return integer == 0 ? 1 : (64 - __builtin_clzll(integer) + 6) / 7;
 }
 
 // Returns a size of an signed integer when ZigZag encoded as a varint.
-inline size_t ZigZagEncodedSize(int64_t integer) {
-  return pw_VarintZigZagEncodedSize(integer);
+constexpr size_t ZigZagEncodedSize(int64_t integer) {
+  return EncodedSize(ZigZagEncode(integer));
 }
 
 }  // namespace varint
diff --git a/pw_varint/varint.cc b/pw_varint/varint.cc
index 9b782c0..e2d1eb2 100644
--- a/pw_varint/varint.cc
+++ b/pw_varint/varint.cc
@@ -53,7 +53,7 @@
   const std::byte* buffer = static_cast<const std::byte*>(input);
 
   // The largest 64-bit ints require 10 B.
-  const size_t max_count = std::min(kMaxVarintSizeBytes, input_size);
+  const size_t max_count = std::min(kMaxVarint64SizeBytes, input_size);
 
   while (true) {
     if (count >= max_count) {
@@ -84,11 +84,11 @@
 }
 
 extern "C" size_t pw_VarintEncodedSize(uint64_t integer) {
-  return integer == 0 ? 1 : (64 - __builtin_clzll(integer) + 6) / 7;
+  return EncodedSize(integer);
 }
 
 extern "C" size_t pw_VarintZigZagEncodedSize(int64_t integer) {
-  return pw_VarintEncodedSize(ZigZagEncode(integer));
+  return ZigZagEncodedSize(integer);
 }
 
 }  // namespace varint
diff --git a/pw_watch/BUILD.gn b/pw_watch/BUILD.gn
index 3ba75f4..c0c9988 100644
--- a/pw_watch/BUILD.gn
+++ b/pw_watch/BUILD.gn
@@ -12,10 +12,10 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_docgen/docs.gni")
+
 pw_doc_group("docs") {
   inputs = [ "doc_resources/pw_watch_on_device_demo.gif" ]
   sources = [ "docs.rst" ]
diff --git a/pw_watch/docs.rst b/pw_watch/docs.rst
index e1967d2..f5f0c6c 100644
--- a/pw_watch/docs.rst
+++ b/pw_watch/docs.rst
@@ -1,8 +1,4 @@
-.. default-domain:: python
-
-.. highlight:: sh
-
-.. _chapter-watch:
+.. _module-pw_watch:
 
 --------
 pw_watch
@@ -18,7 +14,7 @@
 
 .. note::
 
-  ``pw_watch`` currently only works with Pigweed's GN build.
+  ``pw_watch`` currently only works with Pigweed's GN and CMake builds.
 
 Module Usage
 ============
@@ -64,4 +60,4 @@
 affected by a file change are run when ``pw_watch`` triggers a build. By
 default, host builds using ``pw_watch`` will run unit tests. To run unit tests
 on a device as part of ``pw_watch``, refer to your device's
-:ref:`target documentation<chapter-targets>`.
+:ref:`target documentation<docs-targets>`.
diff --git a/pw_protobuf_compiler/nanopb.gni b/pw_watch/py/BUILD.gn
similarity index 68%
copy from pw_protobuf_compiler/nanopb.gni
copy to pw_watch/py/BUILD.gn
index 3c3be32..a8fe59d 100644
--- a/pw_protobuf_compiler/nanopb.gni
+++ b/pw_watch/py/BUILD.gn
@@ -12,8 +12,16 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-declare_args() {
-  # If compiling protos for nanopb, this variable is set to the path to the
-  # nanopb installation.
-  dir_pw_third_party_nanopb = ""
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/python.gni")
+
+pw_python_package("py") {
+  setup = [ "setup.py" ]
+  sources = [
+    "pw_watch/__init__.py",
+    "pw_watch/debounce.py",
+    "pw_watch/watch.py",
+    "pw_watch/watch_test.py",
+  ]
 }
diff --git a/pw_watch/py/pw_watch/__init__.py b/pw_watch/py/pw_watch/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pw_watch/py/pw_watch/__init__.py
diff --git a/pw_watch/py/pw_watch/debounce.py b/pw_watch/py/pw_watch/debounce.py
index dd9005d..73c5e97 100644
--- a/pw_watch/py/pw_watch/debounce.py
+++ b/pw_watch/py/pw_watch/debounce.py
@@ -24,17 +24,17 @@
 class DebouncedFunction(ABC):
     """Function to be run by Debouncer"""
     @abstractmethod
-    def run(self):
+    def run(self) -> None:
         """Run the function"""
 
     @abstractmethod
-    def cancel(self):
+    def cancel(self) -> bool:
         """Cancel an in-progress run of the function.
         Must be called from different thread than run().
         Returns true if run was successfully cancelled, false otherwise"""
 
     @abstractmethod
-    def on_complete(self, cancelled=False):
+    def on_complete(self, cancelled: bool = False) -> bool:
         """Called after run() finishes. If true, cancelled indicates
         cancel() was invoked during the last run()"""
 
diff --git a/pw_watch/py/pw_watch/py.typed b/pw_watch/py/pw_watch/py.typed
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pw_watch/py/pw_watch/py.typed
diff --git a/pw_watch/py/pw_watch/watch.py b/pw_watch/py/pw_watch/watch.py
index 287661f..7f02980 100755
--- a/pw_watch/py/pw_watch/watch.py
+++ b/pw_watch/py/pw_watch/watch.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python
 # Copyright 2020 The Pigweed Authors
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may not
@@ -25,10 +26,9 @@
 import threading
 from typing import List, NamedTuple, Optional, Sequence, Tuple
 
-from watchdog.events import FileSystemEventHandler
-from watchdog.observers import Observer
-from watchdog.utils import has_attribute
-from watchdog.utils import unicode_paths
+from watchdog.events import FileSystemEventHandler  # type: ignore
+from watchdog.observers import Observer  # type: ignore
+from watchdog.utils import has_attribute, unicode_paths  # type: ignore
 
 import pw_cli.branding
 import pw_cli.color
@@ -103,22 +103,23 @@
         self,
         patterns: Sequence[str] = (),
         ignore_patterns: Sequence[str] = (),
-        case_sensitive: bool = False,
         build_commands: Sequence[BuildCommand] = (),
         ignore_dirs=Optional[List[str]],
         charset: WatchCharset = _ASCII_CHARSET,
+        restart: bool = False,
     ):
-        super(PigweedBuildWatcher, self).__init__()
+        super().__init__()
 
         self.patterns = patterns
         self.ignore_patterns = ignore_patterns
-        self.case_sensitive = case_sensitive
         self.build_commands = build_commands
         self.ignore_dirs = ignore_dirs or []
         self.ignore_dirs.extend(cmd.build_dir for cmd in self.build_commands)
-        self.cooldown_finish_time = None
         self.charset: WatchCharset = charset
 
+        self.restart_on_changes = restart
+        self._current_build: Optional[subprocess.Popen] = None
+
         self.debouncer = Debouncer(self)
 
         # Track state of a build. These need to be members instead of locals
@@ -194,7 +195,8 @@
         if self.matching_path is None:
             self.matching_path = matching_path
 
-        self.debouncer.press('File change detected')
+        self.debouncer.press(
+            f'File change detected: {os.path.relpath(matching_path)}')
 
     # Implementation of DebouncedFunction.run()
     #
@@ -216,18 +218,22 @@
         self.builds_succeeded = []
         num_builds = len(self.build_commands)
         _LOG.info('Starting build with %d directories', num_builds)
+
+        env = os.environ.copy()
+        # Force colors in Pigweed subcommands run through the watcher.
+        env['PW_USE_COLOR'] = '1'
+
         for i, cmd in enumerate(self.build_commands, 1):
             _LOG.info('[%d/%d] Starting build: %s', i, num_builds, cmd)
 
             # Run the build. Put a blank before/after for visual separation.
             print()
-            env = os.environ.copy()
-            # Force colors in Pigweed subcommands run through the watcher.
-            env['PW_USE_COLOR'] = '1'
-            result = subprocess.run(['ninja', '-C', *cmd.args()], env=env)
+            self._current_build = subprocess.Popen(
+                ['ninja', '-C', *cmd.args()], env=env)
+            returncode = self._current_build.wait()
             print()
 
-            build_ok = (result.returncode == 0)
+            build_ok = (returncode == 0)
             if build_ok:
                 level = logging.INFO
                 tag = '(OK)'
@@ -240,9 +246,10 @@
 
     # Implementation of DebouncedFunction.cancel()
     def cancel(self):
-        # TODO: Finish implementing this by supporting cancelling the currently
-        # running build. This will require some subprocess shenanigans and
-        # so will leave this for later.
+        if self.restart_on_changes:
+            self._current_build.terminate()
+            return True
+
         return False
 
     # Implementation of DebouncedFunction.run()
@@ -297,6 +304,8 @@
     '*.c',
     '*.cc',
     '*.cpp',
+    '*.cmake',
+    'CMakeLists.txt',
     '*.gn',
     '*.gni',
     '*.go',
@@ -326,7 +335,9 @@
                         nargs='+',
                         help=('directories to ignore during pw watch'),
                         default=[])
-
+    parser.add_argument('--restart',
+                        action='store_true',
+                        help='restart an ongoing build if files change')
     parser.add_argument(
         'build_targets',
         nargs='*',
@@ -441,20 +452,69 @@
     return subdirectories_to_watch
 
 
-def get_exclude_list(exclude_list):
-    # Preset exclude list for pigweed directory.
-    pigweed_exclude_list = [
-        pathlib.Path(os.environ['PW_ROOT'], x)
-        for x in ['.cipd', '.git', 'out', '.python3-env', '.presubmit']
+def gitignore_patterns():
+    """Load patterns in pw_root_dir/.gitignore and return as [str]"""
+    pw_root_dir = pathlib.Path(os.environ['PW_ROOT'])
+
+    # Get top level .gitignore entries
+    gitignore_path = pw_root_dir / pathlib.Path('.gitignore')
+    if gitignore_path.exists():
+        for line in gitignore_path.read_text().splitlines():
+            globname = line.strip()
+            # If line is empty or a comment.
+            if not globname or globname.startswith('#'):
+                continue
+            yield line
+
+
+def get_common_excludes():
+    """Find commonly excluded directories, and return them as a [Path]"""
+    exclude_list = []
+
+    # Preset exclude list for Pigweed's upstream directories.
+    pw_root_dir = pathlib.Path(os.environ['PW_ROOT'])
+    exclude_list.extend([
+        pw_root_dir / ignored_directory for ignored_directory in [
+            '.environment',  # Bootstrap-created CIPD and Python venv.
+            '.presubmit',  # Presubmit-created CIPD and Python venv.
+            '.git',  # Pigweed's git repo.
+            '.mypy_cache',  # Python static analyzer.
+            '.cargo',  # Rust package manager.
+            'out',  # Typical build directory.
+        ]
+    ])
+
+    # Preset exclude for common downstream project structures.
+    #
+    # By convention, Pigweed projects use "out" as a build directory, so if
+    # watch is invoked outside the Pigweed root, also ignore the local out
+    # directory.
+    cur_dir = pathlib.Path.cwd()
+    if cur_dir != pw_root_dir:
+        exclude_list.append(cur_dir / 'out')
+
+    # Check for and warn about legacy directories.
+    legacy_directories = [
+        '.cipd',  # Legacy CIPD location.
+        '.python3-venv',  # Legacy Python venv location.
     ]
-    return exclude_list + pigweed_exclude_list
+    found_legacy = False
+    for legacy_directory in legacy_directories:
+        full_legacy_directory = pw_root_dir / legacy_directory
+        if full_legacy_directory.is_dir():
+            _LOG.warning('Legacy environment directory found: %s',
+                         str(full_legacy_directory))
+            exclude_list.append(full_legacy_directory)
+            found_legacy = True
+    if found_legacy:
+        _LOG.warning('Found legacy environment directory(s); these '
+                     'should be deleted')
+
+    return exclude_list
 
 
-def watch(build_targets=None,
-          build_directory=None,
-          patterns=None,
-          ignore_patterns=None,
-          exclude_list=None):
+def watch(build_targets, build_directory, patterns, ignore_patterns,
+          exclude_list, restart: bool):
     """TODO(keir) docstring"""
 
     _LOG.info('Starting Pigweed build watcher')
@@ -463,16 +523,15 @@
     if os.environ['PW_ROOT'] is None:
         _exit_due_to_pigweed_not_installed()
     path_of_pigweed = pathlib.Path(os.environ['PW_ROOT'])
-    cur_dir = pathlib.Path(os.getcwd())
+    cur_dir = pathlib.Path.cwd()
     if (not (is_subdirectory(path_of_pigweed, cur_dir)
              or path_of_pigweed == cur_dir)):
         _exit_due_to_pigweed_not_installed()
 
     # Preset exclude list for pigweed directory.
-    exclude_list = get_exclude_list(exclude_list)
+    exclude_list += get_common_excludes()
 
-    subdirectories_to_watch \
-        = minimal_watch_directories(cur_dir, exclude_list)
+    subdirectories_to_watch = minimal_watch_directories(cur_dir, exclude_list)
 
     # If no build directory was specified, search the tree for GN build
     # directories and try to build them all. In the future this may cause
@@ -514,22 +573,17 @@
 
     _LOG.debug('Patterns: %s', patterns)
 
-    path_of_directory_to_watch = '.'
-
     # Try to make a short display path for the watched directory that has
     # "$HOME" instead of the full home directory. This is nice for users
     # who have deeply nested home directories.
-    path_to_log = pathlib.Path(path_of_directory_to_watch).resolve()
-    try:
-        path_to_log = path_to_log.relative_to(pathlib.Path.home())
-        path_to_log = f'$HOME/{path_to_log}'
-    except ValueError:
-        # The directory is somewhere other than inside the users home.
-        path_to_log = path_of_directory_to_watch
+    path_to_log = str(pathlib.Path().resolve()).replace(
+        str(pathlib.Path.home()), '$HOME')
 
     # Ignore the user-specified patterns.
     ignore_patterns = (ignore_patterns.split(_WATCH_PATTERN_DELIMITER)
                        if ignore_patterns else [])
+    # Ignore top level pw_root_dir/.gitignore patterns.
+    ignore_patterns += gitignore_patterns()
 
     ignore_dirs = ['.presubmit', '.python3-env']
 
@@ -545,6 +599,7 @@
         build_commands=build_commands,
         ignore_dirs=ignore_dirs,
         charset=charset,
+        restart=restart,
     )
 
     try:
diff --git a/pw_watch/py/setup.py b/pw_watch/py/setup.py
index f099427..7958eab 100644
--- a/pw_watch/py/setup.py
+++ b/pw_watch/py/setup.py
@@ -13,7 +13,7 @@
 # the License.
 """pw_watch"""
 
-import setuptools
+import setuptools  # type: ignore
 
 setuptools.setup(
     name='pw_watch',
@@ -22,6 +22,8 @@
     author_email='pigweed-developers@googlegroups.com',
     description='Pigweed automatic builder',
     packages=setuptools.find_packages(),
+    package_data={'pw_watch': ['py.typed']},
+    zip_safe=False,
     install_requires=[
         'watchdog',
     ],
diff --git a/pw_web_ui/BUILD b/pw_web_ui/BUILD
index c50d109..118fba4 100644
--- a/pw_web_ui/BUILD
+++ b/pw_web_ui/BUILD
@@ -12,3 +12,18 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 exports_files(["rollup.config.js"], visibility = ["//:__subpackages__"])
+
+alias(
+    name = "devserver",
+    actual = "//pw_web_ui/src/frontend:devserver",
+)
+
+alias(
+    name = "prodserver",
+    actual = "//pw_web_ui/src/frontend:prodserver",
+)
+
+alias(
+    name = "app_bundle",
+    actual = "//pw_web_ui/src/frontend:app_bundle",
+)
diff --git a/pw_web_ui/BUILD.gn b/pw_web_ui/BUILD.gn
index 601472c..dd021e8 100644
--- a/pw_web_ui/BUILD.gn
+++ b/pw_web_ui/BUILD.gn
@@ -12,10 +12,10 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_docgen/docs.gni")
+
 pw_doc_group("docs") {
   sources = [ "docs.rst" ]
 }
diff --git a/pw_web_ui/docs.rst b/pw_web_ui/docs.rst
index 05ca8c5..18d5f21 100644
--- a/pw_web_ui/docs.rst
+++ b/pw_web_ui/docs.rst
@@ -1,14 +1,11 @@
-.. _chapter-pw-web-ui:
+.. _module-pw_web_ui:
 
-.. default-domain:: **js**
-
------------
+---------
 pw_web_ui
------------
+---------
 
 This module is a set of npm libraries for building web UIs
 for pigweed devices.
 
 Note that this module and its documentation are currently incomplete and
 experimental.
-
diff --git a/pw_web_ui/src/frontend/BUILD b/pw_web_ui/src/frontend/BUILD
index fbd4609..03da0b0 100644
--- a/pw_web_ui/src/frontend/BUILD
+++ b/pw_web_ui/src/frontend/BUILD
@@ -14,7 +14,7 @@
 
 package(default_visibility = ["//visibility:public"])
 
-load("@npm_bazel_typescript//:index.bzl", "ts_library", "ts_devserver")
+load("@npm//@bazel/typescript:index.bzl", "ts_library", "ts_devserver")
 load("//pw_web_ui:web_bundle.bzl", "web_bundle")
 
 ts_library(
@@ -50,3 +50,13 @@
         ":app_bundle",
     ]
 )
+
+ts_devserver(
+    # Bundles and serves the production bundle for testing.
+    # Should NOT be used for serving in production.
+    name = "prodserver",
+    static_files = [
+        "index.html",
+        ":app_bundle",
+    ]
+)
diff --git a/pw_web_ui/src/transport/BUILD b/pw_web_ui/src/transport/BUILD
index 3cb402e..14959a1 100644
--- a/pw_web_ui/src/transport/BUILD
+++ b/pw_web_ui/src/transport/BUILD
@@ -14,10 +14,10 @@
 
 package(default_visibility = ["//visibility:public"])
 
-load("@npm_bazel_typescript//:index.bzl", "ts_library")
-load("@npm_bazel_jasmine//:index.bzl", "jasmine_node_test")
+load("@npm//@bazel/typescript:index.bzl", "ts_library")
+load("@npm//@bazel/jasmine:index.bzl", "jasmine_node_test")
 load("//pw_web_ui:web_bundle.bzl", "web_bundle")
-load("@npm_bazel_karma//:index.bzl", "karma_web_test")
+load("@npm//@bazel/karma:index.bzl", "karma_web_test")
 
 ts_library(
     name = "device_transport_lib",
diff --git a/pw_web_ui/src/transport/web_serial_transport.ts b/pw_web_ui/src/transport/web_serial_transport.ts
index 5150eb7..b45ee8e 100644
--- a/pw_web_ui/src/transport/web_serial_transport.ts
+++ b/pw_web_ui/src/transport/web_serial_transport.ts
@@ -13,24 +13,49 @@
 // the License.
 
 /* eslint-env browser */
-import {BehaviorSubject, Subject} from 'rxjs';
+import {BehaviorSubject, Subject, Subscription, Observable} from 'rxjs';
 import DeviceTransport from './device_transport';
 
-const DEFAULT_SERIAL_OPTIONS: SerialOptions = {
+const DEFAULT_SERIAL_OPTIONS: SerialOptions & {baudRate: number} = {
+  // Some versions of chrome use `baudrate` (linux)
   baudrate: 921600,
+  // Some versions use `baudRate` (chromebook)
+  baudRate: 921600,
   databits: 8,
   parity: 'none',
   stopbits: 1,
 };
 
+interface PortReadConnection {
+  chunks: Observable<Uint8Array>;
+  errors: Observable<Error>;
+}
+
+interface PortConnection extends PortReadConnection {
+  sendChunk: (chunk: Uint8Array) => Promise<void>;
+}
+
+export class DeviceLostError extends Error {
+  message = 'The device has been lost';
+}
+
+export class DeviceLockedError extends Error {
+  message =
+    "The device's port is locked. Try unplugging it" +
+    ' and plugging it back in.';
+}
+
 /**
  * WebSerialTransport sends and receives UInt8Arrays to and
  * from a serial device connected over USB.
  */
 export class WebSerialTransport implements DeviceTransport {
   chunks = new Subject<Uint8Array>();
+  errors = new Subject<Error>();
   connected = new BehaviorSubject<boolean>(false);
-  private writer?: WritableStreamDefaultWriter<Uint8Array>;
+  private portConnections: Map<SerialPort, PortConnection> = new Map();
+  private activePortConnectionConnection: PortConnection | undefined;
+  private rxSubscriptions: Subscription[] = [];
 
   constructor(
     private serial: Serial = navigator.serial,
@@ -43,9 +68,8 @@
    * @param {Uint8Array} chunk The chunk to send
    */
   async sendChunk(chunk: Uint8Array): Promise<void> {
-    if (this.writer !== undefined && this.connected.getValue()) {
-      await this.writer.ready;
-      return this.writer.write(chunk);
+    if (this.activePortConnectionConnection) {
+      return this.activePortConnectionConnection.sendChunk(chunk);
     }
     throw new Error('Device not connected');
   }
@@ -56,41 +80,116 @@
    * be called in response to user interaction.
    */
   async connect(): Promise<void> {
-    let port: SerialPort;
-    try {
-      port = await this.serial.requestPort({filters: this.filters});
-    } catch (e) {
-      // Ignore errors where the user did not select a port.
-      if (!(e instanceof DOMException)) {
-        throw e;
-      }
-      return;
-    }
-
-    await port.open(this.serialOptions);
-    this.writer = port.writable.getWriter();
-
-    this.getChunks(port);
+    const port = await this.serial.requestPort({filters: this.filters});
+    await this.connectPort(port);
   }
 
-  private getChunks(port: SerialPort) {
-    port.readable.pipeTo(
-      new WritableStream({
-        write: chunk => {
+  private disconnect() {
+    for (const subscription of this.rxSubscriptions) {
+      subscription.unsubscribe();
+    }
+    this.rxSubscriptions = [];
+
+    this.activePortConnectionConnection = undefined;
+    this.connected.next(false);
+  }
+
+  /**
+   * Connect to a given SerialPort. This involves no user interaction
+   * and can be called whenever a port is available.
+   */
+  async connectPort(port: SerialPort): Promise<void> {
+    this.disconnect();
+
+    this.activePortConnectionConnection =
+      this.portConnections.get(port) ?? (await this.conectNewPort(port));
+
+    this.connected.next(true);
+
+    this.rxSubscriptions.push(
+      this.activePortConnectionConnection.chunks.subscribe(
+        chunk => {
           this.chunks.next(chunk);
         },
-        close: () => {
-          port.close();
-          this.writer?.releaseLock();
-          this.connected.next(false);
+        err => {
+          throw new Error(`Chunks observable had an unexpected error ${err}`);
         },
-        abort: () => {
-          // Reconnect to the port
+        () => {
           this.connected.next(false);
-          this.getChunks(port);
-        },
+          this.portConnections.delete(port);
+          // Don't complete the chunks observable because then it would not
+          // be able to forward any future chunks.
+        }
+      )
+    );
+
+    this.rxSubscriptions.push(
+      this.activePortConnectionConnection.errors.subscribe(error => {
+        this.errors.next(error);
+        if (error instanceof DeviceLostError) {
+          // The device has been lost
+          this.connected.next(false);
+        }
       })
     );
-    this.connected.next(true);
+  }
+
+  private async conectNewPort(port: SerialPort): Promise<PortConnection> {
+    await port.open(this.serialOptions);
+    const writer = port.writable.getWriter();
+
+    async function sendChunk(chunk: Uint8Array) {
+      await writer.ready;
+      await writer.write(chunk);
+    }
+
+    const {chunks, errors} = this.getChunks(port);
+
+    const connection: PortConnection = {sendChunk, chunks, errors};
+    this.portConnections.set(port, connection);
+    return connection;
+  }
+
+  private getChunks(port: SerialPort): PortReadConnection {
+    const chunks = new Subject<Uint8Array>();
+    const errors = new Subject<Error>();
+
+    async function read() {
+      if (!port.readable) {
+        throw new DeviceLostError();
+      }
+      if (port.readable.locked) {
+        throw new DeviceLockedError();
+      }
+      await port.readable.pipeTo(
+        new WritableStream({
+          write: chunk => {
+            chunks.next(chunk);
+          },
+          close: () => {
+            chunks.complete();
+            errors.complete();
+          },
+          abort: () => {
+            // Reconnect to the port.
+            connect();
+          },
+        })
+      );
+    }
+
+    function connect() {
+      read().catch(err => {
+        // Don't error the chunks observable since that stops it from
+        // reading any more packets, and we often want to continue
+        // despite an error. Instead, push errors to the 'errors'
+        // observable.
+        errors.next(err);
+      });
+    }
+
+    connect();
+
+    return {chunks, errors};
   }
 }
diff --git a/pw_web_ui/src/transport/web_serial_transport_test.ts b/pw_web_ui/src/transport/web_serial_transport_test.ts
index a194015..ee57058 100644
--- a/pw_web_ui/src/transport/web_serial_transport_test.ts
+++ b/pw_web_ui/src/transport/web_serial_transport_test.ts
@@ -15,7 +15,7 @@
 /* eslint-env browser, jasmine */
 import {last, take} from 'rxjs/operators';
 import {SerialMock} from './serial_mock';
-import {WebSerialTransport} from './web_serial_transport';
+import {WebSerialTransport, DeviceLockedError} from './web_serial_transport';
 
 describe('WebSerialTransport', () => {
   let serialMock: SerialMock;
@@ -48,13 +48,15 @@
     expect(serialMock.serialPort.writable.locked).toBeTrue();
   });
 
-  it('stops reading when it reaches the final chunk', async () => {
+  it('is disconnected when it reaches the final chunk', async () => {
     const transport = new WebSerialTransport(serialMock as Serial);
     await transport.connect();
-    const closePromise = transport.connected.pipe(take(2), last()).toPromise();
+    const disconnectPromise = transport.connected
+      .pipe(take(2), last())
+      .toPromise();
     serialMock.closeFromDevice();
 
-    expect(await closePromise).toBeFalse();
+    expect(await disconnectPromise).toBeFalse();
   });
 
   it('waits for the writer to be ready', async () => {
@@ -85,4 +87,21 @@
     await transport.sendChunk(data);
     expect(await dataToDevice).toEqual(data);
   });
+
+  it('throws an error on failing to connect', async () => {
+    const connectError = new Error('Example connection error');
+    spyOn(serialMock, 'requestPort').and.throwError(connectError);
+    const transport = new WebSerialTransport(serialMock as Serial);
+    await expectAsync(transport.connect()).toBeRejectedWith(connectError);
+  });
+
+  it("emits connection errors in the 'errors' observable", async () => {
+    const transport = new WebSerialTransport(serialMock as Serial);
+    await transport.connect();
+
+    const reportedErrorPromise = transport.errors.pipe(take(1)).toPromise();
+    serialMock.serialPort.errorFromDevice(new Error());
+
+    expect(await reportedErrorPromise).toEqual(new DeviceLockedError());
+  });
 });
diff --git a/pw_web_ui/types/BUILD b/pw_web_ui/types/BUILD
index 217767f..886610a 100644
--- a/pw_web_ui/types/BUILD
+++ b/pw_web_ui/types/BUILD
@@ -14,7 +14,7 @@
 
 package(default_visibility = ["//visibility:public"])
 
-load("@npm_bazel_typescript//:index.bzl", "ts_library")
+load("@npm//@bazel/typescript:index.bzl", "ts_library")
 
 ts_library(
     name = "serial_lib",
diff --git a/pw_web_ui/types/serial.d.ts b/pw_web_ui/types/serial.d.ts
index 307dde8..70557a2 100644
--- a/pw_web_ui/types/serial.d.ts
+++ b/pw_web_ui/types/serial.d.ts
@@ -1,19 +1,20 @@
 /**
- * Copyright 2019 Google LLC
+ * Copyright 2020 The Pigweed Authors
  *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
  *
  *     https://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
  */
 
+
 /** @see https://wicg.github.io/serial/#paritytype-enum */
 type ParityType = 'none'|'even'|'odd';
 
diff --git a/pw_web_ui/web_bundle.bzl b/pw_web_ui/web_bundle.bzl
index 73c4083..0ac17ea 100644
--- a/pw_web_ui/web_bundle.bzl
+++ b/pw_web_ui/web_bundle.bzl
@@ -12,7 +12,7 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-load("@npm_bazel_rollup//:index.bzl", "rollup_bundle")
+load("@npm//@bazel/rollup:index.bzl", "rollup_bundle")
 
 def web_bundle(name, deps, entry_point):
   rollup_bundle(
diff --git a/pw_protobuf_compiler/nanopb.gni b/targets/arduino/BUILD
similarity index 64%
copy from pw_protobuf_compiler/nanopb.gni
copy to targets/arduino/BUILD
index 3c3be32..458ddff 100644
--- a/pw_protobuf_compiler/nanopb.gni
+++ b/targets/arduino/BUILD
@@ -12,8 +12,22 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-declare_args() {
-  # If compiling protos for nanopb, this variable is set to the path to the
-  # nanopb installation.
-  dir_pw_third_party_nanopb = ""
-}
+load(
+    "//pw_build:pigweed.bzl",
+    "pw_cc_library",
+)
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])  # Apache License 2.0
+
+pw_cc_library(
+    name = "pre_init",
+    srcs = [
+        "init.cc",
+    ],
+    deps = [
+        "//pw_preprocessor",
+        "//pw_sys_io_arduino",
+    ],
+)
\ No newline at end of file
diff --git a/targets/arduino/BUILD.gn b/targets/arduino/BUILD.gn
new file mode 100644
index 0000000..146e784
--- /dev/null
+++ b/targets/arduino/BUILD.gn
@@ -0,0 +1,104 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_arduino_build/arduino.gni")
+import("$dir_pw_build/target_types.gni")
+import("$dir_pw_docgen/docs.gni")
+import("$dir_pw_toolchain/generate_toolchain.gni")
+pw_doc_group("target_docs") {
+  sources = [ "target_docs.rst" ]
+}
+
+if (dir_pw_third_party_arduino != "") {
+  import("target_toolchains.gni")
+
+  generate_toolchains("target_toolchains") {
+    toolchains = pw_target_toolchain_arduino_list
+  }
+
+  if (current_toolchain != default_toolchain) {
+    config("arduino_build") {
+      # Debug: Print out arduinobuilder.py args
+      # print(string_join(" ", [rebase_path(arduino_builder_script)] + arduino_show_command_args))
+
+      # Run prebuilds
+      # TODO(tonymd) This only needs to be run once but it's happening multiple times.
+      exec_script(arduino_builder_script,
+                  filter_exclude(arduino_run_command_args,
+                                 [ "--save-config" ]) + [ "--run-prebuilds" ],
+                  "string")
+
+      _exclude_flags = [ "-std=gnu++14" ]
+
+      _cflags = exec_script(arduino_builder_script,
+                            arduino_show_command_args + [ "--c-flags" ],
+                            "list lines")
+      cflags = filter_exclude(_cflags, _exclude_flags)
+
+      asmflags = exec_script(arduino_builder_script,
+                             arduino_show_command_args + [ "--s-only-flags" ],
+                             "list lines")
+
+      _cflags_cc =
+          exec_script(arduino_builder_script,
+                      arduino_show_command_args + [ "--cpp-only-flags" ],
+                      "list lines")
+      cflags_cc = filter_exclude(_cflags_cc, _exclude_flags)
+
+      _ldflags = exec_script(arduino_builder_script,
+                             arduino_show_command_args + [ "--ld-flags" ],
+                             "list lines")
+      ldflags =
+          filter_exclude(_ldflags,
+                         [
+                           # Remove arguments ending in 'core.a', for example:
+                           # C:/Users/username/pigweed/out/core.a
+                           "*core.a\b",
+
+                           # Remove .elf output file, for example:
+                           # -o C:/Users/username/pigweed/out/pigweed.elf
+                           "\b-o\b",
+                           "*.elf\b",
+
+                           # Remove the Arduino {object_files} variable
+                           "{object_files}",
+                         ])
+
+      # TODO(tonymd): Determine if libs are needed.
+      #   Teensy4 core recipe uses: '-larm_cortexM7lfsp_math -lm -lstdc++'
+      # libs = exec_script(arduino_builder_script,
+      #     arduino_show_command_args + [ "--ld-lib-names" ],
+      #     "list lines")
+    }
+
+    pw_source_set("pre_init") {
+      sources = [ "init.cc" ]
+      public_deps = [
+        "$dir_pw_sys_io_arduino",
+        "$dir_pw_third_party_arduino:arduino_core_sources",
+      ]
+      deps = [
+        "$dir_pw_arduino_build:arduino_init.facade",
+        "$dir_pw_preprocessor",
+      ]
+    }
+  }
+} else {
+  config("arduino_build") {
+  }
+  group("pre_init") {
+  }
+}
diff --git a/pw_protobuf_compiler/nanopb.gni b/targets/arduino/arduino_executable.gni
similarity index 61%
copy from pw_protobuf_compiler/nanopb.gni
copy to targets/arduino/arduino_executable.gni
index 3c3be32..a14da17 100644
--- a/pw_protobuf_compiler/nanopb.gni
+++ b/targets/arduino/arduino_executable.gni
@@ -12,8 +12,16 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-declare_args() {
-  # If compiling protos for nanopb, this variable is set to the path to the
-  # nanopb installation.
-  dir_pw_third_party_nanopb = ""
+import("//build_overrides/pigweed.gni")
+import("$dir_pw_malloc/backend.gni")
+
+# Executable wrapper that includes some baremetal startup code.
+template("arduino_executable") {
+  target("executable", target_name) {
+    forward_variables_from(invoker, "*")
+    if (!defined(deps)) {
+      deps = []
+    }
+    deps += [ "$dir_pw_arduino_build:arduino_main_wrapper" ]
+  }
 }
diff --git a/pw_sys_io_baremetal_lm3s6965evb/early_boot.c b/targets/arduino/init.cc
similarity index 65%
copy from pw_sys_io_baremetal_lm3s6965evb/early_boot.c
copy to targets/arduino/init.cc
index 1670b7d..8ea12c9 100644
--- a/pw_sys_io_baremetal_lm3s6965evb/early_boot.c
+++ b/targets/arduino/init.cc
@@ -12,6 +12,11 @@
 // License for the specific language governing permissions and limitations under
 // the License.
 
-#include "pw_boot_armv7m/boot.h"
+#include "pw_arduino_build/init.h"
 
-void pw_PreStaticConstructorInit() {}
\ No newline at end of file
+#include "pw_sys_io_arduino/init.h"
+
+// Arduino target specific init. For Pigweed, this calls pw_sys_io's init. User
+// projects may choose to provide something different if they need more pre-main
+// init functionality.
+extern "C" void pw_arduino_Init() { pw_sys_io_Init(); }
diff --git a/targets/arduino/target_docs.rst b/targets/arduino/target_docs.rst
new file mode 100644
index 0000000..5db55e8
--- /dev/null
+++ b/targets/arduino/target_docs.rst
@@ -0,0 +1,259 @@
+.. _target-arduino:
+
+-------
+Arduino
+-------
+
+This target supports building Pigweed on a few Arduino cores.
+
+.. seealso::
+   There are a few caveats when running Pigweed on top of the Arduino API. See
+   :ref:`module-pw_arduino_build` for details.
+
+Supported Boards
+================
+
+Currently only Teensy 4.x and 3.x boards are supported.
+
++------------------------------------------------------------------+-------------------------------------------------------------------+-----------+----------+-------------+
+| Core                                                             | Board Name                                                        | Compiling | Flashing | Test Runner |
++==================================================================+===================================================================+===========+==========+=============+
+| `teensy <https://www.pjrc.com/teensy/td_download.html>`_         | `Teensy 4.1 <https://www.pjrc.com/store/teensy41.html>`_          | ✓         | ✓        | ✓           |
++------------------------------------------------------------------+-------------------------------------------------------------------+-----------+----------+-------------+
+| `teensy <https://www.pjrc.com/teensy/td_download.html>`_         | `Teensy 4.0 <https://www.pjrc.com/store/teensy40.html>`_          | ✓         | ✓        | ✓           |
++------------------------------------------------------------------+-------------------------------------------------------------------+-----------+----------+-------------+
+| `teensy <https://www.pjrc.com/teensy/td_download.html>`_         | `Teensy 3.6 <https://www.pjrc.com/store/teensy36.html>`_          | ✓         | ✓        | ✓           |
++------------------------------------------------------------------+-------------------------------------------------------------------+-----------+----------+-------------+
+| `teensy <https://www.pjrc.com/teensy/td_download.html>`_         | `Teensy 3.5 <https://www.pjrc.com/store/teensy35.html>`_          | ✓         | ✓        | ✓           |
++------------------------------------------------------------------+-------------------------------------------------------------------+-----------+----------+-------------+
+| `teensy <https://www.pjrc.com/teensy/td_download.html>`_         | `Teensy 3.2 <https://www.pjrc.com/store/teensy32.html>`_          | ✓         | ✓        | ✓           |
++------------------------------------------------------------------+-------------------------------------------------------------------+-----------+----------+-------------+
+| `arduino-samd <https://github.com/arduino/ArduinoCore-samd>`_    | `Arduino Zero <https://store.arduino.cc/usa/arduino-zero>`_       |           |          |             |
++------------------------------------------------------------------+-------------------------------------------------------------------+-----------+----------+-------------+
+| `arduino-sam <https://github.com/arduino/ArduinoCore-sam>`_      | `Arduino Due <https://store.arduino.cc/usa/due>`_                 |           |          |             |
++------------------------------------------------------------------+-------------------------------------------------------------------+-----------+----------+-------------+
+| `adafruit-samd <https://github.com/adafruit/ArduinoCore-samd>`_  | `Adafruit Feather M0 <https://www.adafruit.com/?q=feather+m0>`_   |           |          |             |
++------------------------------------------------------------------+-------------------------------------------------------------------+-----------+----------+-------------+
+| `adafruit-samd <https://github.com/adafruit/ArduinoCore-samd>`_  | `Adafruit SAMD51 Boards <https://www.adafruit.com/category/952>`_ |           |          |             |
++------------------------------------------------------------------+-------------------------------------------------------------------+-----------+----------+-------------+
+| `stm32duino <https://github.com/stm32duino/Arduino_Core_STM32>`_ |                                                                   |           |          |             |
++------------------------------------------------------------------+-------------------------------------------------------------------+-----------+----------+-------------+
+
+Setup
+=====
+
+You must first install an Arduino core or let Pigweed know where you have cores
+installed using the ``dir_pw_third_party_arduino`` and ``arduino_package_path``
+build arguments.
+
+Installing Arduino Cores
+------------------------
+
+The ``arduino_builder`` utility can install Arduino cores automatically. It's
+recommended to install them into ``third_party/arduino/cores/``.
+
+.. code:: sh
+
+  # Setup pigweed environment.
+  source activate.sh
+  # Install an arduino core
+  arduino_builder install-core --prefix ./third_party/arduino/cores/ --core-name teensy
+
+Building
+========
+To build for this Pigweed target, simply build the top-level "arduino" Ninja
+target. You can set Arduino build options using ``gn args out`` or by running:
+
+.. code:: sh
+
+  gn gen out --args='dir_pw_third_party_arduino="//third_party/arduino"
+                     arduino_core_name="teensy"
+                     arduino_board="teensy40"
+                     arduino_menu_options=["menu.usb.serial", "menu.keys.en-us"]'
+
+On a Windows machine it's easier to run:
+
+.. code:: sh
+
+  gn args out
+
+That will open a text file where you can paste the args in:
+
+.. code:: text
+
+  dir_pw_third_party_arduino="//third_party/arduino"
+  arduino_core_name="teensy"
+  arduino_board="teensy40"
+  arduino_menu_options=["menu.usb.serial", "menu.keys.en-us"]
+
+Save the file and close the text editor.
+
+Then build with:
+
+.. code:: sh
+
+  ninja -C out arduino
+
+To see supported boards and Arduino menu options for a given core:
+
+.. code:: sh
+
+  arduino_builder --arduino-package-path ./third_party/arduino/cores/teensy \
+                  --arduino-package-name teensy/avr \
+                  list-boards
+
+.. code:: text
+
+  Board Name  Description
+  teensy41    Teensy 4.1
+  teensy40    Teensy 4.0
+  teensy36    Teensy 3.6
+  teensy35    Teensy 3.5
+  teensy31    Teensy 3.2 / 3.1
+
+You may wish to set different arduino build options in
+``arduino_menu_options``. Run this to see what's available for your core:
+
+.. code:: sh
+
+  arduino_builder --arduino-package-path ./third_party/arduino/cores/teensy \
+                  --arduino-package-name teensy/avr \
+                  list-menu-options --board teensy40
+
+That will show all menu options that can be added to ``gn args out``.
+
+.. code:: text
+
+  All Options
+  ----------------------------------------------------------------
+  menu.usb.serial             Serial
+  menu.usb.serial2            Dual Serial
+  menu.usb.serial3            Triple Serial
+  menu.usb.keyboard           Keyboard
+  menu.usb.touch              Keyboard + Touch Screen
+  menu.usb.hidtouch           Keyboard + Mouse + Touch Screen
+  menu.usb.hid                Keyboard + Mouse + Joystick
+  menu.usb.serialhid          Serial + Keyboard + Mouse + Joystick
+  menu.usb.midi               MIDI
+  ...
+
+  Default Options
+  --------------------------------------
+  menu.usb.serial             Serial
+  menu.speed.600              600 MHz
+  menu.opt.o2std              Faster
+  menu.keys.en-us             US English
+
+Testing
+=======
+When working in upstream Pigweed, building this target will build all Pigweed
+modules' unit tests.  These tests can be run on-device in a few different ways.
+
+Run a unit test
+---------------
+If using ``out`` as a build directory, tests will be located in
+``out/arduino_debug/obj/[module name]/[test_name].elf``.
+
+Tests can be flashed and run using the ``arduino_unit_test_runner`` tool. Here is
+a sample bash script to run all tests on a Linux machine.
+
+.. code:: sh
+
+  #!/bin/bash
+  gn gen out --export-compile-commands \
+      --args='dir_pw_third_party_arduino="//third_party/arduino"
+              arduino_core_name="teensy"
+              arduino_board="teensy40"
+              arduino_menu_options=["menu.usb.serial", "menu.keys.en-us"]' && \
+    ninja -C out arduino
+
+  for f in $(find out/arduino_debug/obj/ -iname "*.elf"); do
+      arduino_unit_test_runner --verbose \
+          --config-file ./out/arduino_debug/gen/arduino_builder_config.json \
+          --upload-tool teensyloader \
+          "$f"
+  done
+
+Using the test server
+---------------------
+
+Tests may also be run using the ``pw_arduino_use_test_server = true`` GN arg.
+The server must be run with an ``arduino_builder`` config file so it can locate
+the correct Arduino core, compiler path, and Arduino board used.
+
+.. code:: sh
+
+  arduino_test_server --verbose \
+      --config-file ./out/arduino_debug/gen/arduino_builder_config.json
+
+.. TODO(tonymd): Flesh out this section similar to the stm32f429i target docs.
+
+Flashing Known Issues
+---------------------
+
+Teensy Boards
+^^^^^^^^^^^^^
+
+By default Teensyduino uses the `Teensy Loader Application
+<https://www.pjrc.com/teensy/loader.html>`_ which has a couple limitations:
+
+- Requires a GUI (or X11 on Linux).
+- Can only flash one board at a time.
+
+GN Target Example
+=================
+
+Here is an example `pw_executable` gn rule that includes some Teensyduino
+libraries.
+
+.. code:: text
+
+  import("//build_overrides/pigweed.gni")
+  import("$dir_pw_arduino_build/arduino.gni")
+  import("$dir_pw_build/target_types.gni")
+
+  _library_args = [
+    "--library-path",
+    rebase_path(
+        "$dir_pw_third_party_arduino/cores/teensy/hardware/teensy/avr/libraries"
+    ),
+    "--library-names",
+    "Time",
+    "Wire",
+  ]
+
+  pw_executable("my_app") {
+    # All Library Sources
+    _library_c_files = exec_script(
+            arduino_builder_script,
+            arduino_show_command_args + _library_args + [
+              "--library-c-files"
+            ],
+            "list lines")
+    _library_cpp_files = exec_script(
+            arduino_builder_script,
+            arduino_show_command_args + _library_args + [
+              "--library-cpp-files"
+            ],
+            "list lines")
+
+    sources = [ "main.cc" ] + _library_c_files + _library_cpp_files
+
+    deps = [
+      "$dir_pw_hex_dump",
+      "$dir_pw_log",
+      "$dir_pw_string",
+    ]
+
+    include_dirs = exec_script(arduino_builder_script,
+                               arduino_show_command_args + _library_args +
+                                   [ "--library-include-dirs" ],
+                               "list lines")
+
+    # Required if using Arduino.h and any Arduino API functions
+    if (dir_pw_third_party_arduino != "") {
+      remove_configs = [ "$dir_pw_build:strict_warnings" ]
+      deps += [ "$dir_pw_third_party_arduino:arduino_core_sources" ]
+    }
+  }
+
diff --git a/targets/arduino/target_toolchains.gni b/targets/arduino/target_toolchains.gni
new file mode 100644
index 0000000..e06725b
--- /dev/null
+++ b/targets/arduino/target_toolchains.gni
@@ -0,0 +1,115 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_toolchain/arm_gcc/toolchains.gni")
+
+declare_args() {
+  # Enable the pw_target_runner for on-device testing.
+  pw_arduino_use_test_server = false
+}
+_target_config = {
+  # Use the logging main.
+  pw_unit_test_MAIN = "$dir_pw_unit_test:simple_printing_main"
+
+  # Configuration options for Pigweed executable targets.
+  pw_build_EXECUTABLE_TARGET_TYPE = "arduino_executable"
+
+  pw_build_EXECUTABLE_TARGET_TYPE_FILE =
+      get_path_info("arduino_executable.gni", "abspath")
+
+  # Path to the bloaty config file for the output binaries.
+  pw_bloat_BLOATY_CONFIG = "$dir_pw_boot_armv7m/bloaty_config.bloaty"
+
+  if (pw_arduino_use_test_server) {
+    _test_runner_script =
+        "$dir_pw_arduino_build/py/pw_arduino_build/unit_test_client.py"
+    pw_unit_test_AUTOMATIC_RUNNER =
+        get_path_info(_test_runner_script, "abspath")
+  }
+
+  # Facade backends
+  pw_assert_BACKEND = dir_pw_assert_basic
+  pw_log_BACKEND = dir_pw_log_basic
+  pw_sys_io_BACKEND = dir_pw_sys_io_arduino
+  pw_arduino_build_INIT_BACKEND = "$dir_pigweed/targets/arduino:pre_init"
+
+  current_cpu = "arm"
+  current_os = ""
+}
+
+_toolchain_properties = {
+  final_binary_extension = ".elf"
+}
+
+_target_default_configs = [
+  "$dir_pw_toolchain/arm_gcc:enable_float_printf",
+  "$dir_pigweed/targets/arduino:arduino_build",
+]
+
+pw_target_toolchain_arduino = {
+  _excluded_members = [
+    "defaults",
+    "name",
+  ]
+
+  debug = {
+    name = "arduino_debug"
+    _toolchain_base = pw_toolchain_arm_gcc.cortex_m4f_debug
+    forward_variables_from(_toolchain_base, "*", _excluded_members)
+    forward_variables_from(_toolchain_properties, "*")
+    defaults = {
+      forward_variables_from(_toolchain_base.defaults, "*")
+      forward_variables_from(_target_config, "*")
+      default_configs = []
+      default_configs = _target_default_configs
+    }
+  }
+
+  speed_optimized = {
+    name = "arduino_speed_optimized"
+    _toolchain_base = pw_toolchain_arm_gcc.cortex_m4f_speed_optimized
+    forward_variables_from(_toolchain_base, "*", _excluded_members)
+    forward_variables_from(_toolchain_properties, "*")
+    defaults = {
+      forward_variables_from(_toolchain_base.defaults, "*")
+      forward_variables_from(_target_config, "*")
+      default_configs = []
+      default_configs = _target_default_configs
+    }
+  }
+
+  size_optimized = {
+    name = "arduino_size_optimized"
+    _toolchain_base = pw_toolchain_arm_gcc.cortex_m4f_size_optimized
+    forward_variables_from(_toolchain_base, "*", _excluded_members)
+    forward_variables_from(_toolchain_properties, "*")
+    defaults = {
+      forward_variables_from(_toolchain_base.defaults, "*")
+      forward_variables_from(_target_config, "*")
+      default_configs = []
+      default_configs = _target_default_configs
+    }
+  }
+}
+
+# This list just contains the members of the above scope for convenience to make
+# it trivial to generate all the toolchains in this file via a
+# `generate_toolchains` target.
+pw_target_toolchain_arduino_list = [
+  pw_target_toolchain_arduino.debug,
+  pw_target_toolchain_arduino.speed_optimized,
+  pw_target_toolchain_arduino.size_optimized,
+]
diff --git a/targets/docs/BUILD.gn b/targets/docs/BUILD.gn
index 72b68fe..73b1606 100644
--- a/targets/docs/BUILD.gn
+++ b/targets/docs/BUILD.gn
@@ -12,51 +12,30 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
+import("$dir_pigweed/targets/stm32f429i-disc1/target_toolchains.gni")
 import("$dir_pw_docgen/docs.gni")
+import("$dir_pw_protobuf_compiler/proto.gni")
+import("$dir_pw_third_party/nanopb/nanopb.gni")
 import("$dir_pw_toolchain/arm_gcc/toolchains.gni")
 import("$dir_pw_toolchain/generate_toolchain.gni")
 
 # Toolchain for generating upstream Pigweed documentation.
 generate_toolchain("docs") {
-  # Use the Cortex M4 toolchain for regular pw_size_report targets.
-  forward_variables_from(pw_toolchain_arm_gcc.cortex_m4_size_optimized,
+  # Use the stm32f429i-disc1 toolchain for pw_size_report targets.
+  _base_toolchain = pw_target_toolchain_stm32f429i_disc1.size_optimized
+  forward_variables_from(_base_toolchain,
                          "*",
                          [
                            "defaults",
                            "name",
                          ])
   defaults = {
-    _base_toolchain = pw_toolchain_arm_gcc.cortex_m4_size_optimized
     forward_variables_from(_base_toolchain.defaults, "*")
 
     # This is the docs target.
     pw_docgen_BUILD_DOCS = true
-
-    _arm_bloaty_config = "$dir_pw_boot_armv7m/bloaty_config.bloaty"
-
-    pw_bloat_BLOATY_CONFIG = _arm_bloaty_config
-
-    # Toolchains to compare in documentation size reports.
-    pw_bloat_TOOLCHAINS = [
-      {
-        name = "arm-none-eabi-gcc -Og -mcpu=cortex-m4"
-        target = "$dir_pw_toolchain:arm_gcc_cortex_m4_debug"
-        bloaty_config = _arm_bloaty_config
-      },
-      {
-        name = "arm-none-eabi-gcc -Os -mcpu=cortex-m4"
-        target = "$dir_pw_toolchain:arm_gcc_cortex_m4_size_optimized"
-        bloaty_config = _arm_bloaty_config
-      },
-      {
-        name = "arm-none-eabi-gcc -O2 -mcpu=cortex-m4"
-        target = "$dir_pw_toolchain:arm_gcc_cortex_m4_speed_optimized"
-        bloaty_config = _arm_bloaty_config
-      },
-    ]
   }
 }
 
diff --git a/targets/docs/target_docs.rst b/targets/docs/target_docs.rst
index 4c7601a..37db6c2 100644
--- a/targets/docs/target_docs.rst
+++ b/targets/docs/target_docs.rst
@@ -1,8 +1,4 @@
-.. _chapter-docs:
-
-.. default-domain:: cpp
-
-.. highlight:: sh
+.. _target-docs:
 
 ----
 docs
diff --git a/targets/host/BUILD.gn b/targets/host/BUILD.gn
index a29795a..801ef9f 100644
--- a/targets/host/BUILD.gn
+++ b/targets/host/BUILD.gn
@@ -12,12 +12,12 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_docgen/docs.gni")
 import("$dir_pw_toolchain/generate_toolchain.gni")
 import("target_toolchains.gni")
+
 generate_toolchains("host_toolchains") {
   toolchains = pw_target_toolchain_host_list
 }
diff --git a/targets/host/target_docs.rst b/targets/host/target_docs.rst
index bf83926..de087a1 100644
--- a/targets/host/target_docs.rst
+++ b/targets/host/target_docs.rst
@@ -1,8 +1,4 @@
-.. _chapter-host:
-
-.. default-domain:: cpp
-
-.. highlight:: sh
+.. _target-host:
 
 ----
 host
diff --git a/targets/host/target_toolchains.gni b/targets/host/target_toolchains.gni
index 98b094f..51be296 100644
--- a/targets/host/target_toolchains.gni
+++ b/targets/host/target_toolchains.gni
@@ -12,13 +12,13 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
-import("$dir_pw_protobuf_compiler/nanopb.gni")
 import("$dir_pw_protobuf_compiler/proto.gni")
+import("$dir_pw_third_party/nanopb/nanopb.gni")
 import("$dir_pw_toolchain/host_clang/toolchains.gni")
 import("$dir_pw_toolchain/host_gcc/toolchains.gni")
+
 _host_common = {
   # Use logging-based test output on host.
   pw_unit_test_MAIN = "$dir_pw_unit_test:logging_main"
@@ -38,12 +38,9 @@
   # Tokenizer trace time.
   pw_trace_tokenizer_time = "$dir_pw_trace_tokenized:host_trace_time"
 
-  # Allow nanopb to be toggled via a build arg on host for easy testing.
-  _has_nanopb = pw_protobuf_GENERATORS + [ "nanopb" ] - [ "nanopb" ] !=
-                pw_protobuf_GENERATORS
-  if (dir_pw_third_party_nanopb != "" && !_has_nanopb) {
-    pw_protobuf_GENERATORS += [ "nanopb" ]
-  }
+  # Specify builtin GN variables.
+  current_os = host_os
+  current_cpu = host_cpu
 }
 
 # Linux-specific target configuration.
@@ -76,6 +73,8 @@
   }
 }
 
+_target_default_configs = [ "$dir_pw_build:extra_strict_warnings" ]
+
 pw_target_toolchain_host = {
   _excluded_members = [
     "defaults",
@@ -90,6 +89,7 @@
       forward_variables_from(_toolchain_base.defaults, "*")
       forward_variables_from(_host_common, "*")
       forward_variables_from(_os_specific_config, "*")
+      default_configs += _target_default_configs
     }
   }
 
@@ -101,6 +101,7 @@
       forward_variables_from(_toolchain_base.defaults, "*")
       forward_variables_from(_host_common, "*")
       forward_variables_from(_os_specific_config, "*")
+      default_configs += _target_default_configs
     }
   }
 
@@ -112,6 +113,7 @@
       forward_variables_from(_toolchain_base.defaults, "*")
       forward_variables_from(_host_common, "*")
       forward_variables_from(_os_specific_config, "*")
+      default_configs += _target_default_configs
     }
   }
 
@@ -123,6 +125,7 @@
       forward_variables_from(_toolchain_base.defaults, "*")
       forward_variables_from(_host_common, "*")
       forward_variables_from(_os_specific_config, "*")
+      default_configs += _target_default_configs
     }
   }
 
@@ -134,6 +137,7 @@
       forward_variables_from(_toolchain_base.defaults, "*")
       forward_variables_from(_host_common, "*")
       forward_variables_from(_os_specific_config, "*")
+      default_configs += _target_default_configs
     }
   }
 
@@ -145,6 +149,7 @@
       forward_variables_from(_toolchain_base.defaults, "*")
       forward_variables_from(_host_common, "*")
       forward_variables_from(_os_specific_config, "*")
+      default_configs += _target_default_configs
     }
   }
 }
diff --git a/targets/lm3s6965evb-qemu/BUILD b/targets/lm3s6965evb-qemu/BUILD
new file mode 100644
index 0000000..307d555
--- /dev/null
+++ b/targets/lm3s6965evb-qemu/BUILD
@@ -0,0 +1,35 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+load(
+    "//pw_build:pigweed.bzl",
+    "pw_cc_library",
+)
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])  # Apache License 2.0
+
+pw_cc_library(
+    name = "pre_init",
+    srcs = [
+        "boot.cc",
+        "vector_table.cc"
+    ],
+    deps = [
+        "//pw_boot_armv7m",
+        "//pw_preprocessor",
+        "//pw_sys_io_baremetal_lm3s6965evb",
+    ],
+)
diff --git a/targets/lm3s6965evb-qemu/BUILD.gn b/targets/lm3s6965evb-qemu/BUILD.gn
index a326df0..beb52f3 100644
--- a/targets/lm3s6965evb-qemu/BUILD.gn
+++ b/targets/lm3s6965evb-qemu/BUILD.gn
@@ -12,16 +12,31 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
+import("$dir_pw_build/target_types.gni")
 import("$dir_pw_docgen/docs.gni")
 import("$dir_pw_toolchain/generate_toolchain.gni")
 import("target_toolchains.gni")
+
 generate_toolchains("target_toolchains") {
   toolchains = pw_target_toolchain_lm3s6965evb_qemu_list
 }
 
+if (current_toolchain != default_toolchain) {
+  pw_source_set("pre_init") {
+    public_deps = [
+      "$dir_pw_boot_armv7m",
+      "$dir_pw_sys_io_baremetal_lm3s6965evb",
+    ]
+    deps = [ "$dir_pw_preprocessor" ]
+    sources = [
+      "boot.cc",
+      "vector_table.cc",
+    ]
+  }
+}
+
 pw_doc_group("target_docs") {
   sources = [ "target_docs.rst" ]
 }
diff --git a/targets/lm3s6965evb-qemu/boot.cc b/targets/lm3s6965evb-qemu/boot.cc
new file mode 100644
index 0000000..f4a537c
--- /dev/null
+++ b/targets/lm3s6965evb-qemu/boot.cc
@@ -0,0 +1,48 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_boot_armv7m/boot.h"
+
+#include "pw_preprocessor/compiler.h"
+#include "pw_sys_io_baremetal_lm3s6965evb/init.h"
+
+// Note that constexpr is used inside of this function instead of using a static
+// constexpr or declaring it outside of this function in an anonymous namespace,
+// because constexpr makes it available for the compiler to evaluate during
+// compile time but does NOT require it to be evaluated at compile time and we
+// have to be incredibly careful that this does not end up in the .data section.
+void pw_boot_PreStaticMemoryInit() {
+  // Force RCC to be at default at boot.
+  constexpr uint32_t kRccDefault = 0x078E3AD1U;
+  volatile uint32_t& rcc = *reinterpret_cast<volatile uint32_t*>(0x400FE060U);
+  rcc = kRccDefault;
+  constexpr uint32_t kRcc2Default = 0x07802810U;
+  volatile uint32_t& rcc2 = *reinterpret_cast<volatile uint32_t*>(0x400FE070U);
+  rcc2 = kRcc2Default;
+}
+
+void pw_boot_PreStaticConstructorInit() {}
+
+void pw_boot_PreMainInit() { pw_sys_io_Init(); }
+
+PW_NO_RETURN void pw_boot_PostMain() {
+  // QEMU requires a special command to tell the VM to shut down.
+  volatile uint32_t* aircr = (uint32_t*)(0xE000ED0CU);
+  *aircr = 0x5fa0004;
+
+  // In case main() returns, just sit here until the device is reset.
+  while (true) {
+  }
+  PW_UNREACHABLE;
+}
diff --git a/targets/lm3s6965evb-qemu/lm3s6965evb_executable.gni b/targets/lm3s6965evb-qemu/lm3s6965evb_executable.gni
index 7576fce..70a87cb 100644
--- a/targets/lm3s6965evb-qemu/lm3s6965evb_executable.gni
+++ b/targets/lm3s6965evb-qemu/lm3s6965evb_executable.gni
@@ -12,7 +12,6 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 # Executable wrapper that includes some baremetal startup code.
@@ -22,6 +21,6 @@
     if (!defined(deps)) {
       deps = []
     }
-    deps += [ dir_pw_sys_io_baremetal_lm3s6965evb ]
+    deps += [ "$dir_pigweed/targets/lm3s6965evb-qemu:pre_init" ]
   }
 }
diff --git a/pw_protobuf_compiler/nanopb.gni b/targets/lm3s6965evb-qemu/py/BUILD.gn
similarity index 70%
copy from pw_protobuf_compiler/nanopb.gni
copy to targets/lm3s6965evb-qemu/py/BUILD.gn
index 3c3be32..8962902 100644
--- a/pw_protobuf_compiler/nanopb.gni
+++ b/targets/lm3s6965evb-qemu/py/BUILD.gn
@@ -12,8 +12,14 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-declare_args() {
-  # If compiling protos for nanopb, this variable is set to the path to the
-  # nanopb installation.
-  dir_pw_third_party_nanopb = ""
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/python.gni")
+
+pw_python_package("py") {
+  setup = [ "setup.py" ]
+  sources = [
+    "lm3s6965evb_qemu_utils/__init__.py",
+    "lm3s6965evb_qemu_utils/unit_test_runner.py",
+  ]
 }
diff --git a/targets/lm3s6965evb-qemu/py/lm3s6965evb_qemu_utils/py.typed b/targets/lm3s6965evb-qemu/py/lm3s6965evb_qemu_utils/py.typed
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/targets/lm3s6965evb-qemu/py/lm3s6965evb_qemu_utils/py.typed
diff --git a/targets/lm3s6965evb-qemu/py/setup.py b/targets/lm3s6965evb-qemu/py/setup.py
index a60dc3f..4223685 100644
--- a/targets/lm3s6965evb-qemu/py/setup.py
+++ b/targets/lm3s6965evb-qemu/py/setup.py
@@ -13,7 +13,7 @@
 # the License.
 """lm3s6965evb_qemu_utils"""
 
-import setuptools
+import setuptools  # type: ignore
 
 setuptools.setup(
     name='lm3s6965evb_qemu_utils',
@@ -23,6 +23,8 @@
     description=
     'Target-specific python scripts for the lm3s6965evb-qemu target',
     packages=setuptools.find_packages(),
+    package_data={'lm3s6965evb_qemu_utils': ['py.typed']},
+    zip_safe=False,
     entry_points={
         'console_scripts': [
             'lm3s6965evb_qemu_unit_test_runner = '
diff --git a/targets/lm3s6965evb-qemu/target_docs.rst b/targets/lm3s6965evb-qemu/target_docs.rst
index 0b95d89..6e8c12f 100644
--- a/targets/lm3s6965evb-qemu/target_docs.rst
+++ b/targets/lm3s6965evb-qemu/target_docs.rst
@@ -1,8 +1,4 @@
-.. _chapter-lm3s6965evb-qemu:
-
-.. default-domain:: cpp
-
-.. highlight:: sh
+.. _target-lm3s6965evb-qemu:
 
 ----------------
 lm3s6965evb-qemu
diff --git a/targets/lm3s6965evb-qemu/target_toolchains.gni b/targets/lm3s6965evb-qemu/target_toolchains.gni
index 69e5528..25d7151 100644
--- a/targets/lm3s6965evb-qemu/target_toolchains.gni
+++ b/targets/lm3s6965evb-qemu/target_toolchains.gni
@@ -12,10 +12,10 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_toolchain/arm_gcc/toolchains.gni")
+
 _test_runner_script = "py/lm3s6965evb_qemu_utils/unit_test_runner.py"
 
 _target_config = {
@@ -33,9 +33,6 @@
 
   pw_unit_test_AUTOMATIC_RUNNER = get_path_info(_test_runner_script, "abspath")
 
-  # Tell QEMU to shut down after running a binary.
-  pw_boot_armv7m_QEMU_SHUTDOWN = true
-
   # Facade backends
   pw_assert_BACKEND = dir_pw_assert_basic
   pw_boot_BACKEND = dir_pw_boot_armv7m
@@ -58,9 +55,15 @@
     "PW_BOOT_VECTOR_TABLE_BEGIN=0x00000000",
     "PW_BOOT_VECTOR_TABLE_SIZE=512",
   ]
+
+  current_cpu = "arm"
+  current_os = ""
 }
 
-_target_default_configs = [ "$dir_pw_toolchain/arm_gcc:enable_float_printf" ]
+_target_default_configs = [
+  "$dir_pw_build:extra_strict_warnings",
+  "$dir_pw_toolchain/arm_gcc:enable_float_printf",
+]
 
 pw_target_toolchain_lm3s6965evb_qemu = {
   _excluded_members = [
diff --git a/targets/lm3s6965evb-qemu/vector_table.cc b/targets/lm3s6965evb-qemu/vector_table.cc
new file mode 100644
index 0000000..575a673
--- /dev/null
+++ b/targets/lm3s6965evb-qemu/vector_table.cc
@@ -0,0 +1,59 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_boot_armv7m/boot.h"
+
+namespace {
+
+// Default handler to insert into the ARMv7-M vector table (below).
+// This function exists for convenience. If a device isn't doing what you
+// expect, it might have hit a fault and ended up here.
+void DefaultFaultHandler(void) {
+  while (true) {
+    // Wait for debugger to attach.
+  }
+}
+
+// This is the device's interrupt vector table. It's not referenced in any
+// code because the platform (LM3S6965) expects this table to be present at the
+// beginning of flash. The exact address is specified in the pw_boot_armv7m
+// configuration as part of the target config.
+//
+// For more information, see ARMv7-M Architecture Reference Manual DDI 0403E.b
+// section B1.5.3.
+
+// This typedef is for convenience when building the vector table. With the
+// exception of SP_main (0th entry in the vector table), all the entries of the
+// vector table are function pointers.
+typedef void (*InterruptHandler)();
+
+PW_KEEP_IN_SECTION(".vector_table")
+const InterruptHandler vector_table[] = {
+    // The starting location of the stack pointer.
+    // This address is NOT an interrupt handler/function pointer, it is simply
+    // the address that the main stack pointer should be initialized to. The
+    // value is reinterpret casted because it needs to be in the vector table.
+    [0] = reinterpret_cast<InterruptHandler>(&pw_boot_stack_high_addr),
+
+    // Reset handler, dictates how to handle reset interrupt. This is the
+    // address that the Program Counter (PC) is initialized to at boot.
+    [1] = pw_boot_Entry,
+
+    // NMI handler.
+    [2] = DefaultFaultHandler,
+    // HardFault handler.
+    [3] = DefaultFaultHandler,
+};
+
+}  // namespace
diff --git a/targets/stm32f429i-disc1/BUILD b/targets/stm32f429i-disc1/BUILD
index a3b72d7..1414169 100644
--- a/targets/stm32f429i-disc1/BUILD
+++ b/targets/stm32f429i-disc1/BUILD
@@ -22,9 +22,9 @@
 licenses(["notice"])  # Apache License 2.0
 
 pw_cc_library(
-    name = "pw_pre_init",
+    name = "pre_init",
     srcs = [
-        "early_boot.c",
+        "boot.cc",
         "vector_table.cc"
     ],
     deps = [
diff --git a/targets/stm32f429i-disc1/BUILD.gn b/targets/stm32f429i-disc1/BUILD.gn
index 29d81b2..6b18a9e 100644
--- a/targets/stm32f429i-disc1/BUILD.gn
+++ b/targets/stm32f429i-disc1/BUILD.gn
@@ -12,7 +12,6 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_build/target_types.gni")
@@ -20,6 +19,7 @@
 import("$dir_pw_malloc/backend.gni")
 import("$dir_pw_toolchain/generate_toolchain.gni")
 import("target_toolchains.gni")
+
 generate_toolchains("target_toolchains") {
   toolchains = pw_target_toolchain_stm32f429i_disc1_list
 }
@@ -31,7 +31,7 @@
 }
 
 if (current_toolchain != default_toolchain) {
-  pw_source_set("pw_pre_init") {
+  pw_source_set("pre_init") {
     configs = [ ":pw_malloc_active" ]
     public_deps = [
       "$dir_pw_boot_armv7m",
@@ -42,7 +42,7 @@
       "$dir_pw_preprocessor",
     ]
     sources = [
-      "early_boot.c",
+      "boot.cc",
       "vector_table.cc",
     ]
   }
diff --git a/targets/stm32f429i-disc1/boot.cc b/targets/stm32f429i-disc1/boot.cc
new file mode 100644
index 0000000..a0d0fc2
--- /dev/null
+++ b/targets/stm32f429i-disc1/boot.cc
@@ -0,0 +1,67 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_boot_armv7m/boot.h"
+
+#include "pw_malloc/malloc.h"
+#include "pw_preprocessor/compiler.h"
+#include "pw_sys_io_baremetal_stm32f429/init.h"
+
+// Note that constexpr is used inside of this function instead of using a static
+// constexpr or declaring it outside of this function in an anonymous namespace,
+// because constexpr makes it available for the compiler to evaluate during
+// compile time but does NOT require it to be evaluated at compile time and we
+// have to be incredibly careful that this does not end up in the .data section.
+void pw_boot_PreStaticMemoryInit() {
+  // TODO(pwbug/17): Optionally enable Replace when Pigweed config system is
+  // added.
+#if PW_ARMV7M_ENABLE_FPU
+  // Enable FPU if built using hardware FPU instructions.
+  // CPCAR mask that enables FPU. (ARMv7-M Section B3.2.20)
+  constexpr uint32_t kFpuEnableMask = (0xFu << 20);
+
+  // Memory mapped register to enable FPU. (ARMv7-M Section B3.2.2, Table B3-4)
+  volatile uint32_t& arm_v7m_cpacr =
+      *reinterpret_cast<volatile uint32_t*>(0xE000ED88u);
+  arm_v7m_cpacr |= kFpuEnableMask;
+
+  // Ensure the FPU configuration is committed and enabled before continuing and
+  // potentially executing any FPU instructions, however rare that may be during
+  // startup.
+  asm volatile(
+      " dsb \n"
+      " isb \n"
+      // clang-format off
+      : /*output=*/
+      : /*input=*/
+      : /*clobbers=*/"memory"
+      // clang-format on
+  );
+#endif  // PW_ARMV7M_ENABLE_FPU
+}
+
+void pw_boot_PreStaticConstructorInit() {
+#if PW_MALLOC_ACTIVE
+  pw_MallocInit();
+#endif  // PW_MALLOC_ACTIVE
+}
+
+void pw_boot_PreMainInit() { pw_sys_io_Init(); }
+
+PW_NO_RETURN void pw_boot_PostMain() {
+  // In case main() returns, just sit here until the device is reset.
+  while (true) {
+  }
+  PW_UNREACHABLE;
+}
diff --git a/targets/stm32f429i-disc1/early_boot.c b/targets/stm32f429i-disc1/early_boot.c
deleted file mode 100644
index 0345ffc..0000000
--- a/targets/stm32f429i-disc1/early_boot.c
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2020 The Pigweed Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-//     https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#include "pw_boot_armv7m/boot.h"
-#include "pw_malloc/malloc.h"
-#include "pw_sys_io_baremetal_stm32f429/init.h"
-
-void pw_PreStaticConstructorInit() {
-  // TODO(pwbug/17): Optionally enable Replace when Pigweed config system is
-  // added.
-#if PW_ARMV7M_ENABLE_FPU
-  // Enable FPU if built using hardware FPU instructions.
-  // CPCAR mask that enables FPU. (ARMv7-M Section B3.2.20)
-  const uint32_t kFpuEnableMask = (0xFu << 20);
-
-  // Memory mapped register to enable FPU. (ARMv7-M Section B3.2.2, Table B3-4)
-  volatile uint32_t* arm_v7m_cpacr = (volatile uint32_t*)0xE000ED88u;
-
-  *arm_v7m_cpacr |= kFpuEnableMask;
-#endif  // PW_ARMV7M_ENABLE_FPU
-#if PW_MALLOC_ACTIVE
-  pw_MallocInit();
-#endif  // PW_MALLOC_ACTIVE
-}
-
-void pw_PreMainInit() { pw_sys_io_Init(); }
diff --git a/pw_rpc/test_impl/BUILD.gn b/targets/stm32f429i-disc1/py/BUILD.gn
similarity index 62%
copy from pw_rpc/test_impl/BUILD.gn
copy to targets/stm32f429i-disc1/py/BUILD.gn
index 68f88c1..b93104b 100644
--- a/pw_rpc/test_impl/BUILD.gn
+++ b/targets/stm32f429i-disc1/py/BUILD.gn
@@ -12,19 +12,17 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
-import("$dir_pw_build/target_types.gni")
-import("$dir_pw_unit_test/test.gni")
-config("config") {
-  include_dirs = [ "public_overrides" ]
-  visibility = [ ":*" ]
-}
+import("$dir_pw_build/python.gni")
 
-pw_source_set("test_impl") {
-  public_configs = [ ":config" ]
-  public = [ "public_overrides/pw_rpc/internal/method.h" ]
-  public_deps = [ "../:server_library_deps" ]
-  visibility = [ "..:*" ]
+pw_python_package("py") {
+  setup = [ "setup.py" ]
+  sources = [
+    "stm32f429i_disc1_utils/__init__.py",
+    "stm32f429i_disc1_utils/stm32f429i_detector.py",
+    "stm32f429i_disc1_utils/unit_test_client.py",
+    "stm32f429i_disc1_utils/unit_test_runner.py",
+    "stm32f429i_disc1_utils/unit_test_server.py",
+  ]
 }
diff --git a/targets/stm32f429i-disc1/py/setup.py b/targets/stm32f429i-disc1/py/setup.py
index 1c47928..309ff7e 100644
--- a/targets/stm32f429i-disc1/py/setup.py
+++ b/targets/stm32f429i-disc1/py/setup.py
@@ -13,7 +13,7 @@
 # the License.
 """stm32f429i_disc1_utils"""
 
-import setuptools
+import setuptools  # type: ignore
 
 setuptools.setup(
     name='stm32f429i_disc1_utils',
@@ -23,6 +23,8 @@
     description=
     'Target-specific python scripts for the stm32f429i-disc1 target',
     packages=setuptools.find_packages(),
+    package_data={'stm32f429i_disc1_utils': ['py.typed']},
+    zip_safe=False,
     entry_points={
         'console_scripts': [
             'stm32f429i_disc1_unit_test_runner = '
diff --git a/targets/stm32f429i-disc1/py/stm32f429i_disc1_utils/py.typed b/targets/stm32f429i-disc1/py/stm32f429i_disc1_utils/py.typed
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/targets/stm32f429i-disc1/py/stm32f429i_disc1_utils/py.typed
diff --git a/targets/stm32f429i-disc1/py/stm32f429i_disc1_utils/stm32f429i_detector.py b/targets/stm32f429i-disc1/py/stm32f429i_disc1_utils/stm32f429i_detector.py
index 56b98de..8b7a0e4 100644
--- a/targets/stm32f429i-disc1/py/stm32f429i_disc1_utils/stm32f429i_detector.py
+++ b/targets/stm32f429i-disc1/py/stm32f429i_disc1_utils/stm32f429i_detector.py
@@ -17,8 +17,8 @@
 import logging
 import typing
 
-import coloredlogs
-import serial.tools.list_ports
+import coloredlogs  # type: ignore
+import serial.tools.list_ports  # type: ignore
 
 # Vendor/device ID to search for in USB devices.
 _ST_VENDOR_ID = 0x0483
diff --git a/targets/stm32f429i-disc1/py/stm32f429i_disc1_utils/unit_test_runner.py b/targets/stm32f429i-disc1/py/stm32f429i_disc1_utils/unit_test_runner.py
index d290198..328def7 100755
--- a/targets/stm32f429i-disc1/py/stm32f429i_disc1_utils/unit_test_runner.py
+++ b/targets/stm32f429i-disc1/py/stm32f429i_disc1_utils/unit_test_runner.py
@@ -22,8 +22,8 @@
 import threading
 from typing import List
 
-import coloredlogs
-import serial
+import coloredlogs  # type: ignore
+import serial  # type: ignore
 from stm32f429i_disc1_utils import stm32f429i_detector
 
 # Path used to access non-python resources in this python module.
diff --git a/targets/stm32f429i-disc1/py/stm32f429i_disc1_utils/unit_test_server.py b/targets/stm32f429i-disc1/py/stm32f429i_disc1_utils/unit_test_server.py
index 44b972f..c3bfd3d 100644
--- a/targets/stm32f429i-disc1/py/stm32f429i_disc1_utils/unit_test_server.py
+++ b/targets/stm32f429i-disc1/py/stm32f429i_disc1_utils/unit_test_server.py
@@ -20,9 +20,10 @@
 import tempfile
 from typing import IO, List, Optional
 
+from stm32f429i_disc1_utils import stm32f429i_detector
+
 import pw_cli.process
 import pw_cli.log
-from stm32f429i_disc1_utils import stm32f429i_detector
 
 _LOG = logging.getLogger('unit_test_server')
 
diff --git a/targets/stm32f429i-disc1/stm32f429i_executable.gni b/targets/stm32f429i-disc1/stm32f429i_executable.gni
index 55e2a4a..99190cc 100644
--- a/targets/stm32f429i-disc1/stm32f429i_executable.gni
+++ b/targets/stm32f429i-disc1/stm32f429i_executable.gni
@@ -12,7 +12,6 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 import("$dir_pw_malloc/backend.gni")
 
@@ -23,7 +22,7 @@
     if (!defined(deps)) {
       deps = []
     }
-    deps += [ "$dir_pigweed/targets/stm32f429i-disc1:pw_pre_init" ]
+    deps += [ "$dir_pigweed/targets/stm32f429i-disc1:pre_init" ]
     if (pw_malloc_BACKEND != "") {
       if (!defined(configs)) {
         configs = []
diff --git a/targets/stm32f429i-disc1/target_docs.rst b/targets/stm32f429i-disc1/target_docs.rst
index 46cdf09..68e34d8 100644
--- a/targets/stm32f429i-disc1/target_docs.rst
+++ b/targets/stm32f429i-disc1/target_docs.rst
@@ -1,8 +1,4 @@
-.. _chapter-stm32f429i-disc1:
-
-.. default-domain:: cpp
-
-.. highlight:: sh
+.. _target-stm32f429i-disc1:
 
 ----------------
 stm32f429i-disc1
@@ -101,3 +97,145 @@
 Whenever you run ``ninja -C out stm32f429i``, affected tests will be built and
 run on the attached device(s). Alternatively, you may use ``pw watch`` to set up
 Pigweed to build/test whenever it sees changes to source files.
+
+
+Debugging
+=========
+There are multiple ways to debug the device, including using commercial tools
+like SEGGER's J-Link. However, the Discovery board has an on-board STLink
+debugger, which is supported by the open source OpenOCD debugger. To debug with
+OpenOCD requires a few steps. Summary version of the steps:
+
+#. Connect OpenOCD to the device in terminal A. Leave this running
+
+   .. code:: sh
+
+     $ openocd -f targets/stm32f429i-disc1/py/stm32f429i_disc1_utils/openocd_stm32f4xx.cfg
+
+#. Connect GDB to the running OpenOCD instance in terminal B
+
+   .. code:: sh
+
+     $ arm-none-eabi-gdb -ex "target remote :3333" \
+       out/stm32f429i_disc1_debug/obj/pw_assert/test/assert_facade_test.elf
+
+#. Flash (``load``), run (``mon reset init; continue``), and debug
+
+   .. code:: none
+
+     (gdb) set print pretty on
+     (gdb) load
+     (gdb) mon reset init
+     (gdb) continue
+
+#. You can re-flash the device after compiling by running ``load``.
+
+
+Step 1: Start an OpenOCD server and connect to the device
+---------------------------------------------------------
+OpenOCD is a persistent server that you run and leave running to bridge between
+GDB and the device. To run it for the Discovery board:
+
+.. code:: sh
+
+  $ openocd -f targets/stm32f429i-disc1/py/stm32f429i_disc1_utils/openocd_stm32f4xx.cfg
+
+Typical output:
+
+.. code:: none
+
+  Open On-Chip Debugger 0.10.0+dev-01243-ge41c0f49-dirty (2020-05-21-10:27)
+  Licensed under GNU GPL v2
+  For bug reports, read
+          http://openocd.org/doc/doxygen/bugs.html
+  DEPRECATED! use 'adapter driver' not 'interface'
+  Info : The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD
+  srst_only separate srst_nogate srst_open_drain connect_deassert_srst
+
+  Info : Listening on port 6666 for tcl connections
+  Info : Listening on port 4444 for telnet connections
+  Info : clock speed 2000 kHz
+  Info : STLINK V2J25M14 (API v2) VID:PID 0483:374B
+  Info : Target voltage: 2.871879
+  Info : stm32f4x.cpu: hardware has 6 breakpoints, 4 watchpoints
+  Info : starting gdb server for stm32f4x.cpu on 3333
+  Info : Listening on port 3333 for gdb connections
+
+Step 2: Start GDB and connect to the OpenOCD server
+---------------------------------------------------
+Start GDB pointing to the correct .elf file, and tell it to connect to the
+OpenOCD server (running on port 3333 by default).
+
+.. code:: sh
+
+  $ arm-none-eabi-gdb -ex "target remote :3333" \
+    out/stm32f429i_disc1_debug/obj/pw_assert/test/assert_facade_test.elf
+
+In this case the assert facade test is debugged, but substitute your own ELF
+file. This should produce output similar to the following:
+
+.. code:: none
+
+  GNU gdb (GNU Arm Embedded Toolchain 9-2020-q2-update) 8.3.1.20191211-git
+  Copyright (C) 2019 Free Software Foundation, Inc.
+  License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>
+  This is free software: you are free to change and redistribute it.
+  There is NO WARRANTY, to the extent permitted by law.
+  Type "show copying" and "show warranty" for details.
+  This GDB was configured as "--host=x86_64-apple-darwin10 --target=arm-none-eabi".
+  Type "show configuration" for configuration details.
+  For bug reporting instructions, please see:
+  <http://www.gnu.org/software/gdb/bugs/>.
+  Find the GDB manual and other documentation resources online at:
+      <http://www.gnu.org/software/gdb/documentation/>.
+
+  For help, type "help".
+  Type "apropos word" to search for commands related to "word"...
+  Reading symbols from out/stm32f429i_disc1_debug/obj/pw_assert//test/assert_facade_test.elf...
+  Remote debugging using :3333
+  pw_BootEntry () at ../pw_boot_armv7m/core_init.c:117
+  117	  }
+
+Step 3: Flash, run, and debug
+-----------------------------
+Now that the GDB instance is connected to the device, you can flash, run, and debug.
+
+To flash
+
+.. code:: none
+
+  (gdb) load
+
+This will produce output similar to:
+
+.. code:: none
+
+  (gdb) load
+  Loading section .vector_table, size 0x10 lma 0x8000000
+  Loading section .code, size 0xdb8c lma 0x8000200
+  Loading section .ARM, size 0x8 lma 0x800dd90
+  Loading section .static_init_ram, size 0x1d0 lma 0x800dd98
+  Start address 0x8007c48, load size 56692
+  Transfer rate: 25 KB/sec, 8098 bytes/write.
+
+To reset the device and halt on the first instruction (before main):
+
+.. code:: none
+
+  (gdb) mon reset init
+
+
+This will produce output similar to:
+
+.. code:: none
+
+  (gdb) mon reset init
+  Unable to match requested speed 2000 kHz, using 1800 kHz
+  Unable to match requested speed 2000 kHz, using 1800 kHz
+  target halted due to debug-request, current mode: Thread
+  xPSR: 0x01000000 pc: 0x08007930 msp: 0x20030000
+  Unable to match requested speed 8000 kHz, using 4000 kHz
+  Unable to match requested speed 8000 kHz, using 4000 kHz
+
+The device is now ready for debugging. You can place breakpoints and start the
+device with ``continue``.
diff --git a/targets/stm32f429i-disc1/target_toolchains.gni b/targets/stm32f429i-disc1/target_toolchains.gni
index 241906a..b6a95a7 100644
--- a/targets/stm32f429i-disc1/target_toolchains.gni
+++ b/targets/stm32f429i-disc1/target_toolchains.gni
@@ -12,10 +12,10 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-# gn-format disable
 import("//build_overrides/pigweed.gni")
 
 import("$dir_pw_toolchain/arm_gcc/toolchains.gni")
+
 declare_args() {
   # Enable the pw_target_runner for on-device testing.
   pw_use_test_server = false
@@ -64,13 +64,19 @@
     "PW_BOOT_VECTOR_TABLE_BEGIN=0x08000000",
     "PW_BOOT_VECTOR_TABLE_SIZE=512",
   ]
+
+  current_cpu = "arm"
+  current_os = ""
 }
 
 _toolchain_properties = {
   final_binary_extension = ".elf"
 }
 
-_target_default_configs = [ "$dir_pw_toolchain/arm_gcc:enable_float_printf" ]
+_target_default_configs = [
+  "$dir_pw_build:extra_strict_warnings",
+  "$dir_pw_toolchain/arm_gcc:enable_float_printf",
+]
 
 pw_target_toolchain_stm32f429i_disc1 = {
   _excluded_members = [
diff --git a/targets/stm32f429i-disc1/vector_table.cc b/targets/stm32f429i-disc1/vector_table.cc
index d5e3065..575a673 100644
--- a/targets/stm32f429i-disc1/vector_table.cc
+++ b/targets/stm32f429i-disc1/vector_table.cc
@@ -15,6 +15,7 @@
 #include "pw_boot_armv7m/boot.h"
 
 namespace {
+
 // Default handler to insert into the ARMv7-M vector table (below).
 // This function exists for convenience. If a device isn't doing what you
 // expect, it might have hit a fault and ended up here.
@@ -43,15 +44,16 @@
     // This address is NOT an interrupt handler/function pointer, it is simply
     // the address that the main stack pointer should be initialized to. The
     // value is reinterpret casted because it needs to be in the vector table.
-    [0] = reinterpret_cast<InterruptHandler>(&pw_stack_high_addr),
+    [0] = reinterpret_cast<InterruptHandler>(&pw_boot_stack_high_addr),
 
     // Reset handler, dictates how to handle reset interrupt. This is the
     // address that the Program Counter (PC) is initialized to at boot.
-    [1] = pw_BootEntry,
+    [1] = pw_boot_Entry,
 
     // NMI handler.
     [2] = DefaultFaultHandler,
     // HardFault handler.
     [3] = DefaultFaultHandler,
 };
+
 }  // namespace
diff --git a/third_party/arduino/.gitignore b/third_party/arduino/.gitignore
new file mode 100644
index 0000000..717f213
--- /dev/null
+++ b/third_party/arduino/.gitignore
@@ -0,0 +1 @@
+cores/
\ No newline at end of file
diff --git a/pw_protobuf_compiler/nanopb.gni b/third_party/arduino/BUILD
similarity index 77%
copy from pw_protobuf_compiler/nanopb.gni
copy to third_party/arduino/BUILD
index 3c3be32..f7a092c 100644
--- a/pw_protobuf_compiler/nanopb.gni
+++ b/third_party/arduino/BUILD
@@ -12,8 +12,11 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-declare_args() {
-  # If compiling protos for nanopb, this variable is set to the path to the
-  # nanopb installation.
-  dir_pw_third_party_nanopb = ""
-}
+load(
+    "//pw_build:pigweed.bzl",
+    "pw_cc_library",
+)
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])  # Apache License 2.0
diff --git a/third_party/arduino/BUILD.gn b/third_party/arduino/BUILD.gn
new file mode 100644
index 0000000..f7a6f3d
--- /dev/null
+++ b/third_party/arduino/BUILD.gn
@@ -0,0 +1,64 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_arduino_build/arduino.gni")
+import("$dir_pw_build/target_types.gni")
+
+if (dir_pw_third_party_arduino != "") {
+  pw_source_set("arduino_core_sources") {
+    remove_configs = [ "$dir_pw_build:strict_warnings" ]
+
+    _core_c_files =
+        exec_script(arduino_builder_script,
+                    arduino_show_command_args + [ "--core-c-files" ],
+                    "list lines")
+    _core_s_files = filter_exclude(
+            exec_script(arduino_builder_script,
+                        arduino_show_command_args + [ "--core-s-files" ],
+                        "list lines"),
+            # TODO(tonymd): Conditionally remove this source file unless building for cortex-m0.
+            # Exception for adafruit-samd core
+            # pulse_asm.S is for: '.cpu cortex-m0plus .fpu softvfp'
+            [ "*pulse_asm.S\b" ])
+
+    _core_cpp_files =
+        exec_script(arduino_builder_script,
+                    arduino_show_command_args + [ "--core-cpp-files" ],
+                    "list lines")
+    _variant_c_files =
+        exec_script(arduino_builder_script,
+                    arduino_show_command_args + [ "--variant-c-files" ],
+                    "list lines")
+    _variant_s_files =
+        exec_script(arduino_builder_script,
+                    arduino_show_command_args + [ "--variant-s-files" ],
+                    "list lines")
+    _variant_cpp_files =
+        exec_script(arduino_builder_script,
+                    arduino_show_command_args + [ "--variant-cpp-files" ],
+                    "list lines")
+
+    sources = _core_c_files + _core_s_files + _core_cpp_files +
+              _variant_c_files + _variant_s_files + _variant_cpp_files
+
+    # Rename main() to ArduinoMain()
+    # See //pw_arduino_build/docs.rst for details on this approach.
+    defines = [ "main(...)=ArduinoMain()" ]
+  }
+} else {
+  group("arduino_core_sources") {
+  }
+}
diff --git a/third_party/arduino/cores/.gitkeep b/third_party/arduino/cores/.gitkeep
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/third_party/arduino/cores/.gitkeep
diff --git a/third_party/nanopb/BUILD.gn b/third_party/nanopb/BUILD.gn
new file mode 100644
index 0000000..1927ab1
--- /dev/null
+++ b/third_party/nanopb/BUILD.gn
@@ -0,0 +1,46 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+import("nanopb.gni")
+
+# This file defines a GN source_set for an external installation of nanopb.
+# To use, checkout the nanopb source into a directory, then set the build arg
+# dir_pw_third_party_nanopb to point to that directory. The nanopb library
+# will be available in GN at "$dir_pw_third_party/nanopb".
+if (dir_pw_third_party_nanopb != "") {
+  config("includes") {
+    include_dirs = [ dir_pw_third_party_nanopb ]
+  }
+
+  pw_source_set("nanopb") {
+    public_configs = [ ":includes" ]
+    public = [
+      "$dir_pw_third_party_nanopb/pb.h",
+      "$dir_pw_third_party_nanopb/pb_common.h",
+      "$dir_pw_third_party_nanopb/pb_decode.h",
+      "$dir_pw_third_party_nanopb/pb_encode.h",
+    ]
+    sources = [
+      "$dir_pw_third_party_nanopb/pb_common.c",
+      "$dir_pw_third_party_nanopb/pb_decode.c",
+      "$dir_pw_third_party_nanopb/pb_encode.c",
+    ]
+  }
+} else {
+  group("nanopb") {
+  }
+}
diff --git a/third_party/nanopb/CMakeLists.txt b/third_party/nanopb/CMakeLists.txt
new file mode 100644
index 0000000..1341403
--- /dev/null
+++ b/third_party/nanopb/CMakeLists.txt
@@ -0,0 +1,30 @@
+# Copyright 2020 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+set(dir_pw_third_party_nanopb "" CACHE PATH
+    "Path to a Nanopb installation to import. Set to PRESENT if Nanopb is already present in the build."
+)
+
+if("${dir_pw_third_party_nanopb}" STREQUAL "")
+  return()
+elseif(NOT "${dir_pw_third_party_nanopb}" STREQUAL PRESENT)
+  add_subdirectory("${dir_pw_third_party_nanopb}" third_party/nanopb)
+endif()
+
+add_library(pw_third_party.nanopb INTERFACE)
+target_link_libraries(pw_third_party.nanopb INTERFACE protobuf-nanopb-static)
+target_include_directories(pw_third_party.nanopb
+  INTERFACE
+    $<TARGET_PROPERTY:protobuf-nanopb-static,SOURCE_DIR>
+)
diff --git a/pw_protobuf_compiler/nanopb.gni b/third_party/nanopb/nanopb.gni
similarity index 85%
rename from pw_protobuf_compiler/nanopb.gni
rename to third_party/nanopb/nanopb.gni
index 3c3be32..7cda7c0 100644
--- a/pw_protobuf_compiler/nanopb.gni
+++ b/third_party/nanopb/nanopb.gni
@@ -14,6 +14,7 @@
 
 declare_args() {
   # If compiling protos for nanopb, this variable is set to the path to the
-  # nanopb installation.
+  # nanopb installation. When set, a pw_source_set for the nanopb library is
+  # created at "$dir_pw_third_party/nanopb".
   dir_pw_third_party_nanopb = ""
 }
diff --git a/yarn.lock b/yarn.lock
index 3fc3046..8ff6708 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -38,43 +38,51 @@
   dependencies:
     regenerator-runtime "^0.13.4"
 
-"@bazel/jasmine@^1.7.0":
-  version "1.7.0"
-  resolved "https://registry.yarnpkg.com/@bazel/jasmine/-/jasmine-1.7.0.tgz#429df76e6628aa139176340434729cc091e371d7"
-  integrity sha512-LXq6nfBBEczjsDLwFW9kesGdewRrnFiAOZzXAAivCV3xtq516xK4QnVWA9tQGq+R1DnY50IaODpCJhh8PDezdg==
+"@bazel/jasmine@^2.2.0":
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/@bazel/jasmine/-/jasmine-2.2.0.tgz#78fc4171362113d993e2473b0168ee8c255e05c9"
+  integrity sha512-NmrgbHBSWUaBXT9rFqeC+bYYWfHtPphnfgchusy7LOQMs/aWVePEo9tsbggNBt1pf6NYv+Y/cHLgtlL/EdQ6Dg==
   dependencies:
-    jasmine "~3.5.0"
-    jasmine-core "~3.5.0"
+    c8 "~7.1.0"
     jasmine-reporters "~2.3.2"
-    v8-coverage "1.0.9"
 
-"@bazel/karma@^1.7.0":
-  version "1.7.0"
-  resolved "https://registry.yarnpkg.com/@bazel/karma/-/karma-1.7.0.tgz#ec7e97a2629f5af0b2abe9a99ae30363a34af97d"
-  integrity sha512-mGYVD9DldB3v/DjxJpS39X1vUD6M32Al96DMoilwW3TSAazcRWwUAC6HY9z5Wtyeqwxyk8BY1Mg1/berWpoTxg==
+"@bazel/karma@^2.2.0":
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/@bazel/karma/-/karma-2.2.0.tgz#9bf6f6f1aa5f12b25468b1cad5e4404138436600"
+  integrity sha512-qyVE7vZ/qaibmpmcRdjS0rlorLGR0zZtlUSImVVTcPTSXqt364fp8TWBWe7oOneJ1SOVyUmTOAzyE86ArxZ/AA==
   dependencies:
     tmp "0.1.0"
 
-"@bazel/rollup@^1.7.0":
-  version "1.7.0"
-  resolved "https://registry.yarnpkg.com/@bazel/rollup/-/rollup-1.7.0.tgz#5c0f0d51d2f3f14e78781a4b9e6a9ffba87f1579"
-  integrity sha512-Pp5aCJw3gwu77zn6/fQgZ39ArrWEI5O3dja5wKadBnfOQ66PImIEr+bf7JgROoWvACH1kGxaS423rq51fiuCsA==
+"@bazel/rollup@^2.2.0":
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/@bazel/rollup/-/rollup-2.2.0.tgz#15651d545114e08db056f10a1eeaa4e76fc4df56"
+  integrity sha512-N4SyrvFkdAVc24CqFNhDtrR6P3XJTdPGziCuF7QM/BGihnsGlxF6+Dt2n5BTLJnObiB1St8vtRwCtAY8faxYWQ==
 
-"@bazel/typescript@^1.7.0":
-  version "1.7.0"
-  resolved "https://registry.yarnpkg.com/@bazel/typescript/-/typescript-1.7.0.tgz#8dc02b8a161f4fff3285186066b5f73666793452"
-  integrity sha512-M6JPXJZ+W6457QZfPHmGg/Mejnp7//YTnffGmnmeK9vDqybXeCCRWW1/iEOwopLJYQViBHfaoulde0VXelx9sA==
+"@bazel/typescript@^2.2.0":
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/@bazel/typescript/-/typescript-2.2.0.tgz#f2d3dce8715d574fe3146f19fdb8479abcc4d608"
+  integrity sha512-Thf8pXntBzE3EvJtyiTBNsfIf1QnYmGPQmUSGLcKUuuFoplUVYShMRHaxBoPZmYsnD/x+BFLgUKIzlXiEQpGqQ==
   dependencies:
     protobufjs "6.8.8"
     semver "5.6.0"
     source-map-support "0.5.9"
     tsutils "2.27.2"
 
+"@bcoe/v8-coverage@^0.2.3":
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz#75a2e8b51cb758a7553d6804a5932d7aace75c39"
+  integrity sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==
+
 "@emotion/hash@^0.8.0":
   version "0.8.0"
   resolved "https://registry.yarnpkg.com/@emotion/hash/-/hash-0.8.0.tgz#bbbff68978fefdbe68ccb533bc8cbe1d1afb5413"
   integrity sha512-kBJtf7PH6aWwZ6fka3zQ0p6SBYzx4fl1LoZXE2RrnYST9Xljm7WfKJrU4g/Xr3Beg72MLrp1AWNUmuYJTL7Cow==
 
+"@istanbuljs/schema@^0.1.2":
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/@istanbuljs/schema/-/schema-0.1.2.tgz#26520bf09abe4a5644cd5414e37125a8954241dd"
+  integrity sha512-tsAQNx32a8CoFhjhijUIhI4kccIAgmGhy8LZMZgGfmXcpMbPRUqn5LWmgRttILi6yeGmBJd2xsPkFMs0PzgPCw==
+
 "@material-ui/core@^4.10.2":
   version "4.10.2"
   resolved "https://registry.yarnpkg.com/@material-ui/core/-/core-4.10.2.tgz#0ef78572132fcef1a25f6969bce0d34652d42e31"
@@ -259,6 +267,16 @@
   resolved "https://registry.yarnpkg.com/@types/estree/-/estree-0.0.44.tgz#980cc5a29a3ef3bea6ff1f7d021047d7ea575e21"
   integrity sha512-iaIVzr+w2ZJ5HkidlZ3EJM8VTZb2MJLCjw3V+505yVts0gRC4UMvjw0d1HPtGqI/HQC/KdsYtayfzl+AXY2R8g==
 
+"@types/is-windows@^1.0.0":
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/@types/is-windows/-/is-windows-1.0.0.tgz#1011fa129d87091e2f6faf9042d6704cdf2e7be0"
+  integrity sha512-tJ1rq04tGKuIJoWIH0Gyuwv4RQ3+tIu7wQrC0MV47raQ44kIzXSSFKfrxFUOWVRvesoF7mrTqigXmqoZJsXwTg==
+
+"@types/istanbul-lib-coverage@^2.0.1":
+  version "2.0.3"
+  resolved "https://registry.yarnpkg.com/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.3.tgz#4ba8ddb720221f432e443bd5f9117fd22cfd4762"
+  integrity sha512-sz7iLqvVUg1gIedBOvlkxPlc8/uVzyS5OwGz1cKjXzkl3FpL3al0crU8YGU1WoHkxn0Wxbw5tyi6hvzJKNzFsw==
+
 "@types/jasmine@^3.5.10":
   version "3.5.10"
   resolved "https://registry.yarnpkg.com/@types/jasmine/-/jasmine-3.5.10.tgz#a1a41012012b5da9d4b205ba9eba58f6cce2ab7b"
@@ -285,9 +303,9 @@
   integrity sha512-rouEWBImiRaSJsVA+ITTFM6ZxibuAlTuNOCyxVbwreu6k6+ujs7DfnU9o+PShFhET78pMBl3eH+AGSI5eOTkPA==
 
 "@types/node@^10.1.0":
-  version "10.17.26"
-  resolved "https://registry.yarnpkg.com/@types/node/-/node-10.17.26.tgz#a8a119960bff16b823be4c617da028570779bcfd"
-  integrity sha512-myMwkO2Cr82kirHY8uknNRHEVtn0wV3DTQfkrjx17jmkstDRZ24gNUdl8AHXVyVclTYI/bNjgTPTAWvWLqXqkw==
+  version "10.17.31"
+  resolved "https://registry.yarnpkg.com/@types/node/-/node-10.17.31.tgz#fd3578fed25e5946372b06dab43eae49248367fa"
+  integrity sha512-AiazLSnsm7GfTxr08GrqeqMxygR/yV78RDk5gaw+S7pOP70BIqUbTFl9vZRyUC/XubcwIqkiiHxbJNFAGvSoOw==
 
 "@types/node@^13.11.1":
   version "13.13.12"
@@ -435,16 +453,6 @@
   dependencies:
     type-fest "^0.11.0"
 
-ansi-regex@^2.0.0:
-  version "2.1.1"
-  resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-2.1.1.tgz#c3b33ab5ee360d86e0e628f0468ae7ef27d654df"
-  integrity sha1-w7M6te42DYbg5ijwRorn7yfWVN8=
-
-ansi-regex@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-3.0.0.tgz#ed0317c322064f79466c02966bddb605ab37d998"
-  integrity sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=
-
 ansi-regex@^4.1.0:
   version "4.1.0"
   resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-4.1.0.tgz#8b9f8f08cf1acb843756a839ca8c7e3168c51997"
@@ -732,6 +740,25 @@
   resolved "https://registry.yarnpkg.com/bytes/-/bytes-3.1.0.tgz#f6cf7933a360e0588fa9fde85651cdc7f805d1f6"
   integrity sha512-zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg==
 
+c8@~7.1.0:
+  version "7.1.2"
+  resolved "https://registry.yarnpkg.com/c8/-/c8-7.1.2.tgz#3fd785e8d264175ceffe92c74607f5cfb12f018d"
+  integrity sha512-lCEwL9lbvWOQLxoLw8RF7PM8Cdj+rKxRp/PyWC9S8xASvYHRwXQ2gxzsNTgLhQM1Utc1YDAjzQYPQIxVEyelGg==
+  dependencies:
+    "@bcoe/v8-coverage" "^0.2.3"
+    "@istanbuljs/schema" "^0.1.2"
+    find-up "^4.0.0"
+    foreground-child "^2.0.0"
+    furi "^2.0.0"
+    istanbul-lib-coverage "^3.0.0"
+    istanbul-lib-report "^3.0.0"
+    istanbul-reports "^3.0.2"
+    rimraf "^3.0.0"
+    test-exclude "^6.0.0"
+    v8-to-istanbul "^4.1.2"
+    yargs "^15.0.0"
+    yargs-parser "^18.0.0"
+
 cacheable-request@^6.0.0:
   version "6.1.0"
   resolved "https://registry.yarnpkg.com/cacheable-request/-/cacheable-request-6.1.0.tgz#20ffb8bd162ba4be11e9567d823db651052ca912"
@@ -764,11 +791,6 @@
     map-obj "^4.0.0"
     quick-lru "^4.0.1"
 
-camelcase@^4.1.0:
-  version "4.1.0"
-  resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-4.1.0.tgz#d545635be1e33c542649c69173e5de6acfae34dd"
-  integrity sha1-1UVjW+HjPFQmScaRc+Xeas+uNN0=
-
 camelcase@^5.0.0, camelcase@^5.3.1:
   version "5.3.1"
   resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-5.3.1.tgz#e3c9b31569e106811df242f715725a1f4c494320"
@@ -854,15 +876,6 @@
   resolved "https://registry.yarnpkg.com/cli-width/-/cli-width-2.2.1.tgz#b0433d0b4e9c847ef18868a4ef16fd5fc8271c48"
   integrity sha512-GRMWDxpOB6Dgk2E5Uo+3eEBvtOOlimMmpbFiKuLFnQzYDavtLFY3K5ona41jgN/WdRZtG7utuVSVTL4HbZHGkw==
 
-cliui@^4.0.0:
-  version "4.1.0"
-  resolved "https://registry.yarnpkg.com/cliui/-/cliui-4.1.0.tgz#348422dbe82d800b3022eef4f6ac10bf2e4d1b49"
-  integrity sha512-4FG+RSG9DL7uEwRUZXZn3SS34DiDPfzP0VOiEwtUWlE+AR2EIg+hSyvrIgUUfhdgR/UkAeW2QHgeP+hWrXs7jQ==
-  dependencies:
-    string-width "^2.1.1"
-    strip-ansi "^4.0.0"
-    wrap-ansi "^2.0.0"
-
 cliui@^6.0.0:
   version "6.0.0"
   resolved "https://registry.yarnpkg.com/cliui/-/cliui-6.0.0.tgz#511d702c0c4e41ca156d7d0e96021f23e13225b1"
@@ -889,11 +902,6 @@
   resolved "https://registry.yarnpkg.com/clsx/-/clsx-1.1.1.tgz#98b3134f9abbdf23b2663491ace13c5c03a73188"
   integrity sha512-6/bPho624p3S2pMyvP5kKBPXnI3ufHLObBFCfgx+LkeR5lg2XYy2hqZqUf45ypD8COn2bhgGJSUE+l5dhNBieA==
 
-code-point-at@^1.0.0:
-  version "1.1.0"
-  resolved "https://registry.yarnpkg.com/code-point-at/-/code-point-at-1.1.0.tgz#0d070b4d043a5bea33a2f1a40e2edb3d9a4ccf77"
-  integrity sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c=
-
 color-convert@^1.9.0:
   version "1.9.3"
   resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-1.9.3.tgz#bb71850690e1f136567de629d2d5471deda4c1e8"
@@ -923,11 +931,6 @@
   resolved "https://registry.yarnpkg.com/colors/-/colors-1.4.0.tgz#c50491479d4c1bdaed2c9ced32cf7c7dc2360f78"
   integrity sha512-a+UqTh4kgZg/SlGvfbzDHpgRu7AAQOmmqRHJnxhRZICKFUT91brVhNNt58CMWU9PsBbv3PDCZUHbVxuDiH2mtA==
 
-commander@~2.20.3:
-  version "2.20.3"
-  resolved "https://registry.yarnpkg.com/commander/-/commander-2.20.3.tgz#fd485e84c03eb4881c20722ba48035e8531aeb33"
-  integrity sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==
-
 commondir@^1.0.1:
   version "1.0.1"
   resolved "https://registry.yarnpkg.com/commondir/-/commondir-1.0.1.tgz#ddd800da0c66127393cca5950ea968a3aaf1253b"
@@ -990,6 +993,13 @@
   resolved "https://registry.yarnpkg.com/content-type/-/content-type-1.0.4.tgz#e138cc75e040c727b1966fe5e5f8c9aee256fe3b"
   integrity sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==
 
+convert-source-map@^1.6.0:
+  version "1.7.0"
+  resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.7.0.tgz#17a2cb882d7f77d3490585e2ce6c524424a3a442"
+  integrity sha512-4FJkXzKXEDB1snCFZlLP4gpC3JILicCpGbzG9f9G7tGqGCzETQ2hWPrcinA9oU4wtf2biUaEH5065UnMeR33oA==
+  dependencies:
+    safe-buffer "~5.1.1"
+
 cookie@0.3.1:
   version "0.3.1"
   resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.3.1.tgz#e7e0a1f9ef43b4c8ba925c5c5a96e806d16873bb"
@@ -1036,15 +1046,7 @@
     safe-buffer "^5.0.1"
     sha.js "^2.4.8"
 
-cross-spawn@^4:
-  version "4.0.2"
-  resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-4.0.2.tgz#7b9247621c23adfdd3856004a823cbe397424d41"
-  integrity sha1-e5JHYhwjrf3ThWAEqCPL45dCTUE=
-  dependencies:
-    lru-cache "^4.0.1"
-    which "^1.2.9"
-
-cross-spawn@^6.0.0, cross-spawn@^6.0.5:
+cross-spawn@^6.0.5:
   version "6.0.5"
   resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-6.0.5.tgz#4a5ec7c64dfae22c3a14124dbacdee846d80cbc4"
   integrity sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==
@@ -1116,7 +1118,7 @@
   dependencies:
     ms "2.0.0"
 
-debug@^3.1.0, debug@^3.2.6:
+debug@^3.2.6:
   version "3.2.6"
   resolved "https://registry.yarnpkg.com/debug/-/debug-3.2.6.tgz#e83d17de16d8a7efb7717edbe5fb10135eee629b"
   integrity sha512-mel+jf7nrtEl5Pn1Qx46zARXKDpBbvzezse7p7LqINmdoIk8PYP5SySaxEmYv6TZ0JyEKA1hsCId6DIhgITtWQ==
@@ -1145,7 +1147,7 @@
     decamelize "^1.1.0"
     map-obj "^1.0.0"
 
-decamelize@^1.1.0, decamelize@^1.1.1, decamelize@^1.2.0:
+decamelize@^1.1.0, decamelize@^1.2.0:
   version "1.2.0"
   resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290"
   integrity sha1-9lNNFRSCabIDUue+4m9QH5oZEpA=
@@ -1604,19 +1606,6 @@
     md5.js "^1.3.4"
     safe-buffer "^5.1.1"
 
-execa@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/execa/-/execa-1.0.0.tgz#c6236a5bb4df6d6f15e88e7f017798216749ddd8"
-  integrity sha512-adbxcyWV46qiHyvSp50TKt05tB4tK3HcmF7/nxfAdhnox83seTDbwnaqKO4sXRy7roHAIFqJP/Rw/AuEbX61LA==
-  dependencies:
-    cross-spawn "^6.0.0"
-    get-stream "^4.0.0"
-    is-stream "^1.1.0"
-    npm-run-path "^2.0.0"
-    p-finally "^1.0.0"
-    signal-exit "^3.0.0"
-    strip-eof "^1.0.0"
-
 execa@^4.0.0:
   version "4.0.2"
   resolved "https://registry.yarnpkg.com/execa/-/execa-4.0.2.tgz#ad87fb7b2d9d564f70d2b62d511bee41d5cbb240"
@@ -1700,21 +1689,7 @@
     statuses "~1.5.0"
     unpipe "~1.0.0"
 
-find-up@^2.1.0:
-  version "2.1.0"
-  resolved "https://registry.yarnpkg.com/find-up/-/find-up-2.1.0.tgz#45d1b7e506c717ddd482775a2b77920a3c0c57a7"
-  integrity sha1-RdG35QbHF93UgndaK3eSCjwMV6c=
-  dependencies:
-    locate-path "^2.0.0"
-
-find-up@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/find-up/-/find-up-3.0.0.tgz#49169f1d7993430646da61ecc5ae355c21c97b73"
-  integrity sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==
-  dependencies:
-    locate-path "^3.0.0"
-
-find-up@^4.1.0:
+find-up@^4.0.0, find-up@^4.1.0:
   version "4.1.0"
   resolved "https://registry.yarnpkg.com/find-up/-/find-up-4.1.0.tgz#97afe7d6cdc0bc5928584b7c8d7b16e8a9aa5d19"
   integrity sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==
@@ -1746,13 +1721,13 @@
   resolved "https://registry.yarnpkg.com/foreach/-/foreach-2.0.5.tgz#0bee005018aeb260d0a3af3ae658dd0136ec1b99"
   integrity sha1-C+4AUBiusmDQo6865ljdATbsG5k=
 
-foreground-child@^1.5.6:
-  version "1.5.6"
-  resolved "https://registry.yarnpkg.com/foreground-child/-/foreground-child-1.5.6.tgz#4fd71ad2dfde96789b980a5c0a295937cb2f5ce9"
-  integrity sha1-T9ca0t/elnibmApcCilZN8svXOk=
+foreground-child@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/foreground-child/-/foreground-child-2.0.0.tgz#71b32800c9f15aa8f2f83f4a6bd9bff35d861a53"
+  integrity sha512-dCIq9FpEcyQyXKCkyzmlPTFNgrCzPudOe+mhvJU5zAtlBnGVy2yKxtfsxK2tQBThwq225jcvBjpw1Gr40uzZCA==
   dependencies:
-    cross-spawn "^4"
-    signal-exit "^3.0.0"
+    cross-spawn "^7.0.0"
+    signal-exit "^3.0.2"
 
 fs-extra@^7.0.1:
   version "7.0.1"
@@ -1783,6 +1758,14 @@
   resolved "https://registry.yarnpkg.com/functional-red-black-tree/-/functional-red-black-tree-1.0.1.tgz#1b0ab3bd553b2a0d6399d29c0e3ea0b252078327"
   integrity sha1-GwqzvVU7Kg1jmdKcDj6gslIHgyc=
 
+furi@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/furi/-/furi-2.0.0.tgz#13d85826a1af21acc691da6254b3888fc39f0b4a"
+  integrity sha512-uKuNsaU0WVaK/vmvj23wW1bicOFfyqSsAIH71bRZx8kA4Xj+YCHin7CJKJJjkIsmxYaPFLk9ljmjEyB7xF7WvQ==
+  dependencies:
+    "@types/is-windows" "^1.0.0"
+    is-windows "^1.0.2"
+
 fwd-stream@^1.0.4:
   version "1.0.4"
   resolved "https://registry.yarnpkg.com/fwd-stream/-/fwd-stream-1.0.4.tgz#ed281cabed46feecf921ee32dc4c50b372ac7cfa"
@@ -1790,11 +1773,6 @@
   dependencies:
     readable-stream "~1.0.26-4"
 
-get-caller-file@^1.0.1:
-  version "1.0.3"
-  resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-1.0.3.tgz#f978fa4c90d1dfe7ff2d6beda2a515e713bdcf4a"
-  integrity sha512-3t6rVToeoZfYSGd8YoLFR2DJkiQrIiUrGcjvFX2mDw3bn6k2OtwHN0TNCLbBO+w8qTvimhDkv+LSscbJY1vE6w==
-
 get-caller-file@^2.0.1:
   version "2.0.5"
   resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-2.0.5.tgz#4f94412a82db32f36e3b0b9741f8a97feb031f7e"
@@ -1805,7 +1783,7 @@
   resolved "https://registry.yarnpkg.com/get-stdin/-/get-stdin-6.0.0.tgz#9e09bf712b360ab9225e812048f71fde9c89657b"
   integrity sha512-jp4tHawyV7+fkkSKyvjuLZswblUtz+SQKzSWnBbii16BuZksJlU1wuBYXY75r+duh/llF1ur6oNwi+2ZzjKZ7g==
 
-get-stream@^4.0.0, get-stream@^4.1.0:
+get-stream@^4.1.0:
   version "4.1.0"
   resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-4.1.0.tgz#c1b255575f3dc21d59bfc79cd3d2b46b1c3a54b5"
   integrity sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w==
@@ -1895,18 +1873,6 @@
     update-notifier "^4.1.0"
     write-file-atomic "^3.0.3"
 
-handlebars@^4.0.3:
-  version "4.7.6"
-  resolved "https://registry.yarnpkg.com/handlebars/-/handlebars-4.7.6.tgz#d4c05c1baf90e9945f77aa68a7a219aa4a7df74e"
-  integrity sha512-1f2BACcBfiwAfStCKZNrUCgqNZkGsAT7UM3kkYtXuLo0KnaVfjKOyf7PRzB6++aK9STyT1Pd2ZCPe3EGOXleXA==
-  dependencies:
-    minimist "^1.2.5"
-    neo-async "^2.6.0"
-    source-map "^0.6.1"
-    wordwrap "^1.0.0"
-  optionalDependencies:
-    uglify-js "^3.1.4"
-
 hard-rejection@^2.1.0:
   version "2.1.0"
   resolved "https://registry.yarnpkg.com/hard-rejection/-/hard-rejection-2.1.0.tgz#1c6eda5c1685c63942766d79bb40ae773cecd883"
@@ -1924,11 +1890,6 @@
   resolved "https://registry.yarnpkg.com/has-cors/-/has-cors-1.1.0.tgz#5e474793f7ea9843d1bb99c23eef49ff126fff39"
   integrity sha1-XkdHk/fqmEPRu5nCPu9J/xJv/zk=
 
-has-flag@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-1.0.0.tgz#9d9e793165ce017a00f00418c43f942a7b1d11fa"
-  integrity sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=
-
 has-flag@^3.0.0:
   version "3.0.0"
   resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-3.0.0.tgz#b5d454dc2199ae225699f3467e5a07f3b955bafd"
@@ -1994,6 +1955,11 @@
   resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-2.8.8.tgz#7539bd4bc1e0e0a895815a2e0262420b12858488"
   integrity sha512-f/wzC2QaWBs7t9IYqB4T3sR1xviIViXJRJTWBlx2Gf3g0Xi5vI7Yy4koXQ1c9OYDGHN9sBy1DQ2AB8fqZBWhUg==
 
+html-escaper@^2.0.0:
+  version "2.0.2"
+  resolved "https://registry.yarnpkg.com/html-escaper/-/html-escaper-2.0.2.tgz#dfd60027da36a36dfcbe236262c00a5822681453"
+  integrity sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==
+
 http-cache-semantics@^4.0.0:
   version "4.1.0"
   resolved "https://registry.yarnpkg.com/http-cache-semantics/-/http-cache-semantics-4.1.0.tgz#49e91c5cbf36c9b94bcfcd71c23d5249ec74e390"
@@ -2130,11 +2096,6 @@
     has "^1.0.3"
     side-channel "^1.0.2"
 
-invert-kv@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/invert-kv/-/invert-kv-2.0.0.tgz#7393f5afa59ec9ff5f67a27620d11c226e3eec02"
-  integrity sha512-wPVv/y/QQ/Uiirj/vh3oP+1Ww+AWehmi1g5fFWGPF6IpCBCDVrhgHRMvrLfdYcwDh3QJbGXDW4JAuzxElLSqKA==
-
 is-arrayish@^0.2.1:
   version "0.2.1"
   resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d"
@@ -2174,13 +2135,6 @@
   resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-2.1.1.tgz#a88c02535791f02ed37c76a1b9ea9773c833f8c2"
   integrity sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=
 
-is-fullwidth-code-point@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz#ef9e31386f031a7f0d643af82fde50c457ef00cb"
-  integrity sha1-754xOG8DGn8NZDr4L95QxFfvAMs=
-  dependencies:
-    number-is-nan "^1.0.0"
-
 is-fullwidth-code-point@^2.0.0:
   version "2.0.0"
   resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz#a3b30a5c4f199183167aaab93beefae3ddfb654f"
@@ -2260,11 +2214,6 @@
   dependencies:
     has-symbols "^1.0.1"
 
-is-stream@^1.1.0:
-  version "1.1.0"
-  resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-1.1.0.tgz#12d4a3dd4e68e0b79ceb8dbc84173ae80d91ca44"
-  integrity sha1-EtSj3U5o4Lec6428hBc66A2RykQ=
-
 is-stream@^2.0.0:
   version "2.0.0"
   resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-2.0.0.tgz#bde9c32680d6fae04129d6ac9d921ce7815f78e3"
@@ -2287,6 +2236,11 @@
   resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a"
   integrity sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=
 
+is-windows@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/is-windows/-/is-windows-1.0.2.tgz#d1850eb9791ecd18e6182ce12a30f396634bb19d"
+  integrity sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==
+
 is-wsl@^2.1.0:
   version "2.2.0"
   resolved "https://registry.yarnpkg.com/is-wsl/-/is-wsl-2.2.0.tgz#74a4c76e77ca9fd3f932f290c17ea326cd157271"
@@ -2334,27 +2288,27 @@
   resolved "https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10"
   integrity sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=
 
-istanbul-lib-coverage@^1.2.0, istanbul-lib-coverage@^1.2.1:
-  version "1.2.1"
-  resolved "https://registry.yarnpkg.com/istanbul-lib-coverage/-/istanbul-lib-coverage-1.2.1.tgz#ccf7edcd0a0bb9b8f729feeb0930470f9af664f0"
-  integrity sha512-PzITeunAgyGbtY1ibVIUiV679EFChHjoMNRibEIobvmrCRaIgwLxNucOSimtNWUhEib/oO7QY2imD75JVgCJWQ==
+istanbul-lib-coverage@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/istanbul-lib-coverage/-/istanbul-lib-coverage-3.0.0.tgz#f5944a37c70b550b02a78a5c3b2055b280cec8ec"
+  integrity sha512-UiUIqxMgRDET6eR+o5HbfRYP1l0hqkWOs7vNxC/mggutCMUIhWMm8gAHb8tHlyfD3/l6rlgNA5cKdDzEAf6hEg==
 
-istanbul-lib-report@^1.1.3:
-  version "1.1.5"
-  resolved "https://registry.yarnpkg.com/istanbul-lib-report/-/istanbul-lib-report-1.1.5.tgz#f2a657fc6282f96170aaf281eb30a458f7f4170c"
-  integrity sha512-UsYfRMoi6QO/doUshYNqcKJqVmFe9w51GZz8BS3WB0lYxAllQYklka2wP9+dGZeHYaWIdcXUx8JGdbqaoXRXzw==
+istanbul-lib-report@^3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/istanbul-lib-report/-/istanbul-lib-report-3.0.0.tgz#7518fe52ea44de372f460a76b5ecda9ffb73d8a6"
+  integrity sha512-wcdi+uAKzfiGT2abPpKZ0hSU1rGQjUQnLvtY5MpQ7QCTahD3VODhcu4wcfY1YtkGaDD5yuydOLINXsfbus9ROw==
   dependencies:
-    istanbul-lib-coverage "^1.2.1"
-    mkdirp "^0.5.1"
-    path-parse "^1.0.5"
-    supports-color "^3.1.2"
+    istanbul-lib-coverage "^3.0.0"
+    make-dir "^3.0.0"
+    supports-color "^7.1.0"
 
-istanbul-reports@^1.3.0:
-  version "1.5.1"
-  resolved "https://registry.yarnpkg.com/istanbul-reports/-/istanbul-reports-1.5.1.tgz#97e4dbf3b515e8c484caea15d6524eebd3ff4e1a"
-  integrity sha512-+cfoZ0UXzWjhAdzosCPP3AN8vvef8XDkWtTfgaN+7L3YTpNYITnCaEkceo5SEYy644VkHka/P1FvkWvrG/rrJw==
+istanbul-reports@^3.0.2:
+  version "3.0.2"
+  resolved "https://registry.yarnpkg.com/istanbul-reports/-/istanbul-reports-3.0.2.tgz#d593210e5000683750cb09fc0644e4b6e27fd53b"
+  integrity sha512-9tZvz7AiR3PEDNGiV9vIouQ/EAcqMXFmkcA1CDFTwOB98OZVDL0PH9glHotf5Ugp6GCOTypfzGWI/OqjWNCRUw==
   dependencies:
-    handlebars "^4.0.3"
+    html-escaper "^2.0.0"
+    istanbul-lib-report "^3.0.0"
 
 jasmine-core@^3.5.0, jasmine-core@~3.5.0:
   version "3.5.0"
@@ -2369,7 +2323,7 @@
     mkdirp "^0.5.1"
     xmldom "^0.1.22"
 
-jasmine@^3.5.0, jasmine@~3.5.0:
+jasmine@^3.5.0:
   version "3.5.0"
   resolved "https://registry.yarnpkg.com/jasmine/-/jasmine-3.5.0.tgz#7101eabfd043a1fc82ac24e0ab6ec56081357f9e"
   integrity sha512-DYypSryORqzsGoMazemIHUfMkXM7I7easFaxAvNM3Mr6Xz3Fy36TupTrAOxZWN8MVKEU5xECv22J4tUQf3uBzQ==
@@ -2577,13 +2531,6 @@
   dependencies:
     package-json "^6.3.0"
 
-lcid@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/lcid/-/lcid-2.0.0.tgz#6ef5d2df60e52f82eb228a4c373e8d1f397253cf"
-  integrity sha512-avPEb8P8EGnwXKClwsNUgryVjllcRqtMYa49NTsbQagYuT1DcXnl1915oxWjoyGrXR6zH/Y0Zc96xWsPcoDKeA==
-  dependencies:
-    invert-kv "^2.0.0"
-
 level-blobs@^0.1.7:
   version "0.1.7"
   resolved "https://registry.yarnpkg.com/level-blobs/-/level-blobs-0.1.7.tgz#9ab9b97bb99f1edbf9f78a3433e21ed56386bdaf"
@@ -2682,32 +2629,6 @@
   resolved "https://registry.yarnpkg.com/lines-and-columns/-/lines-and-columns-1.1.6.tgz#1c00c743b433cd0a4e80758f7b64a57440d9ff00"
   integrity sha1-HADHQ7QzzQpOgHWPe2SldEDZ/wA=
 
-load-json-file@^4.0.0:
-  version "4.0.0"
-  resolved "https://registry.yarnpkg.com/load-json-file/-/load-json-file-4.0.0.tgz#2f5f45ab91e33216234fd53adab668eb4ec0993b"
-  integrity sha1-L19Fq5HjMhYjT9U62rZo607AmTs=
-  dependencies:
-    graceful-fs "^4.1.2"
-    parse-json "^4.0.0"
-    pify "^3.0.0"
-    strip-bom "^3.0.0"
-
-locate-path@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-2.0.0.tgz#2b568b265eec944c6d9c0de9c3dbbbca0354cd8e"
-  integrity sha1-K1aLJl7slExtnA3pw9u7ygNUzY4=
-  dependencies:
-    p-locate "^2.0.0"
-    path-exists "^3.0.0"
-
-locate-path@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-3.0.0.tgz#dbec3b3ab759758071b58fe59fc41871af21400e"
-  integrity sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==
-  dependencies:
-    p-locate "^3.0.0"
-    path-exists "^3.0.0"
-
 locate-path@^5.0.0:
   version "5.0.0"
   resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-5.0.0.tgz#1afba396afd676a6d42504d0a67a3a7eb9f62aa0"
@@ -2753,14 +2674,6 @@
   resolved "https://registry.yarnpkg.com/lowercase-keys/-/lowercase-keys-2.0.0.tgz#2603e78b7b4b0006cbca2fbcc8a3202558ac9479"
   integrity sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA==
 
-lru-cache@^4.0.1:
-  version "4.1.5"
-  resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-4.1.5.tgz#8bbe50ea85bed59bc9e33dcab8235ee9bcf443cd"
-  integrity sha512-sWZlbEP2OsHNkXrMl5GYk/jKk70MBng6UU4YI/qGDYbgf6YbP4EvmqISbXCoJiRKs+1bSpFHVgQxvJ17F2li5g==
-  dependencies:
-    pseudomap "^1.0.2"
-    yallist "^2.1.2"
-
 ltgt@^2.1.2:
   version "2.2.1"
   resolved "https://registry.yarnpkg.com/ltgt/-/ltgt-2.2.1.tgz#f35ca91c493f7b73da0e07495304f17b31f87ee5"
@@ -2787,13 +2700,6 @@
   dependencies:
     semver "^6.0.0"
 
-map-age-cleaner@^0.1.1:
-  version "0.1.3"
-  resolved "https://registry.yarnpkg.com/map-age-cleaner/-/map-age-cleaner-0.1.3.tgz#7d583a7306434c055fe474b0f45078e6e1b4b92a"
-  integrity sha512-bJzx6nMoP6PDLPBFmg7+xRKeFZvFboMrGlxmNj9ClvX53KrmvM5bXFXEWjbz4cz1AFn+jWJ9z/DJSz7hrs0w3w==
-  dependencies:
-    p-defer "^1.0.0"
-
 map-obj@^1.0.0:
   version "1.0.1"
   resolved "https://registry.yarnpkg.com/map-obj/-/map-obj-1.0.1.tgz#d933ceb9205d82bdcf4886f6742bdc2b4dea146d"
@@ -2818,15 +2724,6 @@
   resolved "https://registry.yarnpkg.com/media-typer/-/media-typer-0.3.0.tgz#8710d7af0aa626f8fffa1ce00168545263255748"
   integrity sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g=
 
-mem@^4.0.0:
-  version "4.3.0"
-  resolved "https://registry.yarnpkg.com/mem/-/mem-4.3.0.tgz#461af497bc4ae09608cdb2e60eefb69bff744178"
-  integrity sha512-qX2bG48pTqYRVmDB37rn/6PT7LcR8T7oAX3bf99u1Tt1nzxYfxkgqDwUwolPlXweM0XzBOBFzSx4kfp7KP1s/w==
-  dependencies:
-    map-age-cleaner "^0.1.1"
-    mimic-fn "^2.0.0"
-    p-is-promise "^2.0.0"
-
 meow@^7.0.0:
   version "7.0.1"
   resolved "https://registry.yarnpkg.com/meow/-/meow-7.0.1.tgz#1ed4a0a50b3844b451369c48362eb0515f04c1dc"
@@ -2876,7 +2773,7 @@
   resolved "https://registry.yarnpkg.com/mime/-/mime-2.4.6.tgz#e5b407c90db442f2beb5b162373d07b69affa4d1"
   integrity sha512-RZKhC3EmpBchfTGBVb8fb+RL2cWyw/32lshnsETttkBAyAUXSGHxbEJWWRXc751DrIxG1q04b8QwMbAwkRPpUA==
 
-mimic-fn@^2.0.0, mimic-fn@^2.1.0:
+mimic-fn@^2.1.0:
   version "2.1.0"
   resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-2.1.0.tgz#7ed2c2ccccaf84d3ffcb7a69b57711fc2083401b"
   integrity sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==
@@ -2922,7 +2819,7 @@
   resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.5.tgz#67d66014b66a6a8aaa0c083c5fd58df4e4e97602"
   integrity sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==
 
-mkdirp@^0.5.0, mkdirp@^0.5.1:
+mkdirp@^0.5.1:
   version "0.5.5"
   resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.5.tgz#d91cefd62d1436ca0f41620e251288d420099def"
   integrity sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==
@@ -2959,17 +2856,12 @@
   resolved "https://registry.yarnpkg.com/negotiator/-/negotiator-0.6.2.tgz#feacf7ccf525a77ae9634436a64883ffeca346fb"
   integrity sha512-hZXc7K2e+PgeI1eDBe/10Ard4ekbfrrqG8Ep+8Jmf4JID2bNg7NvCPOZN+kfF574pFQI7mum2AUqDidoKqcTOw==
 
-neo-async@^2.6.0:
-  version "2.6.1"
-  resolved "https://registry.yarnpkg.com/neo-async/-/neo-async-2.6.1.tgz#ac27ada66167fa8849a6addd837f6b189ad2081c"
-  integrity sha512-iyam8fBuCUpWeKPGpaNMetEocMt364qkCsfL9JuhjXX6dRnguRVOfk2GZaDpPjcOKiiXCPINZC1GczQ7iTq3Zw==
-
 nice-try@^1.0.4:
   version "1.0.5"
   resolved "https://registry.yarnpkg.com/nice-try/-/nice-try-1.0.5.tgz#a3378a7696ce7d223e88fc9b764bd7ef1089e366"
   integrity sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==
 
-normalize-package-data@^2.3.2, normalize-package-data@^2.5.0:
+normalize-package-data@^2.5.0:
   version "2.5.0"
   resolved "https://registry.yarnpkg.com/normalize-package-data/-/normalize-package-data-2.5.0.tgz#e66db1838b200c1dfc233225d12cb36520e234a8"
   integrity sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==
@@ -2989,13 +2881,6 @@
   resolved "https://registry.yarnpkg.com/normalize-url/-/normalize-url-4.5.0.tgz#453354087e6ca96957bd8f5baf753f5982142129"
   integrity sha512-2s47yzUxdexf1OhyRi4Em83iQk0aPvwTddtFz4hnSSw9dCEsLEGf6SwIO8ss/19S9iBb5sJaOuTvTGDeZI00BQ==
 
-npm-run-path@^2.0.0:
-  version "2.0.2"
-  resolved "https://registry.yarnpkg.com/npm-run-path/-/npm-run-path-2.0.2.tgz#35a9232dfa35d7067b4cb2ddf2357b1871536c5f"
-  integrity sha1-NakjLfo11wZ7TLLd8jV7GHFTbF8=
-  dependencies:
-    path-key "^2.0.0"
-
 npm-run-path@^4.0.0:
   version "4.0.1"
   resolved "https://registry.yarnpkg.com/npm-run-path/-/npm-run-path-4.0.1.tgz#b7ecd1e5ed53da8e37a55e1c2269e0b97ed748ea"
@@ -3003,11 +2888,6 @@
   dependencies:
     path-key "^3.0.0"
 
-number-is-nan@^1.0.0:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/number-is-nan/-/number-is-nan-1.0.1.tgz#097b602b53422a522c1afb8790318336941a011d"
-  integrity sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0=
-
 object-assign@^4.1.1:
   version "4.1.1"
   resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863"
@@ -3119,20 +2999,6 @@
     type-check "~0.3.2"
     word-wrap "~1.2.3"
 
-os-homedir@^1.0.1:
-  version "1.0.2"
-  resolved "https://registry.yarnpkg.com/os-homedir/-/os-homedir-1.0.2.tgz#ffbc4988336e0e833de0c168c7ef152121aa7fb3"
-  integrity sha1-/7xJiDNuDoM94MFox+8VISGqf7M=
-
-os-locale@^3.1.0:
-  version "3.1.0"
-  resolved "https://registry.yarnpkg.com/os-locale/-/os-locale-3.1.0.tgz#a802a6ee17f24c10483ab9935719cef4ed16bf1a"
-  integrity sha512-Z8l3R4wYWM40/52Z+S265okfFj8Kt2cC2MKY+xNi3kFs+XGI7WXu/I309QQQYbRW4ijiZ+yxs9pqEhJh0DqW3Q==
-  dependencies:
-    execa "^1.0.0"
-    lcid "^2.0.0"
-    mem "^4.0.0"
-
 os-tmpdir@~1.0.2:
   version "1.0.2"
   resolved "https://registry.yarnpkg.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz#bbe67406c79aa85c5cfec766fe5734555dfa1274"
@@ -3143,49 +3009,13 @@
   resolved "https://registry.yarnpkg.com/p-cancelable/-/p-cancelable-1.1.0.tgz#d078d15a3af409220c886f1d9a0ca2e441ab26cc"
   integrity sha512-s73XxOZ4zpt1edZYZzvhqFa6uvQc1vwUa0K0BdtIZgQMAJj9IbebH+JkgKZc9h+B05PKHLOTl4ajG1BmNrVZlw==
 
-p-defer@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/p-defer/-/p-defer-1.0.0.tgz#9f6eb182f6c9aa8cd743004a7d4f96b196b0fb0c"
-  integrity sha1-n26xgvbJqozXQwBKfU+WsZaw+ww=
-
-p-finally@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/p-finally/-/p-finally-1.0.0.tgz#3fbcfb15b899a44123b34b6dcc18b724336a2cae"
-  integrity sha1-P7z7FbiZpEEjs0ttzBi3JDNqLK4=
-
-p-is-promise@^2.0.0:
-  version "2.1.0"
-  resolved "https://registry.yarnpkg.com/p-is-promise/-/p-is-promise-2.1.0.tgz#918cebaea248a62cf7ffab8e3bca8c5f882fc42e"
-  integrity sha512-Y3W0wlRPK8ZMRbNq97l4M5otioeA5lm1z7bkNkxCka8HSPjR0xRWmpCmc9utiaLP9Jb1eD8BgeIxTW4AIF45Pg==
-
-p-limit@^1.1.0:
-  version "1.3.0"
-  resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-1.3.0.tgz#b86bd5f0c25690911c7590fcbfc2010d54b3ccb8"
-  integrity sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q==
-  dependencies:
-    p-try "^1.0.0"
-
-p-limit@^2.0.0, p-limit@^2.2.0:
+p-limit@^2.2.0:
   version "2.3.0"
   resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-2.3.0.tgz#3dd33c647a214fdfffd835933eb086da0dc21db1"
   integrity sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==
   dependencies:
     p-try "^2.0.0"
 
-p-locate@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-2.0.0.tgz#20a0103b222a70c8fd39cc2e580680f3dde5ec43"
-  integrity sha1-IKAQOyIqcMj9OcwuWAaA893l7EM=
-  dependencies:
-    p-limit "^1.1.0"
-
-p-locate@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-3.0.0.tgz#322d69a05c0264b25997d9f40cd8a891ab0064a4"
-  integrity sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==
-  dependencies:
-    p-limit "^2.0.0"
-
 p-locate@^4.1.0:
   version "4.1.0"
   resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-4.1.0.tgz#a3428bb7088b3a60292f66919278b7c297ad4f07"
@@ -3193,11 +3023,6 @@
   dependencies:
     p-limit "^2.2.0"
 
-p-try@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/p-try/-/p-try-1.0.0.tgz#cbc79cdbaf8fd4228e13f621f2b1a237c1b207b3"
-  integrity sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M=
-
 p-try@^2.0.0:
   version "2.2.0"
   resolved "https://registry.yarnpkg.com/p-try/-/p-try-2.2.0.tgz#cb2868540e313d61de58fafbe35ce9004d5540e6"
@@ -3232,14 +3057,6 @@
     pbkdf2 "^3.0.3"
     safe-buffer "^5.1.1"
 
-parse-json@^4.0.0:
-  version "4.0.0"
-  resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-4.0.0.tgz#be35f5425be1f7f6c747184f98a788cb99477ee0"
-  integrity sha1-vjX1Qlvh9/bHRxhPmKeIy5lHfuA=
-  dependencies:
-    error-ex "^1.3.1"
-    json-parse-better-errors "^1.0.1"
-
 parse-json@^5.0.0:
   version "5.0.0"
   resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-5.0.0.tgz#73e5114c986d143efa3712d4ea24db9a4266f60f"
@@ -3269,11 +3086,6 @@
   resolved "https://registry.yarnpkg.com/parseurl/-/parseurl-1.3.3.tgz#9da19e7bee8d12dff0513ed5b76957793bc2e8d4"
   integrity sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==
 
-path-exists@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-3.0.0.tgz#ce0ebeaa5f78cb18925ea7d810d7b59b010fd515"
-  integrity sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=
-
 path-exists@^4.0.0:
   version "4.0.0"
   resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-4.0.0.tgz#513bdbe2d3b95d7762e8c1137efa195c6c61b5b3"
@@ -3284,7 +3096,7 @@
   resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f"
   integrity sha1-F0uSaHNVNP+8es5r9TpanhtcX18=
 
-path-key@^2.0.0, path-key@^2.0.1:
+path-key@^2.0.1:
   version "2.0.1"
   resolved "https://registry.yarnpkg.com/path-key/-/path-key-2.0.1.tgz#411cadb574c5a140d3a4b1910d40d80cc9f40b40"
   integrity sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=
@@ -3294,18 +3106,11 @@
   resolved "https://registry.yarnpkg.com/path-key/-/path-key-3.1.1.tgz#581f6ade658cbba65a0d3380de7753295054f375"
   integrity sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==
 
-path-parse@^1.0.5, path-parse@^1.0.6:
+path-parse@^1.0.6:
   version "1.0.6"
   resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.6.tgz#d62dbb5679405d72c4737ec58600e9ddcf06d24c"
   integrity sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw==
 
-path-type@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/path-type/-/path-type-3.0.0.tgz#cef31dc8e0a1a3bb0d105c0cd97cf3bf47f4e36f"
-  integrity sha512-T2ZUsdZFHgA3u4e5PfPbjd7HDDpxPnQb5jN0SrDsjNSuVXHJqtwTnWqG0B1jZrgmJ/7lj1EmVIByWt1gxGkWvg==
-  dependencies:
-    pify "^3.0.0"
-
 pbkdf2@^3.0.3:
   version "3.1.1"
   resolved "https://registry.yarnpkg.com/pbkdf2/-/pbkdf2-3.1.1.tgz#cb8724b0fada984596856d1a6ebafd3584654b94"
@@ -3322,11 +3127,6 @@
   resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.2.2.tgz#21f333e9b6b8eaff02468f5146ea406d345f4dad"
   integrity sha512-q0M/9eZHzmr0AulXyPwNfZjtwZ/RBZlbN3K3CErVrk50T2ASYI7Bye0EvekFY3IP1Nt2DHu0re+V2ZHIpMkuWg==
 
-pify@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/pify/-/pify-3.0.0.tgz#e5a4acd2c101fdf3d9a4d07f0dbc4db49dd28176"
-  integrity sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY=
-
 popper.js@1.16.1-lts:
   version "1.16.1-lts"
   resolved "https://registry.yarnpkg.com/popper.js/-/popper.js-1.16.1-lts.tgz#cf6847b807da3799d80ee3d6d2f90df8a3f50b05"
@@ -3407,11 +3207,6 @@
   resolved "https://registry.yarnpkg.com/prr/-/prr-1.0.1.tgz#d3fc114ba06995a45ec6893f484ceb1d78f5f476"
   integrity sha1-0/wRS6BplaRexok/SEzrHXj19HY=
 
-pseudomap@^1.0.2:
-  version "1.0.2"
-  resolved "https://registry.yarnpkg.com/pseudomap/-/pseudomap-1.0.2.tgz#f052a28da70e618917ef0a8ac34c1ae5a68286b3"
-  integrity sha1-8FKijacOYYkX7wqKw0wa5aaChrM=
-
 public-encrypt@^4.0.0:
   version "4.0.3"
   resolved "https://registry.yarnpkg.com/public-encrypt/-/public-encrypt-4.0.3.tgz#4fcc9d77a07e48ba7527e7cbe0de33d0701331e0"
@@ -3533,14 +3328,6 @@
     object-assign "^4.1.1"
     prop-types "^15.6.2"
 
-read-pkg-up@^4.0.0:
-  version "4.0.0"
-  resolved "https://registry.yarnpkg.com/read-pkg-up/-/read-pkg-up-4.0.0.tgz#1b221c6088ba7799601c808f91161c66e58f8978"
-  integrity sha512-6etQSH7nJGsK0RbG/2TeDzZFa8shjQ1um+SwQQ5cwKy0dhSXdOncEhb1CPpvQG4h7FyOV6EB6YlV0yJvZQNAkA==
-  dependencies:
-    find-up "^3.0.0"
-    read-pkg "^3.0.0"
-
 read-pkg-up@^7.0.1:
   version "7.0.1"
   resolved "https://registry.yarnpkg.com/read-pkg-up/-/read-pkg-up-7.0.1.tgz#f3a6135758459733ae2b95638056e1854e7ef507"
@@ -3550,15 +3337,6 @@
     read-pkg "^5.2.0"
     type-fest "^0.8.1"
 
-read-pkg@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/read-pkg/-/read-pkg-3.0.0.tgz#9cbc686978fee65d16c00e2b19c237fcf6e38389"
-  integrity sha1-nLxoaXj+5l0WwA4rGcI3/Pbjg4k=
-  dependencies:
-    load-json-file "^4.0.0"
-    normalize-package-data "^2.3.2"
-    path-type "^3.0.0"
-
 read-pkg@^5.2.0:
   version "5.2.0"
   resolved "https://registry.yarnpkg.com/read-pkg/-/read-pkg-5.2.0.tgz#7bf295438ca5a33e56cd30e053b34ee7250c93cc"
@@ -3668,11 +3446,6 @@
   resolved "https://registry.yarnpkg.com/require-directory/-/require-directory-2.1.1.tgz#8c64ad5fd30dab1c976e2344ffe7f792a6a6df42"
   integrity sha1-jGStX9MNqxyXbiNE/+f3kqam30I=
 
-require-main-filename@^1.0.1:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/require-main-filename/-/require-main-filename-1.0.1.tgz#97f717b69d48784f5f526a6c5aa8ffdda055a4d1"
-  integrity sha1-l/cXtp1IeE9fUmpsWqj/3aBVpNE=
-
 require-main-filename@^2.0.0:
   version "2.0.0"
   resolved "https://registry.yarnpkg.com/require-main-filename/-/require-main-filename-2.0.0.tgz#d0b329ecc7cc0f61649f62215be69af54aa8989b"
@@ -3727,14 +3500,14 @@
   dependencies:
     glob "^7.1.3"
 
-rimraf@^2.6.0, rimraf@^2.6.2, rimraf@^2.6.3:
+rimraf@^2.6.0, rimraf@^2.6.3:
   version "2.7.1"
   resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.7.1.tgz#35797f13a7fdadc566142c29d4f07ccad483e3ec"
   integrity sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==
   dependencies:
     glob "^7.1.3"
 
-rimraf@^3.0.2:
+rimraf@^3.0.0, rimraf@^3.0.2:
   version "3.0.2"
   resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-3.0.2.tgz#f1a5402ba6220ad52cc1282bac1ae3aa49fd061a"
   integrity sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==
@@ -3905,7 +3678,7 @@
     es-abstract "^1.17.0-next.1"
     object-inspect "^1.7.0"
 
-signal-exit@^3.0.0, signal-exit@^3.0.2:
+signal-exit@^3.0.2:
   version "3.0.3"
   resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.3.tgz#a1410c2edd8f077b08b4e253c8eacfcaf057461c"
   integrity sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA==
@@ -3986,23 +3759,16 @@
   resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.6.1.tgz#74722af32e9614e9c287a8d0bbde48b5e2f1a263"
   integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==
 
+source-map@^0.7.3:
+  version "0.7.3"
+  resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.7.3.tgz#5302f8169031735226544092e64981f751750383"
+  integrity sha512-CkCj6giN3S+n9qrYiBTX5gystlENnRW5jZeNLHpe6aue+SrHcG5VYwujhW9s4dY31mEGsxBDrHR6oI69fTXsaQ==
+
 sourcemap-codec@^1.4.4:
   version "1.4.8"
   resolved "https://registry.yarnpkg.com/sourcemap-codec/-/sourcemap-codec-1.4.8.tgz#ea804bd94857402e6992d05a38ef1ae35a9ab4c4"
   integrity sha512-9NykojV5Uih4lgo5So5dtw+f0JgJX30KCNI8gwhz2J9A15wD0Ml6tjHKwf6fTSa6fAdVBdZeNOs9eJ71qCk8vA==
 
-spawn-wrap@^1.4.2:
-  version "1.4.3"
-  resolved "https://registry.yarnpkg.com/spawn-wrap/-/spawn-wrap-1.4.3.tgz#81b7670e170cca247d80bf5faf0cfb713bdcf848"
-  integrity sha512-IgB8md0QW/+tWqcavuFgKYR/qIRvJkRLPJDFaoXtLLUaVcCDK0+HeFTkmQHj3eprcYhc+gOl0aEA1w7qZlYezw==
-  dependencies:
-    foreground-child "^1.5.6"
-    mkdirp "^0.5.0"
-    os-homedir "^1.0.1"
-    rimraf "^2.6.2"
-    signal-exit "^3.0.2"
-    which "^1.3.0"
-
 spdx-correct@^3.0.0:
   version "3.1.1"
   resolved "https://registry.yarnpkg.com/spdx-correct/-/spdx-correct-3.1.1.tgz#dece81ac9c1e6713e5f7d1b6f17d468fa53d89a9"
@@ -4055,23 +3821,6 @@
   resolved "https://registry.yarnpkg.com/string-range/-/string-range-1.2.2.tgz#a893ed347e72299bc83befbbf2a692a8d239d5dd"
   integrity sha1-qJPtNH5yKZvIO++78qaSqNI51d0=
 
-string-width@^1.0.1:
-  version "1.0.2"
-  resolved "https://registry.yarnpkg.com/string-width/-/string-width-1.0.2.tgz#118bdf5b8cdc51a2a7e70d211e07e2b0b9b107d3"
-  integrity sha1-EYvfW4zcUaKn5w0hHgfisLmxB9M=
-  dependencies:
-    code-point-at "^1.0.0"
-    is-fullwidth-code-point "^1.0.0"
-    strip-ansi "^3.0.0"
-
-string-width@^2.0.0, string-width@^2.1.1:
-  version "2.1.1"
-  resolved "https://registry.yarnpkg.com/string-width/-/string-width-2.1.1.tgz#ab93f27a8dc13d28cac815c462143a6d9012ae9e"
-  integrity sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==
-  dependencies:
-    is-fullwidth-code-point "^2.0.0"
-    strip-ansi "^4.0.0"
-
 string-width@^3.0.0:
   version "3.1.0"
   resolved "https://registry.yarnpkg.com/string-width/-/string-width-3.1.0.tgz#22767be21b62af1081574306f69ac51b62203961"
@@ -4137,20 +3886,6 @@
   dependencies:
     safe-buffer "~5.1.0"
 
-strip-ansi@^3.0.0, strip-ansi@^3.0.1:
-  version "3.0.1"
-  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-3.0.1.tgz#6a385fb8853d952d5ff05d0e8aaf94278dc63dcf"
-  integrity sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=
-  dependencies:
-    ansi-regex "^2.0.0"
-
-strip-ansi@^4.0.0:
-  version "4.0.0"
-  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-4.0.0.tgz#a8479022eb1ac368a871389b635262c505ee368f"
-  integrity sha1-qEeQIusaw2iocTibY1JixQXuNo8=
-  dependencies:
-    ansi-regex "^3.0.0"
-
 strip-ansi@^5.1.0, strip-ansi@^5.2.0:
   version "5.2.0"
   resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-5.2.0.tgz#8c9a536feb6afc962bdfa5b104a5091c1ad9c0ae"
@@ -4165,16 +3900,6 @@
   dependencies:
     ansi-regex "^5.0.0"
 
-strip-bom@^3.0.0:
-  version "3.0.0"
-  resolved "https://registry.yarnpkg.com/strip-bom/-/strip-bom-3.0.0.tgz#2334c18e9c759f7bdd56fdef7e9ae3d588e68ed3"
-  integrity sha1-IzTBjpx1n3vdVv3vfprj1YjmjtM=
-
-strip-eof@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/strip-eof/-/strip-eof-1.0.0.tgz#bb43ff5598a6eb05d89b59fcd129c983313606bf"
-  integrity sha1-u0P/VZim6wXYm1n80SnJgzE2Br8=
-
 strip-final-newline@^2.0.0:
   version "2.0.0"
   resolved "https://registry.yarnpkg.com/strip-final-newline/-/strip-final-newline-2.0.0.tgz#89b852fb2fcbe936f6f4b3187afb0a12c1ab58ad"
@@ -4197,13 +3922,6 @@
   resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz#3c531942e908c2697c0ec344858c286c7ca0a60a"
   integrity sha1-PFMZQukIwml8DsNEhYwobHygpgo=
 
-supports-color@^3.1.2:
-  version "3.2.3"
-  resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-3.2.3.tgz#65ac0504b3954171d8a64946b2ae3cbb8a5f54f6"
-  integrity sha1-ZawFBLOVQXHYpklGsq48u4pfVPY=
-  dependencies:
-    has-flag "^1.0.0"
-
 supports-color@^5.3.0:
   version "5.5.0"
   resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-5.5.0.tgz#e2e69a44ac8772f78a1ec0b35b689df6530efc8f"
@@ -4233,15 +3951,14 @@
   resolved "https://registry.yarnpkg.com/term-size/-/term-size-2.2.0.tgz#1f16adedfe9bdc18800e1776821734086fcc6753"
   integrity sha512-a6sumDlzyHVJWb8+YofY4TW112G6p2FCPEAFk+59gIYHv3XHRhm9ltVQ9kli4hNWeQBwSpe8cRN25x0ROunMOw==
 
-test-exclude@^5.2.2:
-  version "5.2.3"
-  resolved "https://registry.yarnpkg.com/test-exclude/-/test-exclude-5.2.3.tgz#c3d3e1e311eb7ee405e092dac10aefd09091eac0"
-  integrity sha512-M+oxtseCFO3EDtAaGH7iiej3CBkzXqFMbzqYAACdzKui4eZA+pq3tZEwChvOdNfa7xxy8BfbmgJSIr43cC/+2g==
+test-exclude@^6.0.0:
+  version "6.0.0"
+  resolved "https://registry.yarnpkg.com/test-exclude/-/test-exclude-6.0.0.tgz#04a8698661d805ea6fa293b6cb9e63ac044ef15e"
+  integrity sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==
   dependencies:
-    glob "^7.1.3"
+    "@istanbuljs/schema" "^0.1.2"
+    glob "^7.1.4"
     minimatch "^3.0.4"
-    read-pkg-up "^4.0.0"
-    require-main-filename "^2.0.0"
 
 text-table@^0.2.0:
   version "0.2.0"
@@ -4380,13 +4097,6 @@
   resolved "https://registry.yarnpkg.com/ua-parser-js/-/ua-parser-js-0.7.21.tgz#853cf9ce93f642f67174273cc34565ae6f308777"
   integrity sha512-+O8/qh/Qj8CgC6eYBVBykMrNtp5Gebn4dlGD/kKXVkJNDwyrAwSIqwz8CDf+tsAIWVycKcku6gIXJ0qwx/ZXaQ==
 
-uglify-js@^3.1.4:
-  version "3.9.4"
-  resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-3.9.4.tgz#867402377e043c1fc7b102253a22b64e5862401b"
-  integrity sha512-8RZBJq5smLOa7KslsNsVcSH+KOXf1uDU8yqLeNuVKwmT0T3FA0ZoXlinQfRad7SDcbZZRZE4ov+2v71EnxNyCA==
-  dependencies:
-    commander "~2.20.3"
-
 ultron@~1.1.0:
   version "1.1.1"
   resolved "https://registry.yarnpkg.com/ultron/-/ultron-1.1.1.tgz#9fe1536a10a664a65266a1e3ccf85fd36302bc9c"
@@ -4452,39 +4162,19 @@
   resolved "https://registry.yarnpkg.com/utils-merge/-/utils-merge-1.0.1.tgz#9f95710f50a267947b2ccc124741c1028427e713"
   integrity sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM=
 
-uuid@^3.3.2:
-  version "3.4.0"
-  resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.4.0.tgz#b23e4358afa8a202fe7a100af1f5f883f02007ee"
-  integrity sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==
-
 v8-compile-cache@^2.0.3:
   version "2.1.1"
   resolved "https://registry.yarnpkg.com/v8-compile-cache/-/v8-compile-cache-2.1.1.tgz#54bc3cdd43317bca91e35dcaf305b1a7237de745"
   integrity sha512-8OQ9CL+VWyt3JStj7HX7/ciTL2V3Rl1Wf5OL+SNTm0yK1KvtReVulksyeRnCANHHuUxHlQig+JJDlUhBt1NQDQ==
 
-v8-coverage@1.0.9:
-  version "1.0.9"
-  resolved "https://registry.yarnpkg.com/v8-coverage/-/v8-coverage-1.0.9.tgz#780889680c0fea0f587adf22e2b5f443b9434745"
-  integrity sha512-JolsCH1JDI2QULrxkAGZaovJPvg/Q0p20Uj0F5N8fPtYDtz38gNBRPQ/WVXlLLd3d8WHvKN96AfE4XFk4u0g2g==
+v8-to-istanbul@^4.1.2:
+  version "4.1.4"
+  resolved "https://registry.yarnpkg.com/v8-to-istanbul/-/v8-to-istanbul-4.1.4.tgz#b97936f21c0e2d9996d4985e5c5156e9d4e49cd6"
+  integrity sha512-Rw6vJHj1mbdK8edjR7+zuJrpDtKIgNdAvTSAcpYfgMIw+u2dPDntD3dgN4XQFLU2/fvFQdzj+EeSGfd/jnY5fQ==
   dependencies:
-    debug "^3.1.0"
-    foreground-child "^1.5.6"
-    istanbul-lib-coverage "^1.2.0"
-    istanbul-lib-report "^1.1.3"
-    istanbul-reports "^1.3.0"
-    mkdirp "^0.5.1"
-    rimraf "^2.6.2"
-    signal-exit "^3.0.2"
-    spawn-wrap "^1.4.2"
-    test-exclude "^5.2.2"
-    uuid "^3.3.2"
-    v8-to-istanbul "1.2.0"
-    yargs "^11.0.0"
-
-v8-to-istanbul@1.2.0:
-  version "1.2.0"
-  resolved "https://registry.yarnpkg.com/v8-to-istanbul/-/v8-to-istanbul-1.2.0.tgz#f6a22ffb08b2202aaba8c2be497d1d41fe8fb4b6"
-  integrity sha512-rVSmjdEfJmOHN8GYCbg+XUhbzXZr7DzdaXIslB9DdcopGZEMsW5x5qIdxr/8DcW7msULHNnvs/xUY1TszvhKRw==
+    "@types/istanbul-lib-coverage" "^2.0.1"
+    convert-source-map "^1.6.0"
+    source-map "^0.7.3"
 
 validate-npm-package-license@^3.0.1:
   version "3.0.4"
@@ -4509,7 +4199,7 @@
   resolved "https://registry.yarnpkg.com/which-module/-/which-module-2.0.0.tgz#d9ef07dce77b9902b8a3a8fa4b31c3e3f7e6e87a"
   integrity sha1-2e8H3Od7mQK4o6j6SzHD4/fm6Ho=
 
-which@^1.2.1, which@^1.2.9, which@^1.3.0:
+which@^1.2.1, which@^1.2.9:
   version "1.3.1"
   resolved "https://registry.yarnpkg.com/which/-/which-1.3.1.tgz#a45043d54f5805316da8d62f9f50918d3da70b0a"
   integrity sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==
@@ -4535,19 +4225,6 @@
   resolved "https://registry.yarnpkg.com/word-wrap/-/word-wrap-1.2.3.tgz#610636f6b1f703891bd34771ccb17fb93b47079c"
   integrity sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==
 
-wordwrap@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-1.0.0.tgz#27584810891456a4171c8d0226441ade90cbcaeb"
-  integrity sha1-J1hIEIkUVqQXHI0CJkQa3pDLyus=
-
-wrap-ansi@^2.0.0:
-  version "2.1.0"
-  resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-2.1.0.tgz#d8fc3d284dd05794fe84973caecdd1cf824fdd85"
-  integrity sha1-2Pw9KE3QV5T+hJc8rs3Rz4JP3YU=
-  dependencies:
-    string-width "^1.0.1"
-    strip-ansi "^3.0.1"
-
 wrap-ansi@^6.2.0:
   version "6.2.0"
   resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-6.2.0.tgz#e9393ba07102e6c91a3b221478f0257cd2856e53"
@@ -4635,22 +4312,12 @@
   resolved "https://registry.yarnpkg.com/xtend/-/xtend-3.0.0.tgz#5cce7407baf642cba7becda568111c493f59665a"
   integrity sha1-XM50B7r2Qsunvs2laBEcST9ZZlo=
 
-y18n@^3.2.1:
-  version "3.2.1"
-  resolved "https://registry.yarnpkg.com/y18n/-/y18n-3.2.1.tgz#6d15fba884c08679c0d77e88e7759e811e07fa41"
-  integrity sha1-bRX7qITAhnnA136I53WegR4H+kE=
-
 y18n@^4.0.0:
   version "4.0.0"
   resolved "https://registry.yarnpkg.com/y18n/-/y18n-4.0.0.tgz#95ef94f85ecc81d007c264e190a120f0a3c8566b"
   integrity sha512-r9S/ZyXu/Xu9q1tYlpsLIsa3EeLXXk0VwlxqTcFRfg9EhMW+17kbt9G0NrgCmhGb5vT2hyhJZLfDGx+7+5Uj/w==
 
-yallist@^2.1.2:
-  version "2.1.2"
-  resolved "https://registry.yarnpkg.com/yallist/-/yallist-2.1.2.tgz#1c11f9218f076089a47dd512f93c6699a6a81d52"
-  integrity sha1-HBH5IY8HYImkfdUS+TxmmaaoHVI=
-
-yargs-parser@^18.1.1, yargs-parser@^18.1.3:
+yargs-parser@^18.0.0, yargs-parser@^18.1.1, yargs-parser@^18.1.2, yargs-parser@^18.1.3:
   version "18.1.3"
   resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-18.1.3.tgz#be68c4975c6b2abf469236b0c870362fab09a7b0"
   integrity sha512-o50j0JeToy/4K6OZcaQmW6lyXXKhq7csREXcDwk2omFPJEwUNOVtJKvmDr9EI1fAJZUyZcRF7kxGBWmRXudrCQ==
@@ -4658,30 +4325,22 @@
     camelcase "^5.0.0"
     decamelize "^1.2.0"
 
-yargs-parser@^9.0.2:
-  version "9.0.2"
-  resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-9.0.2.tgz#9ccf6a43460fe4ed40a9bb68f48d43b8a68cc077"
-  integrity sha1-nM9qQ0YP5O1Aqbto9I1DuKaMwHc=
+yargs@^15.0.0:
+  version "15.4.1"
+  resolved "https://registry.yarnpkg.com/yargs/-/yargs-15.4.1.tgz#0d87a16de01aee9d8bec2bfbf74f67851730f4f8"
+  integrity sha512-aePbxDmcYW++PaqBsJ+HYUFwCdv4LVvdnhBy78E57PIor8/OVvhMrADFFEDh8DHDFRv/O9i3lPhsENjO7QX0+A==
   dependencies:
-    camelcase "^4.1.0"
-
-yargs@^11.0.0:
-  version "11.1.1"
-  resolved "https://registry.yarnpkg.com/yargs/-/yargs-11.1.1.tgz#5052efe3446a4df5ed669c995886cc0f13702766"
-  integrity sha512-PRU7gJrJaXv3q3yQZ/+/X6KBswZiaQ+zOmdprZcouPYtQgvNU35i+68M4b1ZHLZtYFT5QObFLV+ZkmJYcwKdiw==
-  dependencies:
-    cliui "^4.0.0"
-    decamelize "^1.1.1"
-    find-up "^2.1.0"
-    get-caller-file "^1.0.1"
-    os-locale "^3.1.0"
+    cliui "^6.0.0"
+    decamelize "^1.2.0"
+    find-up "^4.1.0"
+    get-caller-file "^2.0.1"
     require-directory "^2.1.1"
-    require-main-filename "^1.0.1"
+    require-main-filename "^2.0.0"
     set-blocking "^2.0.0"
-    string-width "^2.0.0"
+    string-width "^4.2.0"
     which-module "^2.0.0"
-    y18n "^3.2.1"
-    yargs-parser "^9.0.2"
+    y18n "^4.0.0"
+    yargs-parser "^18.1.2"
 
 yargs@^15.3.1:
   version "15.3.1"