Upgrade ninja to v1.13.0

This project was upgraded with external_updater.
Usage: tools/external_updater/updater.sh update external/<absolute path to project>
For more info, check https://cs.android.com/android/platform/superproject/main/+/main:tools/external_updater/README.md

Test: TreeHugger
Change-Id: I47c67a017033da360a4a3c40b68e6888da821697
diff --git a/.clang-tidy b/.clang-tidy
index e0afd47..357f046 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -6,6 +6,7 @@
   ,readability-redundant-string-cstr,
   ,readability-redundant-string-init,
   ,readability-simplify-boolean-expr,
+  ,cppcoreguidelines-pro-type-cstyle-cast,
 '
 WarningsAsErrors: '
   ,readability-avoid-const-params-in-decls,
@@ -14,4 +15,5 @@
   ,readability-redundant-string-cstr,
   ,readability-redundant-string-init,
   ,readability-simplify-boolean-expr,
+  ,cppcoreguidelines-pro-type-cstyle-cast,
 '
diff --git a/.github/workflows/linux-musl.yml b/.github/workflows/linux-musl.yml
new file mode 100644
index 0000000..5f049b1
--- /dev/null
+++ b/.github/workflows/linux-musl.yml
@@ -0,0 +1,68 @@
+name: ci-linux-musl
+
+on:
+  workflow_dispatch:
+  pull_request:
+  push:
+  release:
+    types: [published]
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
+permissions: {}
+
+jobs:
+  build:
+    runs-on: ubuntu-24.04
+    container: alpine:edge
+    permissions:
+      contents: read
+    strategy:
+      fail-fast: false
+      matrix:
+        build_method: ["python", "cmake"]
+
+    steps:
+      - name: Host - checkout
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+          persist-credentials: false
+
+      - name: Install ninja build optional dependencies
+        run: apk update && apk add -u --no-cache python3 build-base cmake re2c
+
+      - name: Configure ninja build
+        if: matrix.build_method == 'cmake'
+        run: cmake -B build -D CMAKE_BUILD_TYPE="Release"
+
+      - name: Cmake Build ninja
+        if: matrix.build_method == 'cmake'
+        run: cmake --build build --parallel --config Release
+
+      - name: Cmake test ninja
+        if: matrix.build_method == 'cmake'
+        run: build/ninja_test --gtest_color=yes
+
+      - name: Python Build ninja
+        if: matrix.build_method == 'python'
+        run: python3 configure.py --bootstrap --verbose
+
+      - name: Python test ninja
+        if: matrix.build_method == 'python'
+        run: |
+          ./ninja all
+          python3 misc/ninja_syntax_test.py
+          # python3 misc/output_test.py
+
+      - name: Move ninja binary
+        if: matrix.build_method == 'cmake'
+        run: mv -f build/ninja ninja
+
+      - name: ninja-ninja --version
+        run: ./ninja --version >> $GITHUB_STEP_SUMMARY
+
+      - name: binary info via file
+        run: file ./ninja >> $GITHUB_STEP_SUMMARY
diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml
index fdebf0b..b94fb99 100644
--- a/.github/workflows/linux.yml
+++ b/.github/workflows/linux.yml
@@ -7,125 +7,147 @@
     types: published
 
 jobs:
+  fedora:
+    runs-on: [ubuntu-latest]
+    container:
+      image: fedora:40
+    steps:
+      - uses: actions/checkout@v4
+      - name: Install dependencies
+        run: dnf install -y ninja-build cmake gtest-devel re2c clang util-linux clang-tools-extra
+      - name: Linting
+        run: misc/ci.py
+      - name: Configure with CMake
+        run: cmake -Bbuild -G"Ninja Multi-Config" -DNINJA_CLANG_TIDY=1
+      - name: Build debug ninja
+        run: CLICOLOR_FORCE=1 ninja
+        working-directory: build
+      - name: Test debug ninja
+        working-directory: build/Debug
+        run: |
+          ./ninja_test --gtest_color=yes
+          ../../misc/output_test.py
+          ../../misc/jobserver_test.py
+      - name: Build release ninja
+        run: CLICOLOR_FORCE=1 ninja -f build-Release.ninja
+        working-directory: build
+      - name: Test release ninja
+        working-directory: build/Release
+        run: |
+          ./ninja_test --gtest_color=yes
+          ../../misc/output_test.py
+          ../../misc/jobserver_test.py
+
   build:
     runs-on: [ubuntu-latest]
     container:
-      image: centos:7
+      image: rockylinux:8
     steps:
-    - uses: actions/checkout@v2
-    - uses: codespell-project/actions-codespell@master
-      with:
-        ignore_words_list: fo,wee
-    - name: Install dependencies
-      run: |
-        curl -L -O https://github.com/Kitware/CMake/releases/download/v3.16.4/cmake-3.16.4-Linux-x86_64.sh
-        chmod +x cmake-3.16.4-Linux-x86_64.sh
-        ./cmake-3.16.4-Linux-x86_64.sh --skip-license --prefix=/usr/local
-        curl -L -O https://www.mirrorservice.org/sites/dl.fedoraproject.org/pub/epel/7/x86_64/Packages/p/p7zip-16.02-20.el7.x86_64.rpm
-        curl -L -O https://www.mirrorservice.org/sites/dl.fedoraproject.org/pub/epel/7/x86_64/Packages/p/p7zip-plugins-16.02-20.el7.x86_64.rpm
-        rpm -U --quiet p7zip-16.02-20.el7.x86_64.rpm
-        rpm -U --quiet p7zip-plugins-16.02-20.el7.x86_64.rpm
-        yum install -y make gcc-c++ libasan clang-analyzer
+      - uses: actions/checkout@v4
+      - uses: codespell-project/actions-codespell@master
+        with:
+          ignore_words_list: fo,wee,addin,notin
+      - name: Install dependencies
+        run: |
+          dnf install -y make gcc-c++ libasan clang-analyzer cmake dnf-plugins-core epel-release
+          dnf config-manager --set-enabled powertools
+          dnf install -y gtest-devel p7zip p7zip-plugins ninja-build
 
-    - name: Build debug ninja
-      shell: bash
-      env:
-        CFLAGS: -fstack-protector-all -fsanitize=address
-        CXXFLAGS: -fstack-protector-all -fsanitize=address
-      run: |
-        scan-build -o scanlogs cmake -DCMAKE_BUILD_TYPE=Debug -B debug-build
-        scan-build -o scanlogs cmake --build debug-build --parallel --config Debug
+      - name: Build debug ninja
+        shell: bash
+        env:
+          CFLAGS: -fstack-protector-all -fsanitize=address
+          CXXFLAGS: -fstack-protector-all -fsanitize=address
+        run: |
+          scan-build -o scanlogs cmake -GNinja -DCMAKE_BUILD_TYPE=Debug -B debug-build
+          scan-build -o scanlogs cmake --build debug-build --parallel --config Debug
 
-    - name: Test debug ninja
-      run: ./ninja_test
-      working-directory: debug-build
+      - name: Test debug ninja
+        run: ASAN_OPTIONS=detect_leaks=0 ./ninja_test
+        working-directory: debug-build
 
-    - name: Build release ninja
-      shell: bash
-      run: |
-        cmake -DCMAKE_BUILD_TYPE=Release -B release-build
-        cmake --build release-build --parallel --config Release
-        strip release-build/ninja
+      - name: Build release ninja
+        shell: bash
+        run: |
+          cmake -GNinja -DCMAKE_BUILD_TYPE=Release -B release-build
+          cmake --build release-build --parallel --config Release
+          strip release-build/ninja
 
-    - name: Test release ninja
-      run: ./ninja_test
-      working-directory: release-build
+      - name: Test release ninja
+        run: ./ninja_test
+        working-directory: release-build
 
-    - name: Create ninja archive
-      run: |
-        mkdir artifact
-        7z a artifact/ninja-linux.zip ./release-build/ninja
+      - name: Create ninja archive
+        run: |
+          mkdir artifact
+          7z a artifact/ninja-linux.zip ./release-build/ninja
 
-    # Upload ninja binary archive as an artifact
-    - name: Upload artifact
-      uses: actions/upload-artifact@v3
-      with:
-        name: ninja-binary-archives
-        path: artifact
+      # Upload ninja binary archive as an artifact
+      - name: Upload artifact
+        uses: actions/upload-artifact@v4
+        with:
+          name: ninja-binary-archives
+          path: artifact
 
-    - name: Upload release asset
-      if: github.event.action == 'published'
-      uses: actions/upload-release-asset@v1
-      env:
-        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-      with:
-        upload_url: ${{ github.event.release.upload_url }}
-        asset_path: ./artifact/ninja-linux.zip
-        asset_name: ninja-linux.zip
-        asset_content_type: application/zip
+      - name: Upload release asset
+        if: github.event.action == 'published'
+        uses: actions/upload-release-asset@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          upload_url: ${{ github.event.release.upload_url }}
+          asset_path: ./artifact/ninja-linux.zip
+          asset_name: ninja-linux.zip
+          asset_content_type: application/zip
 
   test:
     runs-on: [ubuntu-latest]
     container:
       image: ubuntu:20.04
     steps:
-    - uses: actions/checkout@v2
-    - name: Install dependencies
-      run: |
-        apt update
-        apt install -y python3-pytest ninja-build clang-tidy python3-pip clang libgtest-dev
-        pip3 install cmake==3.17.*
-    - name: Configure (GCC)
-      run: cmake -Bbuild-gcc -DCMAKE_BUILD_TYPE=Debug -G'Ninja Multi-Config'
+      - uses: actions/checkout@v4
+      - name: Install dependencies
+        run: |
+          apt update
+          apt install -y python3-pytest ninja-build python3-pip clang libgtest-dev
+          pip3 install cmake==3.17.*
+      - name: Configure (GCC)
+        run: cmake -Bbuild-gcc -DCMAKE_BUILD_TYPE=Debug -G'Ninja Multi-Config'
 
-    - name: Build (GCC, Debug)
-      run: cmake --build build-gcc --config Debug
-    - name: Unit tests (GCC, Debug)
-      run: ./build-gcc/Debug/ninja_test
-    - name: Python tests (GCC, Debug)
-      run: pytest-3 --color=yes ../..
-      working-directory: build-gcc/Debug
+      - name: Build (GCC, Debug)
+        run: cmake --build build-gcc --config Debug
+      - name: Unit tests (GCC, Debug)
+        run: ./build-gcc/Debug/ninja_test
+      - name: Python tests (GCC, Debug)
+        run: pytest-3 --color=yes ../..
+        working-directory: build-gcc/Debug
 
-    - name: Build (GCC, Release)
-      run: cmake --build build-gcc --config Release
-    - name: Unit tests (GCC, Release)
-      run: ./build-gcc/Release/ninja_test
-    - name: Python tests (GCC, Release)
-      run: pytest-3 --color=yes ../..
-      working-directory: build-gcc/Release
+      - name: Build (GCC, Release)
+        run: cmake --build build-gcc --config Release
+      - name: Unit tests (GCC, Release)
+        run: ./build-gcc/Release/ninja_test
+      - name: Python tests (GCC, Release)
+        run: pytest-3 --color=yes ../..
+        working-directory: build-gcc/Release
 
-    - name: Configure (Clang)
-      run: CC=clang CXX=clang++ cmake -Bbuild-clang -DCMAKE_BUILD_TYPE=Debug -G'Ninja Multi-Config' -DCMAKE_EXPORT_COMPILE_COMMANDS=1
+      - name: Configure (Clang)
+        run: CC=clang CXX=clang++ cmake -Bbuild-clang -DCMAKE_BUILD_TYPE=Debug -G'Ninja Multi-Config'
 
-    - name: Build (Clang, Debug)
-      run: cmake --build build-clang --config Debug
-    - name: Unit tests (Clang, Debug)
-      run: ./build-clang/Debug/ninja_test
-    - name: Python tests (Clang, Debug)
-      run: pytest-3 --color=yes ../..
-      working-directory: build-clang/Debug
+      - name: Build (Clang, Debug)
+        run: cmake --build build-clang --config Debug
+      - name: Unit tests (Clang, Debug)
+        run: ./build-clang/Debug/ninja_test
+      - name: Python tests (Clang, Debug)
+        run: pytest-3 --color=yes ../..
+        working-directory: build-clang/Debug
 
-    - name: Build (Clang, Release)
-      run: cmake --build build-clang --config Release
-    - name: Unit tests (Clang, Release)
-      run: ./build-clang/Release/ninja_test
-    - name: Python tests (Clang, Release)
-      run: pytest-3 --color=yes ../..
-      working-directory: build-clang/Release
-
-    - name: clang-tidy
-      run: /usr/lib/llvm-10/share/clang/run-clang-tidy.py -header-filter=src
-      working-directory: build-clang
+      - name: Build (Clang, Release)
+        run: cmake --build build-clang --config Release
+      - name: Unit tests (Clang, Release)
+        run: ./build-clang/Release/ninja_test
+      - name: Python tests (Clang, Release)
+        run: pytest-3 --color=yes ../..
+        working-directory: build-clang/Release
 
   build-with-python:
     runs-on: [ubuntu-latest]
@@ -133,47 +155,34 @@
       image: ${{ matrix.image }}
     strategy:
       matrix:
-        image: ['ubuntu:20.04', 'ubuntu:22.04', 'ubuntu:24.04']
+        image: ["ubuntu:20.04", "ubuntu:22.04", "ubuntu:24.04"]
     steps:
-    - uses: actions/checkout@v2
-    - name: Install dependencies
-      run: |
-        apt update
-        apt install -y g++ python3
-    - name: ${{ matrix.image }}
-      run: |
-        python3 configure.py --bootstrap
-        ./ninja all
-        python3 misc/ninja_syntax_test.py
-        ./misc/output_test.py
+      - uses: actions/checkout@v4
+      - name: Install dependencies
+        run: |
+          apt update
+          apt install -y g++ python3
+      - name: ${{ matrix.image }}
+        run: |
+          python3 configure.py --bootstrap
+          ./ninja all
+          python3 misc/ninja_syntax_test.py
+          ./misc/output_test.py
 
   build-aarch64:
     name: Build Linux ARM64
-    runs-on: [ubuntu-latest]
+    runs-on: [ubuntu-24.04-arm]
     steps:
-    - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+          persist-credentials: false
 
-    - name: Build
-      uses: uraimo/run-on-arch-action@v2
-      with:
-        arch: aarch64
-        distro: ubuntu18.04
-        githubToken: ${{ github.token }}
-        dockerRunArgs: |
-          --volume "${PWD}:/ninja"
-        install: |
-          apt-get update -q -y
-          apt-get install -q -y make gcc g++ libasan5 clang-tools curl p7zip-full file
-        run: |
-          set -x
-          cd /ninja
+      - run: |
+          sudo apt-get update -q -y
+          sudo apt-get install -q -y make gcc g++ libasan5 clang-tools curl p7zip-full file cmake re2c
 
-          # INSTALL CMAKE
-          CMAKE_VERSION=3.23.4
-          curl -L -O https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-Linux-aarch64.sh
-          chmod +x cmake-${CMAKE_VERSION}-Linux-aarch64.sh
-          ./cmake-${CMAKE_VERSION}-Linux-aarch64.sh --skip-license --prefix=/usr/local
-
+      - run: |
           # BUILD
           cmake -DCMAKE_BUILD_TYPE=Release -B release-build
           cmake --build release-build --parallel --config Release
@@ -189,20 +198,20 @@
           mkdir artifact
           7z a artifact/ninja-linux-aarch64.zip ./release-build/ninja
 
-    # Upload ninja binary archive as an artifact
-    - name: Upload artifact
-      uses: actions/upload-artifact@v3
-      with:
-        name: ninja-binary-archives
-        path: artifact
+      # Upload ninja binary archive as an artifact
+      - name: Upload artifact
+        uses: actions/upload-artifact@v4
+        with:
+          name: ninja-aarch64-binary-archives
+          path: artifact
 
-    - name: Upload release asset
-      if: github.event.action == 'published'
-      uses: actions/upload-release-asset@v1
-      env:
-        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-      with:
-        upload_url: ${{ github.event.release.upload_url }}
-        asset_path: ./artifact/ninja-linux-aarch64.zip
-        asset_name: ninja-linux-aarch64.zip
-        asset_content_type: application/zip
+      - name: Upload release asset
+        if: github.event.action == 'published'
+        uses: actions/upload-release-asset@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          upload_url: ${{ github.event.release.upload_url }}
+          asset_path: ./artifact/ninja-linux-aarch64.zip
+          asset_name: ninja-linux-aarch64.zip
+          asset_content_type: application/zip
diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml
index d3dd9ee..33acd89 100644
--- a/.github/workflows/macos.yml
+++ b/.github/workflows/macos.yml
@@ -8,10 +8,10 @@
 
 jobs:
   build:
-    runs-on: macos-12
+    runs-on: macos-13
 
     steps:
-    - uses: actions/checkout@v2
+    - uses: actions/checkout@v4
 
     - name: Install dependencies
       run: brew install re2c p7zip cmake
@@ -24,9 +24,9 @@
         cmake -Bbuild -GXcode '-DCMAKE_OSX_ARCHITECTURES=arm64;x86_64'
         cmake --build build --config Release
 
-    - name: Test ninja
-      run: ctest -C Release -vv
-      working-directory: build
+    - name: Test ninja (Release)
+      run: ./ninja_test
+      working-directory: build/Release
 
     - name: Create ninja archive
       shell: bash
@@ -36,7 +36,7 @@
 
     # Upload ninja binary archive as an artifact
     - name: Upload artifact
-      uses: actions/upload-artifact@v3
+      uses: actions/upload-artifact@v4
       with:
         name: ninja-binary-archives
         path: artifact
diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml
index e169eb4..c20889d 100644
--- a/.github/workflows/windows.yml
+++ b/.github/workflows/windows.yml
@@ -20,7 +20,7 @@
           suffix: 'arm64'
 
     steps:
-    - uses: actions/checkout@v2
+    - uses: actions/checkout@v4
 
     - name: Install dependencies
       run: choco install re2c
@@ -50,9 +50,9 @@
 
     # Upload ninja binary archive as an artifact
     - name: Upload artifact
-      uses: actions/upload-artifact@v3
+      uses: actions/upload-artifact@v4
       with:
-        name: ninja-binary-archives
+        name: ninja-binary-archives${{ matrix.suffix }}
         path: artifact
 
     - name: Upload release asset
diff --git a/.gitignore b/.gitignore
index ca36ec8..bb13dcd 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,6 +10,7 @@
 /build_log_perftest
 /canon_perftest
 /clparser_perftest
+/elide_middle_perftest
 /depfile_parser_perftest
 /hash_collision_bench
 /ninja_test
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 90e3418..f96b548 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -13,6 +13,7 @@
 
 if(lto_supported)
 	message(STATUS "IPO / LTO enabled")
+	set(CMAKE_POLICY_DEFAULT_CMP0069 NEW)
 	set(CMAKE_INTERPROCEDURAL_OPTIMIZATION_RELEASE TRUE)
 else()
 	message(STATUS "IPO / LTO not supported: <${error}>")
@@ -32,9 +33,13 @@
 	if(flag_no_deprecated)
 		add_compile_options(-Wno-deprecated)
 	endif()
-	check_cxx_compiler_flag(-fdiagnostics-color flag_color_diag)
-	if(flag_color_diag)
-		add_compile_options(-fdiagnostics-color)
+	if(CMAKE_VERSION VERSION_LESS 3.24)
+		check_cxx_compiler_flag(-fdiagnostics-color flag_color_diag)
+		if(flag_color_diag)
+			add_compile_options(-fdiagnostics-color)
+		endif()
+	elseif(NOT DEFINED ENV{CMAKE_COLOR_DIAGNOSTICS})
+		set(CMAKE_COLOR_DIAGNOSTICS ON)
 	endif()
 
 	if(NOT NINJA_FORCE_PSELECT)
@@ -130,17 +135,20 @@
 	src/deps_log.cc
 	src/disk_interface.cc
 	src/edit_distance.cc
+	src/elide_middle.cc
 	src/eval_env.cc
 	src/graph.cc
 	src/graphviz.cc
+	src/jobserver.cc
 	src/json.cc
 	src/line_printer.cc
 	src/manifest_parser.cc
 	src/metrics.cc
 	src/missing_deps.cc
 	src/parser.cc
+	src/real_command_runner.cc
 	src/state.cc
-	src/status.cc
+	src/status_printer.cc
 	src/string_piece_util.cc
 	src/util.cc
 	src/version.cc
@@ -149,6 +157,7 @@
 	target_sources(libninja PRIVATE
 		src/subprocess-win32.cc
 		src/includes_normalize-win32.cc
+		src/jobserver-win32.cc
 		src/msvc_helper-win32.cc
 		src/msvc_helper_main-win32.cc
 		src/getopt.c
@@ -158,8 +167,16 @@
 	# so that build environments which lack a C compiler, but have a C++
 	# compiler may build ninja.
 	set_source_files_properties(src/getopt.c PROPERTIES LANGUAGE CXX)
+
+	# windows.h defines min() and max() which conflict with std::min()
+	# and std::max(), which both might be used in sources. Avoid compile
+	# errors by telling windows.h to not define those two.
+	add_compile_definitions(NOMINMAX)
 else()
-	target_sources(libninja PRIVATE src/subprocess-posix.cc)
+	target_sources(libninja PRIVATE
+		src/jobserver-posix.cc
+		src/subprocess-posix.cc
+	)
 	if(CMAKE_SYSTEM_NAME STREQUAL "OS400" OR CMAKE_SYSTEM_NAME STREQUAL "AIX")
 		target_sources(libninja PRIVATE src/getopt.c)
 		# Build getopt.c, which can be compiled as either C or C++, as C++
@@ -175,6 +192,7 @@
 endif()
 
 target_compile_features(libninja PUBLIC cxx_std_11)
+target_compile_features(libninja-re2c PUBLIC cxx_std_11)
 
 #Fixes GetActiveProcessorCount on MinGW
 if(MINGW)
@@ -195,6 +213,12 @@
 	if(WIN32)
 		target_sources(ninja PRIVATE windows/ninja.manifest)
 	endif()
+
+	option(NINJA_CLANG_TIDY "Run clang-tidy on source files" OFF)
+	if(NINJA_CLANG_TIDY)
+		set_target_properties(libninja PROPERTIES CXX_CLANG_TIDY "clang-tidy;--use-color")
+		set_target_properties(ninja    PROPERTIES CXX_CLANG_TIDY "clang-tidy;--use-color")
+	endif()
 endif()
 
 # Adds browse mode into the ninja binary if it's supported by the host platform.
@@ -227,42 +251,21 @@
 
 include(CTest)
 if(BUILD_TESTING)
+
+  # Can be removed if cmake min version is >=3.24
+  if (POLICY CMP0135)
+    cmake_policy(SET CMP0135 NEW)
+  endif()
+
   find_package(GTest)
   if(NOT GTest_FOUND)
     include(FetchContent)
     FetchContent_Declare(
       googletest
-      URL https://github.com/google/googletest/archive/release-1.10.0.tar.gz
-      URL_HASH SHA1=9c89be7df9c5e8cb0bc20b3c4b39bf7e82686770
+      URL https://github.com/google/googletest/archive/refs/tags/release-1.12.1.tar.gz
+      URL_HASH SHA256=81964fe578e9bd7c94dfdb09c8e4d6e6759e19967e397dbea48d1c10e45d0df2
     )
     FetchContent_MakeAvailable(googletest)
-
-    # Before googletest-1.11.0, the CMake files provided by the source archive
-    # did not define the GTest::gtest target, only the gtest one, so define
-    # an alias when needed to ensure the rest of this file works with all
-    # GoogleTest releases.
-    #
-    # Note that surprisingly, this is not needed when using GTEST_ROOT to
-    # point to a local installation, because this one contains CMake-generated
-    # files that contain the right target definition, and which will be
-    # picked up by the find_package(GTest) file above.
-    #
-    # This comment and the four lines below can be removed once Ninja only
-    # depends on release-1.11.0 or above.
-    if (NOT TARGET GTest::gtest)
-      message(STATUS "Defining GTest::gtest alias to work-around bug in older release.")
-      add_library(GTest::gtest ALIAS gtest)
-
-      # NOTE: gtest uninit some variables, gcc >= 1.11.3 may cause error on compile.
-      # Remove this comment and six lines below, once ninja deps gtest-1.11.0 or above.
-      if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL "1.11.3")
-        check_cxx_compiler_flag(-Wmaybe-uninitialized flag_maybe_uninit)
-        if (flag_maybe_uninit)
-          target_compile_options(gtest PRIVATE -Wno-maybe-uninitialized)
-        endif()
-      endif()
-
-    endif()
   endif()
 
   # Tests all build into ninja_test executable.
@@ -276,7 +279,10 @@
     src/disk_interface_test.cc
     src/dyndep_parser_test.cc
     src/edit_distance_test.cc
+    src/elide_middle_test.cc
+    src/explanations_test.cc
     src/graph_test.cc
+    src/jobserver_test.cc
     src/json_test.cc
     src/lexer_test.cc
     src/manifest_parser_test.cc
@@ -291,6 +297,11 @@
   if(WIN32)
     target_sources(ninja_test PRIVATE src/includes_normalize_test.cc src/msvc_helper_test.cc
       windows/ninja.manifest)
+
+    if(MSVC)
+      # Silence warnings about using unlink rather than _unlink
+      target_compile_definitions(ninja_test PRIVATE _CRT_NONSTDC_NO_DEPRECATE)
+    endif()
   endif()
   find_package(Threads REQUIRED)
   target_link_libraries(ninja_test PRIVATE libninja libninja-re2c GTest::gtest Threads::Threads)
@@ -300,6 +311,7 @@
     canon_perftest
     clparser_perftest
     depfile_parser_perftest
+    elide_middle_perftest
     hash_collision_bench
     manifest_parser_perftest
   )
diff --git a/METADATA b/METADATA
index 23563ae..3b36e71 100644
--- a/METADATA
+++ b/METADATA
@@ -7,14 +7,14 @@
 third_party {
   license_type: NOTICE
   last_upgrade_date {
-    year: 2024
-    month: 9
-    day: 6
+    year: 2025
+    month: 6
+    day: 25
   }
   homepage: "https://ninja-build.org/"
   identifier {
     type: "Git"
     value: "https://github.com/ninja-build/ninja"
-    version: "v1.12.1"
+    version: "v1.13.0"
   }
 }
diff --git a/README.md b/README.md
index 732ef28..65929fa 100644
--- a/README.md
+++ b/README.md
@@ -34,17 +34,32 @@
 This will generate the `ninja` binary and a `build.ninja` file you can now use
 to build Ninja with itself.
 
-### CMake
+If you have a GoogleTest source directory, you can build the tests
+by passing its path with `--gtest-source-dir=PATH` option, or the
+`GTEST_SOURCE_DIR` environment variable, e.g.:
 
 ```
-cmake -Bbuild-cmake
+./configure.py --bootstrap --gtest-source-dir=/path/to/googletest
+./ninja all     # build ninja_test and other auxiliary binaries
+./ninja_test    # run the unit-test suite.
+```
+
+Use the CMake build below if you want to use a preinstalled binary
+version of the library.
+
+### CMake
+
+To build the ninja binary without the unit tests, disable test building by setting `BUILD_TESTING` to `OFF`:
+
+```
+cmake -Bbuild-cmake -DBUILD_TESTING=OFF
 cmake --build build-cmake
 ```
 
 The `ninja` binary will now be inside the `build-cmake` directory (you can
 choose any other name you like).
 
-To run the unit tests:
+To run the unit tests, omit the `-DBUILD_TESTING=OFF` option, and after building, run:
 
 ```
 ./build-cmake/ninja_test
@@ -58,7 +73,7 @@
 
 ```
 ./configure.py
-ninja manual doc/manual.pdf
+ninja manual doc/manual.html
 ```
 
 Which will generate `doc/manual.html`.
diff --git a/configure.py b/configure.py
index 6ee64a8..a52fb96 100755
--- a/configure.py
+++ b/configure.py
@@ -24,15 +24,19 @@
 import shlex
 import subprocess
 import sys
+from typing import Optional, Union, Dict, List, Any, TYPE_CHECKING
 
 sourcedir = os.path.dirname(os.path.realpath(__file__))
 sys.path.insert(0, os.path.join(sourcedir, 'misc'))
-import ninja_syntax
+if TYPE_CHECKING:
+    import misc.ninja_syntax as ninja_syntax
+else:
+    import ninja_syntax
 
 
 class Platform(object):
     """Represents a host/target platform and its specific build attributes."""
-    def __init__(self, platform):
+    def __init__(self, platform: Optional[str]) -> None:
         self._platform = platform
         if self._platform is not None:
             return
@@ -63,55 +67,55 @@
             self._platform = 'dragonfly'
 
     @staticmethod
-    def known_platforms():
+    def known_platforms() -> List[str]:
       return ['linux', 'darwin', 'freebsd', 'openbsd', 'solaris', 'sunos5',
               'mingw', 'msvc', 'gnukfreebsd', 'bitrig', 'netbsd', 'aix',
               'dragonfly']
 
-    def platform(self):
-        return self._platform
+    def platform(self) -> str:
+        return self._platform  # type: ignore # Incompatible return value type
 
-    def is_linux(self):
+    def is_linux(self) -> bool:
         return self._platform == 'linux'
 
-    def is_mingw(self):
+    def is_mingw(self) -> bool:
         return self._platform == 'mingw'
 
-    def is_msvc(self):
+    def is_msvc(self) -> bool:
         return self._platform == 'msvc'
 
-    def msvc_needs_fs(self):
+    def msvc_needs_fs(self) -> bool:
         popen = subprocess.Popen(['cl', '/nologo', '/help'],
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
         out, err = popen.communicate()
         return b'/FS' in out
 
-    def is_windows(self):
+    def is_windows(self) -> bool:
         return self.is_mingw() or self.is_msvc()
 
-    def is_solaris(self):
+    def is_solaris(self) -> bool:
         return self._platform == 'solaris'
 
-    def is_aix(self):
+    def is_aix(self) -> bool:
         return self._platform == 'aix'
 
-    def is_os400_pase(self):
-        return self._platform == 'os400' or os.uname().sysname.startswith('OS400')
+    def is_os400_pase(self) -> bool:
+        return self._platform == 'os400' or os.uname().sysname.startswith('OS400')  # type: ignore # Module has no attribute "uname"
 
-    def uses_usr_local(self):
+    def uses_usr_local(self) -> bool:
         return self._platform in ('freebsd', 'openbsd', 'bitrig', 'dragonfly', 'netbsd')
 
-    def supports_ppoll(self):
+    def supports_ppoll(self) -> bool:
         return self._platform in ('freebsd', 'linux', 'openbsd', 'bitrig',
                                   'dragonfly')
 
-    def supports_ninja_browse(self):
+    def supports_ninja_browse(self) -> bool:
         return (not self.is_windows()
                 and not self.is_solaris()
                 and not self.is_aix())
 
-    def can_rebuild_in_place(self):
+    def can_rebuild_in_place(self) -> bool:
         return not (self.is_windows() or self.is_aix())
 
 class Bootstrap:
@@ -122,37 +126,43 @@
     It also proxies all calls to an underlying ninja_syntax.Writer, to
     behave like non-bootstrap mode.
     """
-    def __init__(self, writer, verbose=False):
+    def __init__(self, writer: ninja_syntax.Writer, verbose: bool = False) -> None:
         self.writer = writer
         self.verbose = verbose
         # Map of variable name => expanded variable value.
-        self.vars = {}
+        self.vars: Dict[str, str] = {}
         # Map of rule name => dict of rule attributes.
-        self.rules = {
+        self.rules: Dict[str, Dict[str, Any]] = {
             'phony': {}
         }
 
-    def comment(self, text):
+    def comment(self, text: str) -> None:
         return self.writer.comment(text)
 
-    def newline(self):
+    def newline(self) -> None:
         return self.writer.newline()
 
-    def variable(self, key, val):
+    def variable(self, key: str, val: str) -> None:
         # In bootstrap mode, we have no ninja process to catch /showIncludes
         # output.
         self.vars[key] = self._expand(val).replace('/showIncludes', '')
         return self.writer.variable(key, val)
 
-    def rule(self, name, **kwargs):
+    def rule(self, name: str, **kwargs: Any) -> None:
         self.rules[name] = kwargs
         return self.writer.rule(name, **kwargs)
 
-    def build(self, outputs, rule, inputs=None, **kwargs):
+    def build(
+        self,
+        outputs: Union[str, List[str]],
+        rule: str,
+        inputs: Optional[Union[str, List[str]]] = None,
+        **kwargs: Any
+    ) -> List[str]:
         ruleattr = self.rules[rule]
         cmd = ruleattr.get('command')
         if cmd is None:  # A phony rule, for example.
-            return
+            return  # type: ignore # Return value expected
 
         # Implement just enough of Ninja variable expansion etc. to
         # make the bootstrap build work.
@@ -167,23 +177,23 @@
 
         return self.writer.build(outputs, rule, inputs, **kwargs)
 
-    def default(self, paths):
+    def default(self, paths: Union[str, List[str]]) -> None:
         return self.writer.default(paths)
 
-    def _expand_paths(self, paths):
+    def _expand_paths(self, paths: Optional[Union[str, List[str]]]) -> str:
         """Expand $vars in an array of paths, e.g. from a 'build' block."""
         paths = ninja_syntax.as_list(paths)
         return ' '.join(map(self._shell_escape, (map(self._expand, paths))))
 
-    def _expand(self, str, local_vars={}):
+    def _expand(self, str: str, local_vars: Dict[str, str] = {}) -> str:
         """Expand $vars in a string."""
         return ninja_syntax.expand(str, self.vars, local_vars)
 
-    def _shell_escape(self, path):
+    def _shell_escape(self, path: str) -> str:
         """Quote paths containing spaces."""
         return '"%s"' % path if ' ' in path else path
 
-    def _run_command(self, cmdline):
+    def _run_command(self, cmdline: str) -> None:
         """Run a subcommand, quietly.  Prints the full command on error."""
         try:
             if self.verbose:
@@ -213,7 +223,10 @@
 parser.add_option('--profile', metavar='TYPE',
                   choices=profilers,
                   help='enable profiling (' + '/'.join(profilers) + ')',)
-parser.add_option('--with-gtest', metavar='PATH', help='ignored')
+parser.add_option('--gtest-source-dir', metavar='PATH',
+                  help='Path to GoogleTest source directory. If not provided ' +
+                       'GTEST_SOURCE_DIR will be probed in the environment. ' +
+                       'Tests will not be built without a value.')
 parser.add_option('--with-python', metavar='EXE',
                   help='use EXE as the Python interpreter',
                   default=os.path.basename(sys.executable))
@@ -233,7 +246,7 @@
 
 BUILD_FILENAME = 'build.ninja'
 ninja_writer = ninja_syntax.Writer(open(BUILD_FILENAME, 'w'))
-n = ninja_writer
+n: Union[ninja_syntax.Writer, Bootstrap] = ninja_writer
 
 if options.bootstrap:
     # Make the build directory.
@@ -244,7 +257,7 @@
     # Wrap ninja_writer with the Bootstrapper, which also executes the
     # commands.
     print('bootstrapping ninja...')
-    n = Bootstrap(n, verbose=options.verbose)
+    n = Bootstrap(n, verbose=options.verbose)  # type: ignore # Incompatible types in assignment
 
 n.comment('This file is used to build ninja itself.')
 n.comment('It is generated by ' + os.path.basename(__file__) + '.')
@@ -272,17 +285,17 @@
     CXX = 'cl'
     objext = '.obj'
 
-def src(filename):
+def src(filename: str) -> str:
     return os.path.join('$root', 'src', filename)
-def built(filename):
+def built(filename: str) -> str:
     return os.path.join('$builddir', filename)
-def doc(filename):
+def doc(filename: str) -> str:
     return os.path.join('$root', 'doc', filename)
-def cc(name, **kwargs):
+def cc(name: str, **kwargs: Any) -> List[str]:
     return n.build(built(name + objext), 'cxx', src(name + '.c'), **kwargs)
-def cxx(name, **kwargs):
+def cxx(name: str, **kwargs: Any) -> List[str]:
     return n.build(built(name + objext), 'cxx', src(name + '.cc'), **kwargs)
-def binary(name):
+def binary(name: str) -> str:
     if platform.is_windows():
         exe = name + '.exe'
         n.build(name, 'phony', exe)
@@ -302,7 +315,7 @@
 else:
     n.variable('ar', configure_env.get('AR', 'ar'))
 
-def search_system_path(file_name):
+def search_system_path(file_name: str) -> Optional[str]:  # type: ignore # Missing return statement
   """Find a file in the system path."""
   for dir in os.environ['path'].split(';'):
     path = os.path.join(dir, file_name)
@@ -316,11 +329,12 @@
         raise Exception('cl.exe not found. Run again from the Developer Command Prompt for VS')
     cflags = ['/showIncludes',
               '/nologo',  # Don't print startup banner.
+              '/utf-8',
               '/Zi',  # Create pdb with debug info.
               '/W4',  # Highest warning level.
               '/WX',  # Warnings as errors.
               '/wd4530', '/wd4100', '/wd4706', '/wd4244',
-              '/wd4512', '/wd4800', '/wd4702', '/wd4819',
+              '/wd4512', '/wd4800', '/wd4702',
               # Disable warnings about constant conditional expressions.
               '/wd4127',
               # Disable warnings about passing "this" during initialization.
@@ -348,7 +362,7 @@
               '-Wno-unused-parameter',
               '-fno-rtti',
               '-fno-exceptions',
-              '-std=c++11',
+              '-std=c++14',
               '-fvisibility=hidden', '-pipe',
               '-DNINJA_PYTHON="%s"' % options.with_python]
     if options.debug:
@@ -404,7 +418,7 @@
 # Search for generated headers relative to build dir.
 cflags.append('-I.')
 
-def shell_escape(str):
+def shell_escape(str: str) -> str:
     """Escape str such that it's interpreted as a single argument by
     the shell."""
 
@@ -425,6 +439,7 @@
 if 'LDFLAGS' in configure_env:
     ldflags.append(configure_env['LDFLAGS'])
 n.variable('ldflags', ' '.join(shell_escape(flag) for flag in ldflags))
+
 n.newline()
 
 if platform.is_msvc():
@@ -481,7 +496,7 @@
     n.newline()
 
 n.comment('the depfile parser and ninja lexers are generated using re2c.')
-def has_re2c():
+def has_re2c() -> bool:
     try:
         proc = subprocess.Popen(['re2c', '-V'], stdout=subprocess.PIPE)
         return int(proc.communicate()[0], 10) >= 1503
@@ -525,17 +540,20 @@
              'dyndep',
              'dyndep_parser',
              'edit_distance',
+             'elide_middle',
              'eval_env',
              'graph',
              'graphviz',
+             'jobserver',
              'json',
              'line_printer',
              'manifest_parser',
              'metrics',
              'missing_deps',
              'parser',
+             'real_command_runner',
              'state',
-             'status',
+             'status_printer',
              'string_piece_util',
              'util',
              'version']:
@@ -543,6 +561,7 @@
 if platform.is_windows():
     for name in ['subprocess-win32',
                  'includes_normalize-win32',
+                 'jobserver-win32',
                  'msvc_helper-win32',
                  'msvc_helper_main-win32']:
         objs += cxx(name, variables=cxxvariables)
@@ -550,7 +569,9 @@
         objs += cxx('minidump-win32', variables=cxxvariables)
     objs += cc('getopt')
 else:
-    objs += cxx('subprocess-posix')
+    for name in ['jobserver-posix',
+                 'subprocess-posix']:
+        objs += cxx(name, variables=cxxvariables)
 if platform.is_aix():
     objs += cc('getopt')
 if platform.is_msvc():
@@ -582,6 +603,86 @@
     # build.ninja file.
     n = ninja_writer
 
+# Build the ninja_test executable only if the GTest source directory
+# is provided explicitly. Either from the environment with GTEST_SOURCE_DIR
+# or with the --gtest-source-dir command-line option.
+#
+# Do not try to look for an installed binary version, and link against it
+# because doing so properly is platform-specific (use the CMake build for
+# this).
+if options.gtest_source_dir:
+    gtest_src_dir = options.gtest_source_dir
+else:
+    gtest_src_dir = os.environ.get('GTEST_SOURCE_DIR')
+
+if gtest_src_dir:
+    # Verify GoogleTest source directory, and add its include directory
+    # to the global include search path (even for non-test sources) to
+    # keep the build plan generation simple.
+    gtest_all_cc = os.path.join(gtest_src_dir, 'googletest', 'src', 'gtest-all.cc')
+    if not os.path.exists(gtest_all_cc):
+        print('ERROR: Missing GoogleTest source file: %s' % gtest_all_cc)
+        sys.exit(1)
+
+    n.comment('Tests all build into ninja_test executable.')
+
+    # Test-specific version of cflags, must include the GoogleTest
+    # include directory.
+    test_cflags = cflags.copy()
+    test_cflags.append('-I' + os.path.join(gtest_src_dir, 'googletest', 'include'))
+
+    test_variables = [('cflags', test_cflags)]
+    if platform.is_msvc():
+        test_variables += [('pdb', 'ninja_test.pdb')]
+
+    test_names = [
+        'build_log_test',
+        'build_test',
+        'clean_test',
+        'clparser_test',
+        'depfile_parser_test',
+        'deps_log_test',
+        'disk_interface_test',
+        'dyndep_parser_test',
+        'edit_distance_test',
+        'elide_middle_test',
+        'explanations_test',
+        'graph_test',
+        'jobserver_test',
+        'json_test',
+        'lexer_test',
+        'manifest_parser_test',
+        'ninja_test',
+        'state_test',
+        'string_piece_util_test',
+        'subprocess_test',
+        'test',
+        'util_test',
+    ]
+    if platform.is_windows():
+        test_names += [
+            'includes_normalize_test',
+            'msvc_helper_test',
+        ]
+
+    objs = []
+    for name in test_names:
+        objs += cxx(name, variables=test_variables)
+
+    # Build GTest as a monolithic source file.
+    # This requires one extra include search path, so replace the
+    # value of 'cflags' in our list.
+    gtest_all_variables = test_variables[1:] + [
+      ('cflags', test_cflags + ['-I' + os.path.join(gtest_src_dir, 'googletest') ]),
+    ]
+    # Do not use cxx() directly to ensure the object file is under $builddir.
+    objs += n.build(built('gtest_all' + objext), 'cxx', gtest_all_cc, variables=gtest_all_variables)
+
+    ninja_test = n.build(binary('ninja_test'), 'link', objs, implicit=ninja_lib,
+                         variables=[('libs', libs)])
+    n.newline()
+    all_targets += ninja_test
+
 n.comment('Ancillary executables.')
 
 if platform.is_aix() and '-maix64' not in ldflags:
@@ -591,6 +692,7 @@
 
 for name in ['build_log_perftest',
              'canon_perftest',
+             'elide_middle_perftest',
              'depfile_parser_perftest',
              'hash_collision_bench',
              'manifest_parser_perftest',
@@ -672,7 +774,7 @@
 
 n.build('all', 'phony', all_targets)
 
-n.close()
+n.close()  # type: ignore # Item "Bootstrap" of "Writer | Bootstrap" has no attribute "close"
 print('wrote %s.' % BUILD_FILENAME)
 
 if options.bootstrap:
diff --git a/doc/manual.asciidoc b/doc/manual.asciidoc
index 3482a1e..6f78cc0 100644
--- a/doc/manual.asciidoc
+++ b/doc/manual.asciidoc
@@ -1,6 +1,6 @@
 The Ninja build system
 ======================
-v1.12.1, May 2024
+v1.13.0, Jun 2025
 
 
 Introduction
@@ -188,10 +188,43 @@
 you don't need to pass `-j`.)
 
 
+GNU Jobserver support
+~~~~~~~~~~~~~~~~~~~~~
+
+Since version 1.13, Ninja builds can follow the
+https://www.gnu.org/software/make/manual/html_node/Job-Slots.html[GNU Make jobserver]
+client protocol. This is useful when Ninja is invoked as part of a larger
+build system controlled by a top-level GNU Make instance, or any other
+jobserver pool implementation, as it allows better coordination between
+concurrent build tasks.
+
+This feature is automatically enabled under the following conditions:
+
+- Dry-run (i.e. `-n` or `--dry-run`) is not enabled.
+
+- No explicit job count (e.g. `-j<COUNT>`) is passed on the command
+  line.
+
+- The `MAKEFLAGS` environment variable is defined and describes a valid
+  jobserver mode using `--jobserver-auth=SEMAPHORE_NAME` on Windows, or
+  `--jobserver-auth=fifo:PATH` on Posix.
+
+In this case, Ninja will use the jobserver pool of job slots to control
+parallelism, instead of its default parallel implementation.
+
+Note that load-average limitations (i.e. when using `-l<count>`)
+are still being enforced in this mode.
+
+IMPORTANT: On Posix, only the FIFO-based version of the protocol, which is
+implemented by GNU Make 4.4 and higher, is supported. Ninja will detect
+when a pipe-based jobserver is being used (i.e. when `MAKEFLAGS` contains
+`--jobserver-auth=<read>,<write>`) and will print a warning, but will
+otherwise ignore it.
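+
+For illustration, a minimal sketch of running a Ninja build under a
+jobserver pool, here using the `misc/jobserver_pool.py` helper script
+(the `-j8` slot count and the `build` directory are only examples):
+
+----
+# Create a FIFO-based pool with 8 job slots, export MAKEFLAGS,
+# then run ninja as a jobserver client.
+./misc/jobserver_pool.py -j8 ninja -C build
+----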
+
 Environment variables
 ~~~~~~~~~~~~~~~~~~~~~
 
-Ninja supports one environment variable to control its behavior:
+Ninja supports two environment variables to control its behavior:
 `NINJA_STATUS`, the progress status printed before the rule being run.
 
 Several placeholders are available:
@@ -216,6 +249,10 @@
 to separate from the build rule). Another example of possible progress status
 could be `"[%u/%r/%f] "`.
 
+If `MAKEFLAGS` is defined in the environment, it may alter how
+Ninja dispatches parallel build commands. See the GNU Jobserver support
+section for details.
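+
+For illustration only, a FIFO-based `MAKEFLAGS` value as set by a
+jobserver pool (the slot count and FIFO path below are examples) looks
+like:
+
+----
+MAKEFLAGS=" -j8 --jobserver-auth=fifo:/tmp/jobserver_pool"
+----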
+
 Extra tools
 ~~~~~~~~~~~
 
@@ -266,6 +303,47 @@
 rebuild those targets.
 _Available since Ninja 1.11._
 
+`multi-inputs`:: print one or more sets of inputs required to build targets.
+Each line will consist of a target, a delimiter, an input and a terminator character.
+The list produced by the tool can be helpful if one would like to know which targets
+are affected by a certain input.
++
+The output will be a series of lines with the following elements:
++
+----
+<target> <delimiter> <input> <terminator>
+----
++
+The default `<delimiter>` is a single TAB character.
+The delimiter can be modified to any string using the `--delimiter` argument.
++
+The default `<terminator>` is a line terminator (i.e. `\n` on Posix and `\r\n` on Windows).
+The terminator can be changed to `\0` by using the `--print0` argument.
++
+Example usage of the `multi-inputs` tool:
++
+----
+ninja -t multi-inputs target1 target2 target3
+----
++
+Example of produced output from the `multi-inputs` tool:
++
+----
+target1 file1.c
+target2 file1.c
+target2 file2.c
+target3 file1.c
+target3 file2.c
+target3 file3.c
+----
++
+_Note that a given input may appear for several targets if it is used by more
+than one target._
+_Available since Ninja 1.13._
+
 `clean`:: remove built files. By default, it removes all built files
 except for those created by the generator.  Adding the `-g` flag also
 removes built files created by the generator (see <<ref_rule,the rule
@@ -290,6 +368,11 @@
 by the Clang tooling interface.
 _Available since Ninja 1.2._
 
+`compdb-targets`:: like `compdb`, but takes a list of targets instead of rules,
+and expects at least one target. The resulting compilation database contains
+all commands required to build the indicated targets, and _only_ those
+commands.
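++
+For example (the `my_target` name is only illustrative; like `compdb`,
+the database is written to standard output):
++
+----
+ninja -t compdb-targets my_target > compile_commands.json
+----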
+
 `deps`:: show all dependencies stored in the `.ninja_deps` file. When given a
 target, show just the target's dependencies. _Available since Ninja 1.4._
 
@@ -787,7 +870,7 @@
    Order-only dependencies may be tacked on the end with +||
    _dependency1_ _dependency2_+.  (See <<ref_dependencies,the reference on
    dependency types>>.)
-   Validations may be taked on the end with +|@ _validation1_ _validation2_+.
+   Validations may be tacked on the end with +|@ _validation1_ _validation2_+.
    (See <<validations,the reference on validations>>.)
 +
 Implicit outputs _(available since Ninja 1.7)_ may be added before
@@ -996,7 +1079,7 @@
 object file of a compile command.
 
 2. _Implicit outputs_, as listed in a build line with the syntax +|
-   _out1_ _out2_+ + before the `:` of a build line _(available since
+   _out1_ _out2_+ before the `:` of a build line _(available since
    Ninja 1.7)_.  The semantics are identical to explicit outputs,
   the only difference is that implicit outputs don't show up in the
   `$out` variable.
diff --git a/doc/style.css b/doc/style.css
index 363e272..2be09de 100644
--- a/doc/style.css
+++ b/doc/style.css
@@ -53,3 +53,16 @@
 p {
     margin-top: 0;
 }
+
+/* The following applies to the left column of a [horizontal] labeled list: */
+table.horizontal > tbody > tr > td:nth-child(1) {
+
+    /* prevent the insertion of a line-break in the middle of a label: */
+    white-space: nowrap;
+
+    /* insert a little horizontal padding between the two columns: */
+    padding-right: 1.5em;
+
+    /* right-justify labels: */
+    text-align: end;
+}
diff --git a/misc/ci.py b/misc/ci.py
index 17cbf14..20a4415 100755
--- a/misc/ci.py
+++ b/misc/ci.py
@@ -5,22 +5,27 @@
 ignores = [
 	'.git/',
 	'misc/afl-fuzz-tokens/',
-	'ninja_deps',
 	'src/depfile_parser.cc',
 	'src/lexer.cc',
 ]
 
 error_count = 0
 
-def error(path, msg):
+def error(path: str, msg: str) -> None:
 	global error_count
 	error_count += 1
 	print('\x1b[1;31m{}\x1b[0;31m{}\x1b[0m'.format(path, msg))
 
+try:
+	import git
+	repo = git.Repo('.')
+except:
+	repo = None
+
 for root, directory, filenames in os.walk('.'):
 	for filename in filenames:
 		path = os.path.join(root, filename)[2:]
-		if any([path.startswith(x) for x in ignores]):
+		if any([path.startswith(x) for x in ignores]) or (repo is not None and repo.ignored(path)):
 			continue
 		with open(path, 'rb') as file:
 			line_nr = 1
diff --git a/misc/jobserver_pool.py b/misc/jobserver_pool.py
new file mode 100755
index 0000000..aac8841
--- /dev/null
+++ b/misc/jobserver_pool.py
@@ -0,0 +1,340 @@
+#!/usr/bin/env python3
+# Copyright 2024 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Setup a GNU Make Jobserver jobs pool then launch a command with it.
+
+On Windows, this only supports the semaphore-based scheme.
+On Posix, this uses a fifo by default, use --pipe for pipe mode.
+
+On exit, this script verifies that all job slots were returned to
+the pool, and will print an error message if this is not the case.
+
+This is useful to catch the use of broken protocol clients.
+Use the `--no-check` flag to disable this.
+
+See --help-usage for usage examples.
+"""
+import argparse
+import os
+import platform
+import subprocess
+import sys
+import typing as T
+
+# Technical note about the MAKEFLAGS values set by this script.
+#
+# All the MAKEFLAGS values created by this script begin
+# with " -j{count} ", i.e. an initial space, the "-j" characters
+# followed by a job slot count then another space.
+#
+# The initial space is only there to mimic what GNU Make 4.3
+# does. Other pool implementations do not use one and thus
+# clients should not expect it (even GNU Make doesn't seem
+# to care when used as a jobserver client).
+#
+# The {count} value is also not available in many pool
+# implementations, but is useful to better debug multi-builds
+# using this script (i.e. to verify that the pool has the
+# expected size). Protocol clients should not depend on it
+# though.
+
+_DEFAULT_NAME = "jobserver_pool"
+_IS_WINDOWS = sys.platform in ("win32", "cygwin")
+
+if _IS_WINDOWS:
+
+    try:
+        # This requires pywin32 to be installed.
+        import pywintypes
+        import win32event
+        import win32api
+    except ModuleNotFoundError:
+        print(
+            "\nERROR: Could not import Win32 API, please install pywin32, e.g. `python -m pip install pywin32`.\n",
+            file=sys.stderr,
+        )
+        raise
+
+    # It seems impossible to import a proper mypy-compatible type definition for PyHANDLE
+    # 'from pywintypes import PyHANDLE' fails stating there is no such name.
+    # 'from pywintypes import HANDLE as PyHANDLE' fails because HANDLE is a function, not a type.
+    PyHandle: T.TypeAlias = T.Any
+
+    def create_sem(
+        sem_name: str, jobs_count: int
+    ) -> T.Tuple[PyHandle, T.Dict[str, str]]:
+        """Create and initialize Win32 semaphore."""
+        assert jobs_count > 0, f"Jobs count must be strictly positive"
+        # The win32event documentation states that the first argument to CreateSemaphore()
+        # can be None to indicate default security attributes, but mypy only wants
+        # a PySECURITY_ATTRIBUTES for some reason.
+        handle = win32event.CreateSemaphore(
+            None,  # type: ignore
+            jobs_count - 1,
+            jobs_count - 1,
+            sem_name,
+        )
+        assert bool(handle), f"Error creating Win32 semaphore {win32api.GetLastError()}"
+        # See technical note above about MAKEFLAGS format.
+        env = dict(os.environ)
+        env["MAKEFLAGS"] = f" -j{jobs_count} --jobserver-auth=" + sem_name
+        return handle, env
+
+    def check_sem_count(handle: PyHandle, jobs_count: int) -> int:
+        if jobs_count <= 1:
+            # Nothing to check here.
+            return 0
+
+        expected_count = jobs_count - 1
+
+        read_count = win32event.ReleaseSemaphore(handle, 1)
+        if read_count < expected_count:
+            print(
+                f"ERROR: {expected_count - read_count} were missing from the jobs pool (got {read_count}, expected {expected_count})",
+                file=sys.stderr,
+            )
+            return 1
+        if read_count > expected_count:
+            print(
+                f"ERROR: {read_count - expected_count} extra tokens were released to the jobs pool (got {read_count}, expected {expected_count})",
+                file=sys.stderr,
+            )
+            return 1
+
+        return 0
+
+    def print_usage() -> int:
+        print(
+            r"""Example usage:
+
+# Start <command> after setting the server to provide as many jobs
+# as available CPUs (the default)
+python \path\to\jobserver_pool.py <command>
+
+# Start <command> with a fixed number of job slots.
+python \path\to\jobserver_pool.py -j10 <command>
+
+# Disable the feature with a non-positive count. This is equivalent
+# to running <command> directly.
+python \path\to\jobserver_pool.py -j0 <command>
+
+# Use a specific semaphore name
+python \path\to\jobserver_pool.py --name=my_build_jobs <command>
+
+# Setup jobserver then start new interactive PowerShell
+# session, print MAKEFLAGS value, build stuff, then exit.
+python \path\to\jobserver_pool.py powershell.exe
+$env:MAKEFLAGS
+... build stuff ...
+exit
+"""
+        )
+        return 0
+
+else:  # !_IS_WINDOWS
+
+    def create_pipe(jobs_count: int) -> T.Tuple[int, int, T.Dict[str, str]]:
+        """Create and fill Posix PIPE."""
+        read_fd, write_fd = os.pipe()
+        os.set_inheritable(read_fd, True)
+        os.set_inheritable(write_fd, True)
+        assert jobs_count > 0, f"Token count must be strictly positive"
+        os.write(write_fd, (jobs_count - 1) * b"x")
+        # See technical note above about MAKEFLAGS format.
+        env = dict(os.environ)
+        env["MAKEFLAGS"] = (
+            f" -j{jobs_count} --jobserver-fds={read_fd},{write_fd} --jobserver-auth={read_fd},{write_fd}"
+        )
+        return read_fd, write_fd, env
+
+    def create_fifo(path: str, jobs_count: int) -> T.Tuple[int, int, T.Dict[str, str]]:
+        """Create and fill Posix FIFO."""
+        if os.path.exists(path):
+            os.remove(path)
+
+        # mypy complains that this does not exist on Windows.
+        os.mkfifo(path)  # type: ignore
+
+        read_fd = os.open(path, os.O_RDONLY | os.O_NONBLOCK)
+        write_fd = os.open(path, os.O_WRONLY | os.O_NONBLOCK)
+        assert jobs_count > 0, f"Token count must be strictly positive"
+        os.write(write_fd, (jobs_count - 1) * b"x")
+        # See technical note above about MAKEFLAGS format.
+        env = dict(os.environ)
+        env["MAKEFLAGS"] = f" -j{jobs_count} --jobserver-auth=fifo:" + path
+        return read_fd, write_fd, env
+
+    def print_usage() -> int:
+        print(
+            r"""Example usage:
+
+# Start <command> after setting the job pool to provide as many jobs
+# as available CPUs (the default)
+/path/to/jobserver_pool.py <command>
+
+# Start <command> with a fixed number of jobs
+/path/to/jobserver_pool.py -j10 <command>
+
+# Disable the feature with a non-positive count. This is equivalent
+# to running <command> directly.
+/path/to/jobserver_pool.py -j0 <command>
+
+# Use a specific FIFO path
+/path/to/jobserver_pool.py --fifo=/tmp/my_build_jobs <command>
+
+# Setup jobserver then start new interactive Bash shell
+# session, print MAKEFLAGS value, build stuff, then exit.
+/path/to/jobserver_pool.py bash -i
+echo "$MAKEFLAGS"
+... build stuff ...
+exit
+"""
+        )
+        return 0
+
+    def check_pipe_tokens(read_fd: int, jobs_count: int) -> int:
+        if jobs_count <= 1:  # Nothing to check
+            return 0
+
+        # Remove implicit token from the expected count.
+        expected_count = jobs_count - 1
+        os.set_blocking(read_fd, False)
+        read_count = 0
+        while True:
+            try:
+                token = os.read(read_fd, 1)
+                if len(token) == 0:  # End of pipe?
+                    break
+                read_count += 1
+            except BlockingIOError:
+                break
+
+        if read_count < expected_count:
+            print(
+                f"ERROR: {expected_count - read_count} tokens were missing from the jobs pool (got {read_count}, expected {expected_count})",
+                file=sys.stderr,
+            )
+            return 1
+        if read_count > expected_count:
+            print(
+                f"ERROR: {read_count - expected_count} extra tokens were released to the jobs pool (got {read_count}, expected {expected_count})",
+                file=sys.stderr,
+            )
+            return 1
+
+        return 0
+
+
+def main() -> int:
+    parser = argparse.ArgumentParser(
+        description=__doc__, formatter_class=argparse.RawTextHelpFormatter
+    )
+    if _IS_WINDOWS:
+        parser.add_argument(
+            "--name",
+            help=f"Specify semaphore name, default is {_DEFAULT_NAME}",
+            default=_DEFAULT_NAME,
+        )
+    else:
+        mutex_group = parser.add_mutually_exclusive_group()
+        mutex_group.add_argument(
+            "--pipe",
+            action="store_true",
+            help="Implement the pool with a Unix pipe (default is FIFO).",
+        )
+        mutex_group.add_argument(
+            "--fifo",
+            default=_DEFAULT_NAME,
+            help=f"Specify pool FIFO file path (default ./{_DEFAULT_NAME})",
+        )
+
+    parser.add_argument(
+        "--no-check",
+        action="store_true",
+        help="Disable the final check that verifies that all job slots were returned to the pool on exit.",
+    )
+
+    parser.add_argument(
+        "--help-usage", action="store_true", help="Print usage examples."
+    )
+
+    parser.add_argument(
+        "-j",
+        "--jobs",
+        action="store",
+        metavar="COUNT",
+        dest="jobs_count",
+        type=int,
+        default=os.cpu_count(),
+        help="Set job slots ccount, default is available CPUs count",
+    )
+
+    parser.add_argument("command", nargs=argparse.REMAINDER, help="Command to run.")
+    args = parser.parse_args()
+
+    if args.help_usage:
+        return print_usage()
+
+    if not args.command:
+        parser.error("This script requires at least one command argument!")
+
+    jobs_count = args.jobs_count
+    if jobs_count <= 0:
+        # Disable the feature.
+        ret = subprocess.run(args.command)
+        exit_code = ret.returncode
+    elif _IS_WINDOWS:
+        # Run with a Windows semaphore.
+        handle, env = create_sem(args.name, jobs_count)
+        try:
+            ret = subprocess.run(args.command, env=env)
+            exit_code = ret.returncode
+
+            if exit_code == 0 and not args.no_check:
+                exit_code = check_sem_count(handle, jobs_count)
+
+        finally:
+            win32api.CloseHandle(handle)
+    else:
+        # Run with a FIFO by default, or pipe descriptors if --pipe is used.
+        exit_code = 0
+        fifo_path = ""
+        try:
+            if not args.pipe:
+                fifo_path = os.path.abspath(args.fifo)
+                read_fd, write_fd, env = create_fifo(fifo_path, args.jobs_count)
+                ret = subprocess.run(args.command, env=env)
+            else:
+                read_fd, write_fd, env = create_pipe(args.jobs_count)
+                ret = subprocess.run(
+                    args.command, env=env, pass_fds=(read_fd, write_fd)
+                )
+
+            exit_code = ret.returncode
+            if exit_code == 0 and not args.no_check:
+                exit_code = check_pipe_tokens(read_fd, jobs_count)
+
+        finally:
+            os.close(read_fd)
+            os.close(write_fd)
+
+            if fifo_path:
+                os.remove(fifo_path)
+
+    return exit_code
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/misc/jobserver_pool_test.py b/misc/jobserver_pool_test.py
new file mode 100755
index 0000000..97916c8
--- /dev/null
+++ b/misc/jobserver_pool_test.py
@@ -0,0 +1,174 @@
+#!/usr/bin/env python3
+
+"""Regression tests for the jobserver_pool.py script."""
+
+import os
+import re
+import platform
+import subprocess
+import sys
+import tempfile
+import unittest
+import typing as T
+
+_SCRIPT_DIR = os.path.dirname(__file__)
+_JOBSERVER_SCRIPT = os.path.join(_SCRIPT_DIR, "jobserver_pool.py")
+_JOBSERVER_CMD = [sys.executable, _JOBSERVER_SCRIPT]
+
+_IS_WINDOWS = sys.platform == "win32"
+
+# This is only here to avoid depending on the non-standard
+# scanf package which does the job properly :-)
+
+
+def _simple_scanf(pattern: str, input: str) -> T.Optional[T.Sequence[T.Any]]:
+    """Extract values from input using a scanf-like pattern.
+
+    This is very basic and only used to avoid depending on the
+    non-standard scanf package which does the job properly.
+    Only supports %d, %s and %%, does not support any fancy
+    escaping.
+    """
+    re_pattern = ""
+    groups = ""
+    from_pos = 0
+
+    # Just in case.
+    assert "." not in pattern, f"Dots in pattern not supported."
+    assert "?" not in pattern, f"Question marks in pattern not supported."
+
+    while True:
+        next_percent = pattern.find("%", from_pos)
+        if next_percent < 0 or next_percent + 1 >= len(pattern):
+            re_pattern += pattern[from_pos:]
+            break
+
+        re_pattern += pattern[from_pos:next_percent]
+
+        from_pos = next_percent + 2
+        formatter = pattern[next_percent + 1]
+        if formatter == "%":
+            re_pattern += "%"
+        elif formatter == "d":
+            groups += formatter
+            re_pattern += "(\\d+)"
+        elif formatter == "s":
+            groups += formatter
+            re_pattern += "(\\S+)"
+        else:
+            assert False, f"Unsupported scanf formatter: %{formatter}"
+
+    m = re.match(re_pattern, input)
+    if not m:
+        return None
+
+    result = []
+    for group_index, formatter in enumerate(groups, start=1):
+        if formatter == "d":
+            result.append(int(m.group(group_index)))
+        elif formatter == "s":
+            result.append(m.group(group_index))
+        else:
+            assert False, f"Unsupported formatter {formatter}"
+
+    return result
+
+
+class JobserverPool(unittest.TestCase):
+    def _run_jobserver_echo_MAKEFLAGS(
+        self, cmd_args_prefix
+    ) -> "subprocess.CompletedProcess[str]":
+        if _IS_WINDOWS:
+            cmd_args = cmd_args_prefix + ["cmd.exe", "/c", "echo %MAKEFLAGS%"]
+        else:
+            cmd_args = cmd_args_prefix + ["sh", "-c", 'echo "$MAKEFLAGS"']
+
+        ret = subprocess.run(
+            cmd_args,
+            text=True,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+        )
+        ret.check_returncode()
+        return ret
+
+    def _test_echo_MAKEFLAGS(self, cmd_args_prefix, expected_core_count: int):
+        ret = self._run_jobserver_echo_MAKEFLAGS(cmd_args_prefix)
+        makeflags = ret.stdout.rstrip()
+
+        if expected_core_count == 0:
+            if _IS_WINDOWS:
+                # On Windows, echo %FOO% prints "%FOO%" if FOO is not defined!
+                self.assertEqual(makeflags.strip(), "%MAKEFLAGS%")
+            else:
+                self.assertEqual(makeflags.strip(), "")
+
+        else:  # expected_core_count > 0
+            if _IS_WINDOWS:
+                expected_format = " -j%d --jobserver-auth=%s"
+            else:
+                expected_format = " -j%d --jobserver-auth=fifo:%s"
+
+            m = _simple_scanf(expected_format, makeflags)
+            self.assertTrue(
+                m,
+                f"Invalid MAKEFLAGS value, expected format [{expected_format}], got: [{makeflags}]",
+            )
+
+            if _IS_WINDOWS:
+                sem_name = m[1]
+                self.assertEqual(
+                    sem_name,
+                    "jobserver_pool",
+                    f"Invalid semaphore name in MAKEFLAGS value [{makeflags}]",
+                )
+            else:
+                fifo_name = os.path.basename(m[1])
+                self.assertEqual(
+                    fifo_name,
+                    "jobserver_pool",
+                    f"Invalid fifo name in MAKEFLAGS value [{makeflags}]",
+                )
+
+            core_count = m[0]
+            self.assertEqual(
+                core_count,
+                expected_core_count,
+                f"Invalid core count {core_count}, expected {expected_core_count}",
+            )
+
+    def test_MAKEFLAGS_default(self):
+        self._test_echo_MAKEFLAGS(_JOBSERVER_CMD, os.cpu_count())
+
+    def test_MAKEFLAGS_with_10_jobs(self):
+        self._test_echo_MAKEFLAGS(_JOBSERVER_CMD + ["-j10"], 10)
+        self._test_echo_MAKEFLAGS(_JOBSERVER_CMD + ["--jobs=10"], 10)
+        self._test_echo_MAKEFLAGS(_JOBSERVER_CMD + ["--jobs", "10"], 10)
+
+    def test_MAKEFLAGS_with_no_jobs(self):
+        self._test_echo_MAKEFLAGS(_JOBSERVER_CMD + ["-j0"], 0)
+        self._test_echo_MAKEFLAGS(_JOBSERVER_CMD + ["--jobs=0"], 0)
+        self._test_echo_MAKEFLAGS(_JOBSERVER_CMD + ["--jobs", "0"], 0)
+
+    @unittest.skipIf(_IS_WINDOWS, "--fifo is not supported on Windows")
+    def test_MAKEFLAGS_with_fifo(self):
+        fifo_name = "test_fifo"
+        fifo_path = os.path.abspath(fifo_name)
+        ret = self._run_jobserver_echo_MAKEFLAGS(
+            _JOBSERVER_CMD + ["-j10", "--fifo", fifo_name]
+        )
+        makeflags = ret.stdout.rstrip()
+        self.assertEqual(makeflags, " -j10 --jobserver-auth=fifo:" + fifo_path)
+
+    @unittest.skipIf(not _IS_WINDOWS, "--name is not supported on Posix")
+    def test_MAKEFLAGS_with_name(self):
+        sem_name = "test_semaphore"
+        ret = self._run_jobserver_echo_MAKEFLAGS(
+            _JOBSERVER_CMD + ["-j10", "--name", sem_name]
+        )
+        makeflags = ret.stdout.rstrip()
+        self.assertEqual(makeflags, " -j10 --jobserver-auth=" + sem_name)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/misc/jobserver_test.py b/misc/jobserver_test.py
new file mode 100755
index 0000000..0378c98
--- /dev/null
+++ b/misc/jobserver_test.py
@@ -0,0 +1,324 @@
+#!/usr/bin/env python3
+# Copyright 2024 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from textwrap import dedent
+import os
+import platform
+import subprocess
+import tempfile
+import typing as T
+import shlex
+import sys
+import unittest
+
+_SCRIPT_DIR = os.path.realpath(os.path.dirname(__file__))
+_JOBSERVER_POOL_SCRIPT = os.path.join(_SCRIPT_DIR, "jobserver_pool.py")
+_JOBSERVER_TEST_HELPER_SCRIPT = os.path.join(_SCRIPT_DIR, "jobserver_test_helper.py")
+
+_PLATFORM_IS_WINDOWS = platform.system() == "Windows"
+
+# Set this to True to debug command invocations.
+_DEBUG = False
+
+default_env = dict(os.environ)
+default_env.pop("NINJA_STATUS", None)
+default_env.pop("MAKEFLAGS", None)
+default_env["TERM"] = "dumb"
+NINJA_PATH = os.path.abspath("./ninja")
+
+
+class BuildDir:
+    def __init__(self, build_ninja: str):
+        self.build_ninja = dedent(build_ninja)
+        self.d: T.Optional[tempfile.TemporaryDirectory] = None
+
+    def __enter__(self):
+        self.d = tempfile.TemporaryDirectory()
+        with open(os.path.join(self.d.name, "build.ninja"), "w") as f:
+            f.write(self.build_ninja)
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.d.cleanup()
+
+    @property
+    def path(self) -> str:
+        assert self.d
+        return self.d.name
+
+    def run(
+        self,
+        cmd_flags: T.Sequence[str] = [],
+        env: T.Dict[str, str] = default_env,
+    ) -> None:
+        """Run a command, raise exception on error. Do not capture outputs."""
+        ret = subprocess.run(cmd_flags, env=env)
+        ret.check_returncode()
+
+    def ninja_run(
+        self,
+        ninja_args: T.List[str],
+        prefix_args: T.List[str] = [],
+        extra_env: T.Dict[str, str] = {},
+    ) -> "subprocess.CompletedProcess[str]":
+        ret = self.ninja_spawn(
+            ninja_args,
+            prefix_args=prefix_args,
+            extra_env=extra_env,
+            capture_output=False,
+        )
+        ret.check_returncode()
+        return ret
+
+    def ninja_clean(self) -> None:
+        self.ninja_run(["-t", "clean"])
+
+    def ninja_spawn(
+        self,
+        ninja_args: T.List[str],
+        prefix_args: T.List[str] = [],
+        extra_env: T.Dict[str, str] = {},
+        capture_output: bool = True,
+    ) -> "subprocess.CompletedProcess[str]":
+        """Run Ninja command and capture outputs."""
+        cmd_args = prefix_args + [NINJA_PATH, "-C", self.path] + ninja_args
+        if _DEBUG:
+            cmd_str = " ".join(shlex.quote(c) for c in cmd_args)
+            print(f"CMD [{cmd_str}]", file=sys.stderr)
+        return subprocess.run(
+            cmd_args,
+            text=True,
+            stdout=subprocess.PIPE if capture_output else None,
+            stderr=subprocess.PIPE if capture_output else None,
+            env={**default_env, **extra_env},
+        )
+
+
+def span_output_file(span_n: int) -> str:
+    return "out%02d" % span_n
+
+
+def generate_build_plan(command_count: int) -> str:
+    """Generate a Ninja build plan for |command_count| parallel tasks.
+
+    Each task calls the test helper script which waits for 50ms
+    then writes its own start and end time to its output file.
+    """
+    result = f"""
+rule span
+    command = {sys.executable} -S {_JOBSERVER_TEST_HELPER_SCRIPT} --duration-ms=50 $out
+
+"""
+
+    for n in range(command_count):
+        result += "build %s: span\n" % span_output_file(n)
+
+    result += "build all: phony %s\n" % " ".join(
+        [span_output_file(n) for n in range(command_count)]
+    )
+    return result
+
+
+def compute_max_overlapped_spans(build_dir: str, command_count: int) -> int:
+    """Compute the maximum number of overlapped spanned tasks.
+
+    This reads the output files from |build_dir| and look at their start and end times
+    to compute the maximum number of tasks that were run in parallel.
+    """
+    # Read the output files.
+    if command_count < 2:
+        return 0
+
+    spans: T.List[T.Tuple[int, int]] = []
+    for n in range(command_count):
+        with open(os.path.join(build_dir, span_output_file(n)), "rb") as f:
+            content = f.read().decode("utf-8")
+        lines = content.splitlines()
+        assert len(lines) == 2, f"Unexpected output file content: [{content}]"
+        spans.append((int(lines[0]), int(lines[1])))
+
+    # Stupid but simple, for each span, count the number of other spans that overlap it.
+    max_overlaps = 1
+    for n in range(command_count):
+        cur_start, cur_end = spans[n]
+        cur_overlaps = 1
+        for m in range(command_count):
+            other_start, other_end = spans[m]
+            if n != m and other_end > cur_start and other_start < cur_end:
+                cur_overlaps += 1
+
+        if cur_overlaps > max_overlaps:
+            max_overlaps = cur_overlaps
+
+    return max_overlaps
+
+
+class JobserverTest(unittest.TestCase):
+
+    def test_no_jobserver_client(self):
+        task_count = 4
+        build_plan = generate_build_plan(task_count)
+        with BuildDir(build_plan) as b:
+            output = b.run([NINJA_PATH, "-C", b.path, f"-j{task_count}", "all"])
+
+            max_overlaps = compute_max_overlapped_spans(b.path, task_count)
+            self.assertEqual(max_overlaps, task_count)
+
+            b.ninja_clean()
+            output = b.run([NINJA_PATH, "-C", b.path, "-j1", "all"])
+
+            max_overlaps = compute_max_overlapped_spans(b.path, task_count)
+            self.assertEqual(max_overlaps, 1)
+
+    def _run_client_test(self, jobserver_args: T.List[str]) -> None:
+        task_count = 4
+        build_plan = generate_build_plan(task_count)
+        with BuildDir(build_plan) as b:
+            # First, run all tasks with {task_count} tokens; this should allow all
+            # tasks to run in parallel.
+            ret = b.ninja_run(
+                ninja_args=["all"],
+                prefix_args=jobserver_args + [f"--jobs={task_count}"],
+            )
+            max_overlaps = compute_max_overlapped_spans(b.path, task_count)
+            self.assertEqual(max_overlaps, task_count)
+
+            # Second, use 2 tokens only, and verify that this was enforced by Ninja.
+            b.ninja_clean()
+            b.ninja_run(
+                ["all"],
+                prefix_args=jobserver_args + ["--jobs=2"],
+            )
+            max_overlaps = compute_max_overlapped_spans(b.path, task_count)
+            self.assertEqual(max_overlaps, 2)
+
+            # Third, verify that --jobs=1 serializes all tasks.
+            b.ninja_clean()
+            b.ninja_run(
+                ["all"],
+                prefix_args=jobserver_args + ["--jobs=1"],
+            )
+            max_overlaps = compute_max_overlapped_spans(b.path, task_count)
+            self.assertEqual(max_overlaps, 1)
+
+            # Finally, verify that -j1 overrides the pool.
+            b.ninja_clean()
+            b.ninja_run(
+                ["-j1", "all"],
+                prefix_args=jobserver_args + [f"--jobs={task_count}"],
+            )
+            max_overlaps = compute_max_overlapped_spans(b.path, task_count)
+            self.assertEqual(max_overlaps, 1)
+
+            # On Linux, use taskset to limit the number of available cores to 1
+            # and verify that the jobserver overrides the default Ninja parallelism
+            # and that {task_count} tasks are still spawned in parallel.
+            if platform.system() == "Linux":
+                # First, run without a jobserver, with a single CPU, Ninja will
+                # use a parallelism of 2 in this case (GuessParallelism() in ninja.cc)
+                b.ninja_clean()
+                b.ninja_run(
+                    ["all"],
+                    prefix_args=["taskset", "-c", "0"],
+                )
+                max_overlaps = compute_max_overlapped_spans(b.path, task_count)
+                self.assertEqual(max_overlaps, 2)
+
+                # Now with a jobserver with {task_count} tasks.
+                b.ninja_clean()
+                b.ninja_run(
+                    ["all"],
+                    prefix_args=jobserver_args
+                    + [f"--jobs={task_count}"]
+                    + ["taskset", "-c", "0"],
+                )
+                max_overlaps = compute_max_overlapped_spans(b.path, task_count)
+                self.assertEqual(max_overlaps, task_count)
+
+    @unittest.skipIf(_PLATFORM_IS_WINDOWS, "These test methods do not work on Windows")
+    def test_jobserver_client_with_posix_fifo(self):
+        self._run_client_test([sys.executable, "-S", _JOBSERVER_POOL_SCRIPT])
+
+    @unittest.skipIf(_PLATFORM_IS_WINDOWS, "These test methods do not work on Windows")
+    def test_jobserver_client_with_posix_pipe(self):
+        # Verify that setting up a --pipe server does not make Ninja exit with an error.
+        # Instead, a warning is printed.
+        task_count = 4
+        build_plan = generate_build_plan(task_count)
+        with BuildDir(build_plan) as b:
+
+            prefix_args = [
+                sys.executable,
+                "-S",
+                _JOBSERVER_POOL_SCRIPT,
+                "--pipe",
+                f"--jobs={task_count}",
+            ]
+
+            def run_ninja_with_jobserver_pipe(args):
+                ret = b.ninja_spawn(args, prefix_args=prefix_args)
+                ret.check_returncode()
+                return ret.stdout, ret.stderr
+
+            output, error = run_ninja_with_jobserver_pipe(["all"])
+            if _DEBUG:
+                print(f"OUTPUT [{output}]\nERROR [{error}]\n", file=sys.stderr)
+            self.assertTrue(error.find("Pipe-based protocol is not supported!") >= 0)
+
+            max_overlaps = compute_max_overlapped_spans(b.path, task_count)
+            self.assertEqual(max_overlaps, task_count)
+
+            # Using an explicit -j<N> ignores the jobserver pool.
+            b.ninja_clean()
+            output, error = run_ninja_with_jobserver_pipe(["-j1", "all"])
+            if _DEBUG:
+                print(f"OUTPUT [{output}]\nERROR [{error}]\n", file=sys.stderr)
+            self.assertFalse(error.find("Pipe-based protocol is not supported!") >= 0)
+
+            max_overlaps = compute_max_overlapped_spans(b.path, task_count)
+            self.assertEqual(max_overlaps, 1)
+
+    def _test_MAKEFLAGS_value(
+        self, ninja_args: T.List[str] = [], prefix_args: T.List[str] = []
+    ):
+        build_plan = r"""
+rule print
+    command = echo MAKEFLAGS="[$$MAKEFLAGS]"
+
+build all: print
+"""
+        with BuildDir(build_plan) as b:
+            ret = b.ninja_spawn(
+                ninja_args + ["--quiet", "all"], prefix_args=prefix_args
+            )
+            self.assertEqual(ret.returncode, 0)
+            output = ret.stdout.strip()
+            pos = output.find("MAKEFLAGS=[")
+            self.assertNotEqual(pos, -1, "Could not find MAKEFLAGS in output!")
+            makeflags, sep, _ = output[pos + len("MAKEFLAGS=[") :].partition("]")
+            self.assertEqual(sep, "]", "Missing ] in output!: " + output)
+            self.assertTrue(
+                "--jobserver-auth=" in makeflags,
+                f"Missing --jobserver-auth from MAKEFLAGS [{makeflags}]\nSTDOUT [{ret.stdout}]\nSTDERR [{ret.stderr}]",
+            )
+
+    def test_client_passes_MAKEFLAGS(self):
+        self._test_MAKEFLAGS_value(
+            prefix_args=[sys.executable, "-S", _JOBSERVER_POOL_SCRIPT]
+        )
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/misc/jobserver_test_helper.py b/misc/jobserver_test_helper.py
new file mode 100755
index 0000000..8c23862
--- /dev/null
+++ b/misc/jobserver_test_helper.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python3
+# Copyright 2024 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Simple utility used by the jobserver test. Wait for specific time, then write start/stop times to output file."""
+
+import argparse
+import time
+import sys
+from pathlib import Path
+
+
+def main():
+    parser = argparse.ArgumentParser(description=__doc__)
+    parser.add_argument(
+        "--duration-ms",
+        default="50",
+        help="sleep duration in milliseconds (default 50)",
+    )
+    parser.add_argument("output_file", type=Path, help="output file name.")
+    args = parser.parse_args()
+
+    now_time_ns = time.time_ns()
+    time.sleep(int(args.duration_ms) / 1000.0)
+    args.output_file.write_text(f"{now_time_ns}\n{time.time_ns()}\n")
+
+    return 0
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/misc/manifest_fuzzer.cc b/misc/manifest_fuzzer.cc
index 0e1261a..085840a 100644
--- a/misc/manifest_fuzzer.cc
+++ b/misc/manifest_fuzzer.cc
@@ -27,15 +27,15 @@
 	if (!fp)
 		return 0;
 	fwrite(data, size, 1, fp);
-	fclose(fp);	
-	
+	fclose(fp);
+
 	std::string err;
 	RealDiskInterface disk_interface;
 	State state;
 	ManifestParser parser(&state, &disk_interface);
-	
+
 	parser.Load("/tmp/build.ninja", &err);
-	
+
 	std::__fs::filesystem::remove_all("/tmp/build.ninja");
 	return 0;
 }
diff --git a/misc/measure.py b/misc/measure.py
index f3825ef..e808804 100755
--- a/misc/measure.py
+++ b/misc/measure.py
@@ -20,10 +20,11 @@
 import time
 import subprocess
 import sys
+from typing import Union, List
 
 devnull = open('/dev/null', 'w')
 
-def run(cmd, repeat=10):
+def run(cmd: Union[str, List[str]], repeat: int = 10) -> None:
     print('sampling:', end=' ')
     sys.stdout.flush()
 
diff --git a/misc/ninja-mode.el b/misc/ninja-mode.el
deleted file mode 100644
index d4f06e6..0000000
--- a/misc/ninja-mode.el
+++ /dev/null
@@ -1,110 +0,0 @@
-;;; ninja-mode.el --- Major mode for editing .ninja files -*- lexical-binding: t -*-
-
-;; Package-Requires: ((emacs "24"))
-
-;; Copyright 2011 Google Inc. All Rights Reserved.
-;;
-;; Licensed under the Apache License, Version 2.0 (the "License");
-;; you may not use this file except in compliance with the License.
-;; You may obtain a copy of the License at
-;;
-;;     http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-
-;;; Commentary:
-
-;; Simple emacs mode for editing .ninja files.
-
-;;; Code:
-
-(defcustom ninja-indent-offset 2
-  "*Amount of offset per level of indentation."
-  :type 'integer
-  :safe 'natnump
-  :group 'ninja)
-
-(defconst ninja-keywords-re
-  (concat "^" (regexp-opt '("rule" "build" "subninja" "include" "pool" "default")
-                          'words)))
-
-(defvar ninja-keywords
-  `((,ninja-keywords-re . font-lock-keyword-face)
-    ("^[[:space:]]*\\([[:alnum:]_]+\\)[[:space:]]*=" 1 font-lock-variable-name-face)
-    ;; Variable expansion.
-    ("$[[:alnum:]_]+" . font-lock-variable-name-face)
-    ("${[[:alnum:]._]+}" . font-lock-variable-name-face)
-    ;; Rule names
-    ("rule +\\([[:alnum:]_.-]+\\)" 1 font-lock-function-name-face)
-    ;; Build Statement - highlight the rule used,
-    ;; allow for escaped $,: in outputs.
-    ("build +\\(?:[^:$\n]\\|$[:$]\\)+ *: *\\([[:alnum:]_.-]+\\)"
-     1 font-lock-function-name-face)))
-
-(defvar ninja-mode-syntax-table
-  (let ((table (make-syntax-table)))
-    (modify-syntax-entry ?\" "." table)
-    table)
-  "Syntax table used in `ninja-mode'.")
-
-(defun ninja-syntax-propertize (start end)
-  (save-match-data
-    (goto-char start)
-    (while (search-forward "#" end t)
-      (let ((match-pos (match-beginning 0)))
-        (when (and
-               ;; Is it the first non-white character on the line?
-               (eq match-pos (save-excursion (back-to-indentation) (point)))
-               (save-excursion
-                 (goto-char (line-end-position 0))
-                 (or
-                  ;; If we're continuing the previous line, it's not a
-                  ;; comment.
-                  (not (eq ?$ (char-before)))
-                  ;; Except if the previous line is a comment as well, as the
-                  ;; continuation dollar is ignored then.
-                  (nth 4 (syntax-ppss)))))
-          (put-text-property match-pos (1+ match-pos) 'syntax-table '(11))
-          (let ((line-end (line-end-position)))
-            ;; Avoid putting properties past the end of the buffer.
-            ;; Otherwise we get an `args-out-of-range' error.
-            (unless (= line-end (1+ (buffer-size)))
-              (put-text-property line-end (1+ line-end) 'syntax-table '(12)))))))))
-
-(defun ninja-compute-indentation ()
-  "Calculate indentation for the current line."
-  (save-excursion
-    (beginning-of-line)
-    (if (or (looking-at ninja-keywords-re)
-            (= (line-number-at-pos) 1))
-        0
-      (forward-line -1)
-      (if (looking-at ninja-keywords-re)
-          ninja-indent-offset
-        (current-indentation)))))
-
-(defun ninja-indent-line ()
-  "Indent the current line.  Uses previous indentation level if
- available or `ninja-indent-offset'"
-  (interactive "*")
-  (indent-line-to (ninja-compute-indentation)))
-
-;;;###autoload
-(define-derived-mode ninja-mode prog-mode "ninja"
-  (set (make-local-variable 'comment-start) "#")
-  (set (make-local-variable 'parse-sexp-lookup-properties) t)
-  (set (make-local-variable 'syntax-propertize-function) #'ninja-syntax-propertize)
-  (set (make-local-variable 'indent-line-function) 'ninja-indent-line)
-  (setq font-lock-defaults '(ninja-keywords)))
-
-;; Run ninja-mode for files ending in .ninja.
-;;;###autoload
-(add-to-list 'auto-mode-alist '("\\.ninja$" . ninja-mode))
-
-(provide 'ninja-mode)
-
-;;; ninja-mode.el ends here
diff --git a/misc/ninja_syntax_test.py b/misc/ninja_syntax_test.py
index 61fb177..3412249 100755
--- a/misc/ninja_syntax_test.py
+++ b/misc/ninja_syntax_test.py
@@ -15,6 +15,7 @@
 # limitations under the License.
 
 import unittest
+from typing import Dict
 
 try:
     from StringIO import StringIO
@@ -28,16 +29,16 @@
 INDENT = '    '
 
 class TestLineWordWrap(unittest.TestCase):
-    def setUp(self):
+    def setUp(self) -> None:
         self.out = StringIO()
         self.n = ninja_syntax.Writer(self.out, width=8)
 
-    def test_single_long_word(self):
+    def test_single_long_word(self) -> None:
         # We shouldn't wrap a single long word.
         self.n._line(LONGWORD)
         self.assertEqual(LONGWORD + '\n', self.out.getvalue())
 
-    def test_few_long_words(self):
+    def test_few_long_words(self) -> None:
         # We should wrap a line where the second word is overlong.
         self.n._line(' '.join(['x', LONGWORD, 'y']))
         self.assertEqual(' $\n'.join(['x',
@@ -45,13 +46,13 @@
                                       INDENT + 'y']) + '\n',
                          self.out.getvalue())
 
-    def test_comment_wrap(self):
+    def test_comment_wrap(self) -> None:
         # Filenames should not be wrapped
         self.n.comment('Hello /usr/local/build-tools/bin')
         self.assertEqual('# Hello\n# /usr/local/build-tools/bin\n',
                          self.out.getvalue())
 
-    def test_short_words_indented(self):
+    def test_short_words_indented(self) -> None:
         # Test that indent is taking into account when breaking subsequent lines.
         # The second line should not be '    to tree', as that's longer than the
         # test layout width of 8.
@@ -63,7 +64,7 @@
 ''',
                          self.out.getvalue())
 
-    def test_few_long_words_indented(self):
+    def test_few_long_words_indented(self) -> None:
         # Check wrapping in the presence of indenting.
         self.n._line(' '.join(['x', LONGWORD, 'y']), indent=1)
         self.assertEqual(' $\n'.join(['  ' + 'x',
@@ -71,14 +72,14 @@
                                       '  ' + INDENT + 'y']) + '\n',
                          self.out.getvalue())
 
-    def test_escaped_spaces(self):
+    def test_escaped_spaces(self) -> None:
         self.n._line(' '.join(['x', LONGWORDWITHSPACES, 'y']))
         self.assertEqual(' $\n'.join(['x',
                                       INDENT + LONGWORDWITHSPACES,
                                       INDENT + 'y']) + '\n',
                          self.out.getvalue())
 
-    def test_fit_many_words(self):
+    def test_fit_many_words(self) -> None:
         self.n = ninja_syntax.Writer(self.out, width=78)
         self.n._line('command = cd ../../chrome; python ../tools/grit/grit/format/repack.py ../out/Debug/obj/chrome/chrome_dll.gen/repack/theme_resources_large.pak ../out/Debug/gen/chrome/theme_resources_large.pak', 1)
         self.assertEqual('''\
@@ -88,7 +89,7 @@
 ''',
                          self.out.getvalue())
 
-    def test_leading_space(self):
+    def test_leading_space(self) -> None:
         self.n = ninja_syntax.Writer(self.out, width=14)  # force wrapping
         self.n.variable('foo', ['', '-bar', '-somethinglong'], 0)
         self.assertEqual('''\
@@ -97,7 +98,7 @@
 ''',
                          self.out.getvalue())
 
-    def test_embedded_dollar_dollar(self):
+    def test_embedded_dollar_dollar(self) -> None:
         self.n = ninja_syntax.Writer(self.out, width=15)  # force wrapping
         self.n.variable('foo', ['a$$b', '-somethinglong'], 0)
         self.assertEqual('''\
@@ -106,7 +107,7 @@
 ''',
                          self.out.getvalue())
 
-    def test_two_embedded_dollar_dollars(self):
+    def test_two_embedded_dollar_dollars(self) -> None:
         self.n = ninja_syntax.Writer(self.out, width=17)  # force wrapping
         self.n.variable('foo', ['a$$b', '-somethinglong'], 0)
         self.assertEqual('''\
@@ -115,7 +116,7 @@
 ''',
                          self.out.getvalue())
 
-    def test_leading_dollar_dollar(self):
+    def test_leading_dollar_dollar(self) -> None:
         self.n = ninja_syntax.Writer(self.out, width=14)  # force wrapping
         self.n.variable('foo', ['$$b', '-somethinglong'], 0)
         self.assertEqual('''\
@@ -124,7 +125,7 @@
 ''',
                          self.out.getvalue())
 
-    def test_trailing_dollar_dollar(self):
+    def test_trailing_dollar_dollar(self) -> None:
         self.n = ninja_syntax.Writer(self.out, width=14)  # force wrapping
         self.n.variable('foo', ['a$$', '-somethinglong'], 0)
         self.assertEqual('''\
@@ -134,11 +135,11 @@
                          self.out.getvalue())
 
 class TestBuild(unittest.TestCase):
-    def setUp(self):
+    def setUp(self) -> None:
         self.out = StringIO()
         self.n = ninja_syntax.Writer(self.out)
 
-    def test_variables_dict(self):
+    def test_variables_dict(self) -> None:
         self.n.build('out', 'cc', 'in', variables={'name': 'value'})
         self.assertEqual('''\
 build out: cc in
@@ -146,7 +147,7 @@
 ''',
                          self.out.getvalue())
 
-    def test_variables_list(self):
+    def test_variables_list(self) -> None:
         self.n.build('out', 'cc', 'in', variables=[('name', 'value')])
         self.assertEqual('''\
 build out: cc in
@@ -154,7 +155,7 @@
 ''',
                          self.out.getvalue())
 
-    def test_implicit_outputs(self):
+    def test_implicit_outputs(self) -> None:
         self.n.build('o', 'cc', 'i', implicit_outputs='io')
         self.assertEqual('''\
 build o | io: cc i
@@ -162,29 +163,29 @@
                          self.out.getvalue())
 
 class TestExpand(unittest.TestCase):
-    def test_basic(self):
+    def test_basic(self) -> None:
         vars = {'x': 'X'}
         self.assertEqual('foo', ninja_syntax.expand('foo', vars))
 
-    def test_var(self):
+    def test_var(self) -> None:
         vars = {'xyz': 'XYZ'}
         self.assertEqual('fooXYZ', ninja_syntax.expand('foo$xyz', vars))
 
-    def test_vars(self):
+    def test_vars(self) -> None:
         vars = {'x': 'X', 'y': 'YYY'}
         self.assertEqual('XYYY', ninja_syntax.expand('$x$y', vars))
 
-    def test_space(self):
-        vars = {}
+    def test_space(self) -> None:
+        vars: Dict[str, str] = {}
         self.assertEqual('x y z', ninja_syntax.expand('x$ y$ z', vars))
 
-    def test_locals(self):
+    def test_locals(self) -> None:
         vars = {'x': 'a'}
         local_vars = {'x': 'b'}
         self.assertEqual('a', ninja_syntax.expand('$x', vars))
         self.assertEqual('b', ninja_syntax.expand('$x', vars, local_vars))
 
-    def test_double(self):
+    def test_double(self) -> None:
         self.assertEqual('a b$c', ninja_syntax.expand('a$ b$$c', {}))
 
 if __name__ == '__main__':
diff --git a/misc/output_test.py b/misc/output_test.py
index 13b0926..dc2eca5 100755
--- a/misc/output_test.py
+++ b/misc/output_test.py
@@ -11,6 +11,8 @@
 import sys
 import tempfile
 import unittest
+from textwrap import dedent
+import typing as T
 
 default_env = dict(os.environ)
 default_env.pop('NINJA_STATUS', None)
@@ -18,30 +20,126 @@
 default_env['TERM'] = ''
 NINJA_PATH = os.path.abspath('./ninja')
 
-def run(build_ninja, flags='', pipe=False, env=default_env):
-    with tempfile.TemporaryDirectory() as d:
-        with open(os.path.join(d, 'build.ninja'), 'w') as f:
-            f.write(build_ninja)
+def remove_non_visible_lines(raw_output: bytes) -> str:
+  # When running in a smart terminal, Ninja uses CR (\r) to
+  # return the cursor to the start of the current line, prints
+  # something, then uses `\x1b[K` to clear everything until
+  # the end of the line.
+  #
+  # Thus printing 'FOO', 'BAR', 'ZOO' on the same line, then
+  # jumping to the next one results in the following output
+  # on Posix:
+  #
+  # '\rFOO\x1b[K\rBAR\x1b[K\rZOO\x1b[K\r\n'
+  #
+  # The following splits the output at both \r, \n and \r\n
+  # boundaries, which gives:
+  #
+  #  [ '\r', 'FOO\x1b[K\r', 'BAR\x1b[K\r', 'ZOO\x1b[K\r\n' ]
+  #
+  decoded_lines = raw_output.decode('utf-8').splitlines(True)
+
+  # Remove any item that ends with a '\r' as this means its
+  # content will be overwritten by the next item in the list.
+  # For the previous example, this gives:
+  #
+  #  [ 'ZOO\x1b[K\r\n' ]
+  #
+  final_lines = [ l for l in decoded_lines if not l.endswith('\r') ]
+
+  # Return a single string that concatenates all filtered lines
+  # while removing any remaining \r in it. Needed to transform
+  # \r\n into \n.
+  #
+  #  "ZOO\x1b[K\n'
+  #
+  return ''.join(final_lines).replace('\r', '')
+
+class BuildDir:
+    def __init__(self, build_ninja: str):
+        self.build_ninja = dedent(build_ninja)
+        self.d = None
+
+    def __enter__(self):
+        self.d = tempfile.TemporaryDirectory()
+        with open(os.path.join(self.d.name, 'build.ninja'), 'w') as f:
+            f.write(self.build_ninja)
             f.flush()
-        ninja_cmd = '{} {}'.format(NINJA_PATH, flags)
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.d.cleanup()
+
+    @property
+    def path(self) -> str:
+        return os.path.realpath(self.d.name)
+
+
+    def run(
+        self,
+        flags: T.Optional[str] = None,
+        pipe: bool = False,
+        raw_output: bool = False,
+        env: T.Dict[str, str] = default_env,
+        print_err_output: bool = True,
+    ) -> str:
+        """Run Ninja command, and get filtered output.
+
+        Args:
+          flags: Extra arguments passed to Ninja.
+
+          pipe: set to True to run Ninja in a non-interactive terminal.
+            If False (the default), this runs Ninja in a pty to simulate
+            a smart terminal (this feature cannot work on Windows!).
+
+          raw_output: set to True to return the raw, unfiltered command
+            output.
+
+          env: Optional environment dictionary to run the command in.
+
+          print_err_output: set to False if the test expects ninja to print
+            something to stderr. (Otherwise, an error message from Ninja
+            probably represents a failed test.)
+
+        Returns:
+          A UTF-8 string corresponding to the output (stdout only) of the
+          Ninja command. By default, partial lines that were overwritten
+          are removed according to the rules described in the comments
+          of remove_non_visible_lines() above.
+        """
+        ninja_cmd = '{} {}'.format(NINJA_PATH, flags if flags else '')
         try:
             if pipe:
-                output = subprocess.check_output([ninja_cmd], shell=True, cwd=d, env=env)
+                output = subprocess.check_output(
+                    [ninja_cmd], shell=True, cwd=self.d.name, env=env)
             elif platform.system() == 'Darwin':
                 output = subprocess.check_output(['script', '-q', '/dev/null', 'bash', '-c', ninja_cmd],
-                                                 cwd=d, env=env)
+                                                 cwd=self.d.name, env=env)
             else:
                 output = subprocess.check_output(['script', '-qfec', ninja_cmd, '/dev/null'],
-                                                 cwd=d, env=env)
+                                                 cwd=self.d.name, env=env)
         except subprocess.CalledProcessError as err:
-            sys.stdout.buffer.write(err.output)
+            if print_err_output:
+              sys.stdout.buffer.write(err.output)
+            err.cooked_output = remove_non_visible_lines(err.output)
             raise err
-    final_output = ''
-    for line in output.decode('utf-8').splitlines(True):
-        if len(line) > 0 and line[-1] == '\r':
-            continue
-        final_output += line.replace('\r', '')
-    return final_output
+
+        if raw_output:
+            return output.decode('utf-8')
+        return remove_non_visible_lines(output)
+
+def run(
+    build_ninja: str,
+    flags: T.Optional[str] = None,
+    pipe: bool = False,
+    raw_output: bool = False,
+    env: T.Dict[str, str] = default_env,
+    print_err_output: bool = True,
+) -> str:
+    """Run Ninja with a given build plan in a temporary directory.
+    """
+    with BuildDir(build_ninja) as b:
+        return b.run(flags, pipe, raw_output, env, print_err_output)
 
 @unittest.skipIf(platform.system() == 'Windows', 'These test methods do not work on Windows')
 class Output(unittest.TestCase):
@@ -54,7 +152,21 @@
         '',
     ))
 
-    def test_issue_1418(self):
+    def _test_expected_error(self, plan: str, flags: T.Optional[str], expected: str,
+                             *args, exit_code: T.Optional[int] = None, **kwargs) -> None:
+        """Run Ninja with a given plan and flags, and verify its cooked output against the expected content.
+        All *args and **kwargs are passed to the `run` function.
+        """
+        actual = ''
+        kwargs['print_err_output'] = False
+        with self.assertRaises(subprocess.CalledProcessError) as cm:
+            run(plan, flags, *args, **kwargs)
+        actual = cm.exception.cooked_output
+        if exit_code is not None:
+            self.assertEqual(cm.exception.returncode, exit_code)
+        self.assertEqual(expected, actual)
+
+    def test_issue_1418(self) -> None:
         self.assertEqual(run(
 '''rule echo
   command = sleep $delay && echo $out
@@ -75,7 +187,7 @@
 a
 ''')
 
-    def test_issue_1214(self):
+    def test_issue_1214(self) -> None:
         print_red = '''rule echo
   command = printf '\x1b[31mred\x1b[0m'
   description = echo $out
@@ -109,7 +221,7 @@
 \x1b[31mred\x1b[0m
 ''')
 
-    def test_issue_1966(self):
+    def test_issue_1966(self) -> None:
         self.assertEqual(run(
 '''rule cat
   command = cat $rspfile $rspfile > $out
@@ -121,13 +233,36 @@
 '''[1/1] cat cat.rsp cat.rsp > a\x1b[K
 ''')
 
+    def test_issue_2499(self) -> None:
+        # This verifies that Ninja prints its status line updates on a single
+        # line when running in a smart terminal, and when commands do not have
+        # any output. Get the raw command output which includes CR (\r) codes
+        # and all content that was printed by Ninja.
+        self.assertEqual(run(
+'''rule touch
+  command = touch $out
 
-    def test_pr_1685(self):
+build foo: touch
+build bar: touch foo
+build zoo: touch bar
+''', flags='-j1 zoo', raw_output=True).split('\r'),
+            [
+                '',
+                '[0/3] touch foo\x1b[K',
+                '[1/3] touch foo\x1b[K',
+                '[1/3] touch bar\x1b[K',
+                '[2/3] touch bar\x1b[K',
+                '[2/3] touch zoo\x1b[K',
+                '[3/3] touch zoo\x1b[K',
+                '\n',
+            ])
+
+    def test_pr_1685(self) -> None:
         # Running those tools without .ninja_deps and .ninja_log shouldn't fail.
         self.assertEqual(run('', flags='-t recompact'), '')
         self.assertEqual(run('', flags='-t restat'), '')
 
-    def test_issue_2048(self):
+    def test_issue_2048(self) -> None:
         with tempfile.TemporaryDirectory() as d:
             with open(os.path.join(d, 'build.ninja'), 'w'):
                 pass
@@ -150,25 +285,109 @@
             except subprocess.CalledProcessError as err:
                 self.fail("non-zero exit code with: " + err.output)
 
-    def test_status(self):
+    def test_pr_2540(self) -> None:
+        py = sys.executable
+        plan = f'''\
+rule CUSTOM_COMMAND
+  command = $COMMAND
+
+build 124: CUSTOM_COMMAND
+  COMMAND = {py} -c 'exit(124)'
+
+build 127: CUSTOM_COMMAND
+  COMMAND = {py} -c 'exit(127)'
+
+build 130: CUSTOM_COMMAND
+  COMMAND = {py} -c 'exit(130)'
+
+build 137: CUSTOM_COMMAND
+  COMMAND = {py} -c 'exit(137)'
+
+build success: CUSTOM_COMMAND
+  COMMAND = sleep 0.3; echo success
+'''
+        # Disable colors
+        env = default_env.copy()
+        env['TERM'] = 'dumb'
+        self._test_expected_error(
+            plan, '124',
+            f'''[1/1] {py} -c 'exit(124)'
+FAILED: [code=124] 124 \n{py} -c 'exit(124)'
+ninja: build stopped: subcommand failed.
+''',
+            exit_code=124, env=env,
+        )
+        self._test_expected_error(
+            plan, '127',
+            f'''[1/1] {py} -c 'exit(127)'
+FAILED: [code=127] 127 \n{py} -c 'exit(127)'
+ninja: build stopped: subcommand failed.
+''',
+            exit_code=127, env=env,
+        )
+        self._test_expected_error(
+            plan, '130',
+            'ninja: build stopped: interrupted by user.\n',
+            exit_code=130, env=env,
+        )
+        self._test_expected_error(
+            plan, '137',
+            f'''[1/1] {py} -c 'exit(137)'
+FAILED: [code=137] 137 \n{py} -c 'exit(137)'
+ninja: build stopped: subcommand failed.
+''',
+            exit_code=137, env=env,
+        )
+        self._test_expected_error(
+            plan, 'non-existent-target',
+            "ninja: error: unknown target 'non-existent-target'\n",
+            exit_code=1, env=env,
+        )
+        self._test_expected_error(
+            plan, '-j2 success 127',
+            f'''[1/2] {py} -c 'exit(127)'
+FAILED: [code=127] 127 \n{py} -c 'exit(127)'
+[2/2] sleep 0.3; echo success
+success
+ninja: build stopped: subcommand failed.
+''',
+            exit_code=127, env=env,
+        )
+
+    def test_depfile_directory_creation(self) -> None:
+        b = BuildDir('''\
+            rule touch
+              command = touch $out && echo "$out: extra" > $depfile
+
+            build somewhere/out: touch
+              depfile = somewhere_else/out.d
+            ''')
+        with b:
+            self.assertEqual(b.run('', pipe=True), dedent('''\
+                [1/1] touch somewhere/out && echo "somewhere/out: extra" > somewhere_else/out.d
+                '''))
+            self.assertTrue(os.path.isfile(os.path.join(b.d.name, "somewhere", "out")))
+            self.assertTrue(os.path.isfile(os.path.join(b.d.name, "somewhere_else", "out.d")))
+
+    def test_status(self) -> None:
         self.assertEqual(run(''), 'ninja: no work to do.\n')
         self.assertEqual(run('', pipe=True), 'ninja: no work to do.\n')
         self.assertEqual(run('', flags='--quiet'), '')
 
-    def test_ninja_status_default(self):
+    def test_ninja_status_default(self) -> None:
         'Do we show the default status by default?'
         self.assertEqual(run(Output.BUILD_SIMPLE_ECHO), '[1/1] echo a\x1b[K\ndo thing\n')
 
-    def test_ninja_status_quiet(self):
+    def test_ninja_status_quiet(self) -> None:
         'Do we suppress the status information when --quiet is specified?'
         output = run(Output.BUILD_SIMPLE_ECHO, flags='--quiet')
         self.assertEqual(output, 'do thing\n')
 
-    def test_entering_directory_on_stdout(self):
+    def test_entering_directory_on_stdout(self) -> None:
         output = run(Output.BUILD_SIMPLE_ECHO, flags='-C$PWD', pipe=True)
         self.assertEqual(output.splitlines()[0][:25], "ninja: Entering directory")
 
-    def test_tool_inputs(self):
+    def test_tool_inputs(self) -> None:
         plan = '''
 rule cat
   command = cat $in $out
@@ -185,6 +404,207 @@
 out2
 ''')
 
+        self.assertEqual(run(plan, flags='-t inputs --dependency-order out3'),
+'''in2
+in1
+out1
+out2
+implicit
+order_only
+''')
+
+        # Verify that results are shell-escaped by default, unless --no-shell-escape
+        # is used. Also verify that phony outputs are never part of the results.
+        quote = '"' if platform.system() == "Windows" else "'"
+
+        plan = '''
+rule cat
+  command = cat $in $out
+build out1 : cat in1
+build out$ 2 : cat out1
+build out$ 3 : phony out$ 2
+build all: phony out$ 3
+'''
+
+        # Quoting changes the order of results when sorting alphabetically.
+        self.assertEqual(run(plan, flags='-t inputs all'),
+f'''{quote}out 2{quote}
+in1
+out1
+''')
+
+        self.assertEqual(run(plan, flags='-t inputs --no-shell-escape all'),
+'''in1
+out 2
+out1
+''')
+
+        # But not when doing dependency order.
+        self.assertEqual(
+            run(
+              plan,
+              flags='-t inputs --dependency-order all'
+            ),
+            f'''in1
+out1
+{quote}out 2{quote}
+''')
+
+        self.assertEqual(
+          run(
+            plan,
+            flags='-t inputs --dependency-order --no-shell-escape all'
+          ),
+          f'''in1
+out1
+out 2
+''')
+
+        self.assertEqual(
+          run(
+            plan,
+            flags='-t inputs --dependency-order --no-shell-escape --print0 all'
+          ),
+          f'''in1\0out1\0out 2\0'''
+        )
+
+
+    def test_tool_compdb_targets(self) -> None:
+        plan = '''
+rule cat
+  command = cat $in $out
+build out1 : cat in1
+build out2 : cat in2 out1
+build out3 : cat out2 out1
+build out4 : cat in4
+'''
+
+
+        self._test_expected_error(plan, '-t compdb-targets',
+'''ninja: error: compdb-targets expects the name of at least one target
+usage: ninja -t compdb [-hx] target [targets]
+
+options:
+  -h     display this help message
+  -x     expand @rspfile style response file invocations
+''')
+
+        self._test_expected_error(plan, '-t compdb-targets in1',
+            "ninja: fatal: 'in1' is not a target (i.e. it is not an output of any `build` statement)\n")
+
+        self._test_expected_error(plan, '-t compdb-targets nonexistent_target',
+            "ninja: fatal: unknown target 'nonexistent_target'\n")
+
+
+        with BuildDir(plan) as b:
+            actual = b.run(flags='-t compdb-targets out3')
+            expected = f'''[
+  {{
+    "directory": "{b.path}",
+    "command": "cat in1 out1",
+    "file": "in1",
+    "output": "out1"
+  }},
+  {{
+    "directory": "{b.path}",
+    "command": "cat in2 out1 out2",
+    "file": "in2",
+    "output": "out2"
+  }},
+  {{
+    "directory": "{b.path}",
+    "command": "cat out2 out1 out3",
+    "file": "out2",
+    "output": "out3"
+  }}
+]
+'''
+            self.assertEqual(expected, actual)
+
+
+    def test_tool_multi_inputs(self) -> None:
+        plan = '''
+rule cat
+  command = cat $in $out
+build out1 : cat in1
+build out2 : cat in1 in2
+build out3 : cat in1 in2 in3
+'''
+        self.assertEqual(run(plan, flags='-t multi-inputs out1'),
+'''out1<TAB>in1
+'''.replace("<TAB>", "\t"))
+
+        self.assertEqual(run(plan, flags='-t multi-inputs out1 out2 out3'),
+'''out1<TAB>in1
+out2<TAB>in1
+out2<TAB>in2
+out3<TAB>in1
+out3<TAB>in2
+out3<TAB>in3
+'''.replace("<TAB>", "\t"))
+
+        self.assertEqual(run(plan, flags='-t multi-inputs -d: out1'),
+'''out1:in1
+''')
+
+        self.assertEqual(
+          run(
+            plan,
+            flags='-t multi-inputs -d, --print0 out1 out2'
+          ),
+          '''out1,in1\0out2,in1\0out2,in2\0'''
+        )
+
+
+    def test_explain_output(self) -> None:
+        b = BuildDir('''\
+            build .FORCE: phony
+            rule create_if_non_exist
+              command = [ -e $out ] || touch $out
+              restat = true
+            rule write
+              command = cp $in $out
+            build input : create_if_non_exist .FORCE
+            build mid : write input
+            build output : write mid
+            default output
+            ''')
+        with b:
+            # The explain output is shown just before the relevant build:
+            self.assertEqual(b.run('-v -d explain'), dedent('''\
+                ninja explain: .FORCE is dirty
+                [1/3] [ -e input ] || touch input
+                ninja explain: input is dirty
+                [2/3] cp input mid
+                ninja explain: mid is dirty
+                [3/3] cp mid output
+                '''))
+            # Don't print "ninja explain: XXX is dirty" for inputs that are
+            # pruned from the graph by an earlier restat.
+            self.assertEqual(b.run('-v -d explain'), dedent('''\
+                ninja explain: .FORCE is dirty
+                [1/3] [ -e input ] || touch input
+                '''))
+
+    def test_issue_2586(self) -> None:
+        """This shouldn't hang"""
+        plan = '''rule echo
+  command = echo echo
+build dep: echo
+build console1: echo dep
+  pool = console
+build console2: echo
+  pool = console
+build all: phony console1 console2
+default all
+'''
+        self.assertEqual(run(plan, flags='-j2', env={'NINJA_STATUS':''}), '''echo echo
+echo
+echo echo
+echo
+echo echo
+echo
+''')
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/misc/write_fake_manifests.py b/misc/write_fake_manifests.py
index bf9cf7d..124ba8b 100755
--- a/misc/write_fake_manifests.py
+++ b/misc/write_fake_manifests.py
@@ -18,11 +18,12 @@
 import os
 import random
 import sys
+from typing import Generator, Optional, Tuple, List, Set
 
 import ninja_syntax
 
 
-def paretoint(avg, alpha):
+def paretoint(avg: float, alpha: float) -> int:
     """Returns a random integer that's avg on average, following a power law.
     alpha determines the shape of the power curve. alpha has to be larger
     than 1. The closer alpha is to 1, the higher the variation of the returned
@@ -31,7 +32,7 @@
 
 
 # Based on http://neugierig.org/software/chromium/class-name-generator.html
-def moar(avg_options, p_suffix):
+def moar(avg_options: float, p_suffix: float) -> str:
     kStart = ['render', 'web', 'browser', 'tab', 'content', 'extension', 'url',
               'file', 'sync', 'content', 'http', 'profile']
     kOption = ['view', 'host', 'holder', 'container', 'impl', 'ref',
@@ -50,38 +51,38 @@
 
 
 class GenRandom(object):
-    def __init__(self, src_dir):
-        self.seen_names = set([None])
-        self.seen_defines = set([None])
+    def __init__(self, src_dir: str) -> None:
+        self.seen_names: Set[Optional[str]] = set([None])
+        self.seen_defines: Set[Optional[str]] = set([None])
         self.src_dir = src_dir
 
-    def _unique_string(self, seen, avg_options=1.3, p_suffix=0.1):
+    def _unique_string(self, seen: Set[Optional[str]], avg_options: float = 1.3, p_suffix: float = 0.1) -> str:
         s = None
         while s in seen:
             s = moar(avg_options, p_suffix)
         seen.add(s)
-        return s
+        return s  # type: ignore # Incompatible return value type
 
-    def _n_unique_strings(self, n):
-        seen = set([None])
+    def _n_unique_strings(self, n: int) -> List[str]:
+        seen: Set[Optional[str]] = set([None])
         return [self._unique_string(seen, avg_options=3, p_suffix=0.4)
                 for _ in range(n)]
 
-    def target_name(self):
+    def target_name(self) -> str:
         return self._unique_string(p_suffix=0, seen=self.seen_names)
 
-    def path(self):
+    def path(self) -> str:
         return os.path.sep.join([
             self._unique_string(self.seen_names, avg_options=1, p_suffix=0)
             for _ in range(1 + paretoint(0.6, alpha=4))])
 
-    def src_obj_pairs(self, path, name):
+    def src_obj_pairs(self, path: str, name: str) -> List[Tuple[str, str]]:
         num_sources = paretoint(55, alpha=2) + 1
         return [(os.path.join(self.src_dir, path, s + '.cc'),
                  os.path.join('obj', path, '%s.%s.o' % (name, s)))
                 for s in self._n_unique_strings(num_sources)]
 
-    def defines(self):
+    def defines(self) -> List[str]:
         return [
             '-DENABLE_' + self._unique_string(self.seen_defines).upper()
             for _ in range(paretoint(20, alpha=3))]
@@ -89,7 +90,7 @@
 
 LIB, EXE = 0, 1
 class Target(object):
-    def __init__(self, gen, kind):
+    def __init__(self, gen: GenRandom, kind: int) -> None:
         self.name = gen.target_name()
         self.dir_path = gen.path()
         self.ninja_file_path = os.path.join(
@@ -100,12 +101,12 @@
         elif kind == EXE:
             self.output = os.path.join(self.name)
         self.defines = gen.defines()
-        self.deps = []
+        self.deps: List[Target] = []
         self.kind = kind
         self.has_compile_depends = random.random() < 0.4
 
 
-def write_target_ninja(ninja, target, src_dir):
+def write_target_ninja(ninja: ninja_syntax.Writer, target: Target, src_dir: str) -> None:
     compile_depends = None
     if target.has_compile_depends:
       compile_depends = os.path.join(
@@ -133,7 +134,7 @@
                 implicit=deps)
 
 
-def write_sources(target, root_dir):
+def write_sources(target: Target, root_dir: str) -> None:
     need_main = target.kind == EXE
 
     includes = []
@@ -174,7 +175,7 @@
                 f.write('int main(int argc, char **argv) {}\n')
                 need_main = False
 
-def write_master_ninja(master_ninja, targets):
+def write_master_ninja(master_ninja: ninja_syntax.Writer, targets: List[Target]) -> None:
     """Writes master build.ninja file, referencing all given subninjas."""
     master_ninja.variable('cxx', 'c++')
     master_ninja.variable('ld', '$cxx')
@@ -212,7 +213,7 @@
 
 
 @contextlib.contextmanager
-def FileWriter(path):
+def FileWriter(path: str) -> Generator[ninja_syntax.Writer, None, None]:
     """Context manager for a ninja_syntax object writing to a file."""
     try:
         os.makedirs(os.path.dirname(path))
@@ -223,7 +224,7 @@
     f.close()
 
 
-def random_targets(num_targets, src_dir):
+def random_targets(num_targets: int, src_dir: str) -> List[Target]:
     gen = GenRandom(src_dir)
 
     # N-1 static libraries, and 1 executable depending on all of them.
@@ -238,7 +239,7 @@
     return targets
 
 
-def main():
+def main() -> None:
     parser = argparse.ArgumentParser()
     parser.add_argument('-s', '--sources', nargs="?", const="src",
         help='write sources to directory (relative to output directory)')
@@ -269,4 +270,4 @@
 
 
 if __name__ == '__main__':
-    sys.exit(main())
+    sys.exit(main())  # type: ignore # "main" does not return a value
diff --git a/src/browse.cc b/src/browse.cc
index ac54207..2956f89 100644
--- a/src/browse.cc
+++ b/src/browse.cc
@@ -59,7 +59,7 @@
           command.push_back(argv[i]);
       }
       command.push_back(NULL);
-      execvp(command[0], (char**)&command[0]);
+      execvp(command[0], const_cast<char**>(&command[0]));
       if (errno == ENOENT) {
         printf("ninja: %s is required for the browse tool\n", NINJA_PYTHON);
       } else {
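
The cast change in src/browse.cc above goes with the cppcoreguidelines-pro-type-cstyle-cast check enabled in .clang-tidy: the argv vector holds const char*, while POSIX execvp is declared as taking char* const argv[], so an explicit const_cast replaces the old C-style cast. A minimal standalone sketch of the same pattern, with hypothetical arguments rather than ninja's actual browse command line:

    // Sketch only: the const_cast-for-execvp pattern used in browse.cc.
    // execvp() does not modify the argument strings; its char* const argv[]
    // signature is historical, which is what makes the cast safe here.
    #include <unistd.h>   // execvp
    #include <cstdio>     // std::perror
    #include <vector>

    int main() {
      std::vector<const char*> command;
      command.push_back("python3");     // hypothetical program
      command.push_back("--version");   // hypothetical argument
      command.push_back(nullptr);       // execvp requires a NULL terminator
      execvp(command[0], const_cast<char**>(command.data()));
      std::perror("execvp");            // only reached if execvp failed
      return 1;
    }
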
diff --git a/src/browse.py b/src/browse.py
index b125e80..a91628c 100755
--- a/src/browse.py
+++ b/src/browse.py
@@ -24,8 +24,8 @@
     import http.server as httpserver
     import socketserver
 except ImportError:
-    import BaseHTTPServer as httpserver
-    import SocketServer as socketserver
+    import BaseHTTPServer as httpserver  # type: ignore # Name "httpserver" already defined
+    import SocketServer as socketserver  # type: ignore # Name "socketserver" already defined
 import argparse
 import os
 import socket
@@ -37,10 +37,11 @@
 else:
     from cgi import escape
 try:
-    from urllib.request import unquote
+    from urllib.request import unquote  # type: ignore # Module "urllib.request" has no attribute "unquote"
 except ImportError:
     from urllib2 import unquote
 from collections import namedtuple
+from typing import Tuple, Any
 
 Node = namedtuple('Node', ['inputs', 'rule', 'target', 'outputs'])
 
@@ -57,15 +58,15 @@
 # This means there's no single view that shows you all inputs and outputs
 # of an edge.  But I think it's less confusing than alternatives.
 
-def match_strip(line, prefix):
+def match_strip(line: str, prefix: str) -> Tuple[bool, str]:
     if not line.startswith(prefix):
         return (False, line)
     return (True, line[len(prefix):])
 
-def html_escape(text):
+def html_escape(text: str) -> str:
     return escape(text, quote=True)
 
-def parse(text):
+def parse(text: str) -> Node:
     lines = iter(text.split('\n'))
 
     target = None
@@ -81,7 +82,7 @@
         if match:
             (match, line) = match_strip(next(lines), '    ')
             while match:
-                type = None
+                type = ""
                 (match, line) = match_strip(line, '| ')
                 if match:
                     type = 'implicit'
@@ -102,7 +103,7 @@
 
     return Node(inputs, rule, target, outputs)
 
-def create_page(body):
+def create_page(body: str) -> str:
     return '''<!DOCTYPE html>
 <style>
 body {
@@ -130,7 +131,7 @@
 </style>
 ''' + body
 
-def generate_html(node):
+def generate_html(node: Node) -> str:
     document = ['<h1><tt>%s</tt></h1>' % html_escape(node.target)]
 
     if node.inputs:
@@ -156,14 +157,14 @@
 
     return '\n'.join(document)
 
-def ninja_dump(target):
+def ninja_dump(target: str) -> Tuple[str, str, int]:
     cmd = [args.ninja_command, '-f', args.f, '-t', 'query', target]
     proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             universal_newlines=True)
     return proc.communicate() + (proc.returncode,)
 
 class RequestHandler(httpserver.BaseHTTPRequestHandler):
-    def do_GET(self):
+    def do_GET(self) -> None:
         assert self.path[0] == '/'
         target = unquote(self.path[1:])
 
@@ -190,7 +191,7 @@
         self.end_headers()
         self.wfile.write(create_page(page_body).encode('utf-8'))
 
-    def log_message(self, format, *args):
+    def log_message(self, format: str, *args: Any) -> None:
         pass  # Swallow console spam.
 
 parser = argparse.ArgumentParser(prog='ninja -t browse')
diff --git a/src/build.cc b/src/build.cc
index 36b52dc..b2f314a 100644
--- a/src/build.cc
+++ b/src/build.cc
@@ -20,7 +20,6 @@
 #include <stdio.h>
 #include <stdlib.h>
 
-#include <climits>
 #include <functional>
 #include <unordered_set>
 
@@ -34,11 +33,13 @@
 #include "depfile_parser.h"
 #include "deps_log.h"
 #include "disk_interface.h"
+#include "exit_status.h"
+#include "explanations.h"
 #include "graph.h"
+#include "jobserver.h"
 #include "metrics.h"
 #include "state.h"
 #include "status.h"
-#include "subprocess.h"
 #include "util.h"
 
 using namespace std;
@@ -47,12 +48,10 @@
 
 /// A CommandRunner that doesn't actually run the commands.
 struct DryRunCommandRunner : public CommandRunner {
-  virtual ~DryRunCommandRunner() {}
-
   // Overridden from CommandRunner:
-  virtual size_t CanRunMore() const;
-  virtual bool StartCommand(Edge* edge);
-  virtual bool WaitForCommand(Result* result);
+  size_t CanRunMore() const override;
+  bool StartCommand(Edge* edge) override;
+  bool WaitForCommand(Result* result) override;
 
  private:
   queue<Edge*> finished_;
@@ -164,6 +163,15 @@
     return NULL;
 
   Edge* work = ready_.top();
+
+  // If jobserver mode is enabled, try to acquire a token first,
+  // and return null in case of failure.
+  if (builder_ && builder_->jobserver_.get()) {
+    work->job_slot_ = builder_->jobserver_->TryAcquire();
+    if (!work->job_slot_.IsValid())
+      return nullptr;
+  }
+
   ready_.pop();
   return work;
 }
@@ -200,6 +208,10 @@
     edge->pool()->EdgeFinished(*edge);
   edge->pool()->RetrieveReadyEdges(&ready_);
 
+  // Release job slot if needed.
+  if (builder_ && builder_->jobserver_.get())
+    builder_->jobserver_->Release(std::move(edge->job_slot_));
+
   // The rest of this function only applies to successful commands.
   if (result != kEdgeSucceeded)
     return true;
@@ -527,7 +539,7 @@
   for (Edge* edge : sorted_edges)
     edge->set_critical_path_weight(EdgeWeightHeuristic(edge));
 
-  // Second propagate / increment weidghts from
+  // Second propagate / increment weights from
   // children to parents. Scan the list
   // in reverse order to do so.
   for (auto reverse_it = sorted_edges.rbegin();
@@ -557,16 +569,14 @@
            end = want_.end(); it != end; ++it) {
     Edge* edge = it->first;
     Plan::Want want = it->second;
-    if (!(want == kWantToStart && edge->AllInputsReady())) {
-      continue;
-    }
-
-    Pool* pool = edge->pool();
-    if (pool->ShouldDelayEdge()) {
-      pool->DelayEdge(edge);
-      pools.insert(pool);
-    } else {
-      ScheduleWork(it);
+    if (want == kWantToStart && edge->AllInputsReady()) {
+      Pool* pool = edge->pool();
+      if (pool->ShouldDelayEdge()) {
+        pool->DelayEdge(edge);
+        pools.insert(pool);
+      } else {
+        ScheduleWork(it);
+      }
     }
   }
 
@@ -594,99 +604,24 @@
   printf("ready: %d\n", (int)ready_.size());
 }
 
-struct RealCommandRunner : public CommandRunner {
-  explicit RealCommandRunner(const BuildConfig& config) : config_(config) {}
-  virtual ~RealCommandRunner() {}
-  virtual size_t CanRunMore() const;
-  virtual bool StartCommand(Edge* edge);
-  virtual bool WaitForCommand(Result* result);
-  virtual vector<Edge*> GetActiveEdges();
-  virtual void Abort();
-
-  const BuildConfig& config_;
-  SubprocessSet subprocs_;
-  map<const Subprocess*, Edge*> subproc_to_edge_;
-};
-
-vector<Edge*> RealCommandRunner::GetActiveEdges() {
-  vector<Edge*> edges;
-  for (map<const Subprocess*, Edge*>::iterator e = subproc_to_edge_.begin();
-       e != subproc_to_edge_.end(); ++e)
-    edges.push_back(e->second);
-  return edges;
-}
-
-void RealCommandRunner::Abort() {
-  subprocs_.Clear();
-}
-
-size_t RealCommandRunner::CanRunMore() const {
-  size_t subproc_number =
-      subprocs_.running_.size() + subprocs_.finished_.size();
-
-  int64_t capacity = config_.parallelism - subproc_number;
-
-  if (config_.max_load_average > 0.0f) {
-    int load_capacity = config_.max_load_average - GetLoadAverage();
-    if (load_capacity < capacity)
-      capacity = load_capacity;
-  }
-
-  if (capacity < 0)
-    capacity = 0;
-
-  if (capacity == 0 && subprocs_.running_.empty())
-    // Ensure that we make progress.
-    capacity = 1;
-
-  return capacity;
-}
-
-bool RealCommandRunner::StartCommand(Edge* edge) {
-  string command = edge->EvaluateCommand();
-  Subprocess* subproc = subprocs_.Add(command, edge->use_console());
-  if (!subproc)
-    return false;
-  subproc_to_edge_.insert(make_pair(subproc, edge));
-
-  return true;
-}
-
-bool RealCommandRunner::WaitForCommand(Result* result) {
-  Subprocess* subproc;
-  while ((subproc = subprocs_.NextFinished()) == NULL) {
-    bool interrupted = subprocs_.DoWork();
-    if (interrupted)
-      return false;
-  }
-
-  result->status = subproc->Finish();
-  result->output = subproc->GetOutput();
-
-  map<const Subprocess*, Edge*>::iterator e = subproc_to_edge_.find(subproc);
-  result->edge = e->second;
-  subproc_to_edge_.erase(e);
-
-  delete subproc;
-  return true;
-}
-
-Builder::Builder(State* state, const BuildConfig& config,
-                 BuildLog* build_log, DepsLog* deps_log,
-                 DiskInterface* disk_interface, Status *status,
-                 int64_t start_time_millis)
+Builder::Builder(State* state, const BuildConfig& config, BuildLog* build_log,
+                 DepsLog* deps_log, DiskInterface* disk_interface,
+                 Status* status, int64_t start_time_millis)
     : state_(state), config_(config), plan_(this), status_(status),
       start_time_millis_(start_time_millis), disk_interface_(disk_interface),
+      explanations_(g_explaining ? new Explanations() : nullptr),
       scan_(state, build_log, deps_log, disk_interface,
-            &config_.depfile_parser_options) {
+            &config_.depfile_parser_options, explanations_.get()) {
   lock_file_path_ = ".ninja_lock";
   string build_dir = state_->bindings_.LookupVariable("builddir");
   if (!build_dir.empty())
     lock_file_path_ = build_dir + "/" + lock_file_path_;
+  status_->SetExplanations(explanations_.get());
 }
 
 Builder::~Builder() {
   Cleanup();
+  status_->SetExplanations(nullptr);
 }
 
 void Builder::Cleanup() {
@@ -765,7 +700,7 @@
   return !plan_.more_to_do();
 }
 
-bool Builder::Build(string* err) {
+ExitStatus Builder::Build(string* err) {
   assert(!AlreadyUpToDate());
   plan_.PrepareQueue();
 
@@ -777,7 +712,8 @@
     if (config_.dry_run)
       command_runner_.reset(new DryRunCommandRunner);
     else
-      command_runner_.reset(new RealCommandRunner(config_));
+      command_runner_.reset(CommandRunner::factory(config_, jobserver_.get()));
+    ;
   }
 
   // We are about to start the build process.
@@ -804,14 +740,14 @@
         if (!StartEdge(edge, err)) {
           Cleanup();
           status_->BuildFinished();
-          return false;
+          return ExitFailure;
         }
 
         if (edge->is_phony()) {
           if (!plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, err)) {
             Cleanup();
             status_->BuildFinished();
-            return false;
+            return ExitFailure;
           }
         } else {
           ++pending_commands;
@@ -838,14 +774,16 @@
         Cleanup();
         status_->BuildFinished();
         *err = "interrupted by user";
-        return false;
+        return result.status;
       }
 
       --pending_commands;
-      if (!FinishCommand(&result, err)) {
+      bool command_finished = FinishCommand(&result, err);
+      SetFailureCode(result.status);
+      if (!command_finished) {
         Cleanup();
         status_->BuildFinished();
-        return false;
+        return result.status;
       }
 
       if (!result.success()) {
@@ -869,11 +807,11 @@
     else
       *err = "stuck [this is a bug]";
 
-    return false;
+    return GetExitCode();
   }
 
   status_->BuildFinished();
-  return true;
+  return ExitSuccess;
 }
 
 bool Builder::StartEdge(Edge* edge, string* err) {
@@ -905,6 +843,12 @@
 
   edge->command_start_time_ = build_start;
 
+  // Create depfile directory if needed.
+  // XXX: this may also block; do we care?
+  std::string depfile = edge->GetUnescapedDepfile();
+  if (!depfile.empty() && !disk_interface_->MakeDirs(depfile))
+    return false;
+
   // Create response file, if needed
   // XXX: this may also block; do we care?
   string rspfile = edge->GetUnescapedRspfile();
@@ -955,7 +899,7 @@
   running_edges_.erase(it);
 
   status_->BuildEdgeFinished(edge, start_time_millis, end_time_millis,
-                             result->success(), result->output);
+                             result->status, result->output);
 
   // The rest of this function only applies to successful commands.
   if (!result->success()) {
@@ -1097,8 +1041,6 @@
 }
 
 bool Builder::LoadDyndeps(Node* node, string* err) {
-  status_->BuildLoadDyndeps();
-
   // Load the dyndep information provided by this node.
   DyndepFile ddf;
   if (!scan_.LoadDyndeps(node, &ddf, err))
@@ -1110,3 +1052,10 @@
 
   return true;
 }
+
+void Builder::SetFailureCode(ExitStatus code) {
+  // ExitSuccess should not overwrite any error
+  if (code != ExitSuccess) {
+    exit_code_ = code;
+  }
+}
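
The Plan::FindWork and Plan::EdgeFinished hunks above wire the new jobserver support into scheduling: a token must be acquired before ready work is handed out, and it is released when the edge finishes, whether it succeeded or not. Below is a deliberately small sketch of that acquire/release discipline; Slot, TryAcquire and Release mirror the calls visible in the diff, but TokenPool and everything else are illustrative stand-ins, not ninja's Jobserver::Client.

    // Toy token pool with the same shape as the jobserver handshake added to
    // Plan::FindWork (acquire before dequeuing) and Plan::EdgeFinished (release).
    #include <cassert>
    #include <queue>

    struct Slot {
      bool valid = false;
      bool IsValid() const { return valid; }
    };

    class TokenPool {  // illustrative stand-in for Jobserver::Client
     public:
      explicit TokenPool(int tokens) : tokens_(tokens) {}
      Slot TryAcquire() {          // non-blocking; may return an invalid slot
        Slot s;
        if (tokens_ > 0) { --tokens_; s.valid = true; }
        return s;
      }
      void Release(Slot slot) {    // hand the token back to the pool
        if (slot.IsValid()) ++tokens_;
      }
     private:
      int tokens_;
    };

    struct Work { Slot job_slot; };

    // FindWork-style: leave the edge queued if no token is available right now.
    Work* FindWork(std::queue<Work*>& ready, TokenPool& pool) {
      if (ready.empty()) return nullptr;
      Work* w = ready.front();
      w->job_slot = pool.TryAcquire();
      if (!w->job_slot.IsValid()) return nullptr;
      ready.pop();
      return w;
    }

    // EdgeFinished-style: always release the slot, success or failure.
    void Finish(Work* w, TokenPool& pool) { pool.Release(w->job_slot); }

    int main() {
      TokenPool pool(1);           // a single job slot
      std::queue<Work*> ready;
      Work a, b;
      ready.push(&a);
      ready.push(&b);
      Work* first = FindWork(ready, pool);
      assert(first == &a);
      assert(FindWork(ready, pool) == nullptr);  // b must wait for a token
      Finish(first, pool);
      assert(FindWork(ready, pool) == &b);       // token returned, b can run
      return 0;
    }
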
diff --git a/src/build.h b/src/build.h
index 471f0b2..0531747 100644
--- a/src/build.h
+++ b/src/build.h
@@ -22,14 +22,16 @@
 #include <vector>
 
 #include "depfile_parser.h"
-#include "graph.h"
 #include "exit_status.h"
+#include "graph.h"
+#include "jobserver.h"
 #include "util.h"  // int64_t
 
 struct BuildLog;
 struct Builder;
 struct DiskInterface;
 struct Edge;
+struct Explanations;
 struct Node;
 struct State;
 struct Status;
@@ -140,6 +142,8 @@
   int wanted_edges_;
 };
 
+struct BuildConfig;
+
 /// CommandRunner is an interface that wraps running the build
 /// subcommands.  This allows tests to abstract out running commands.
 /// RealCommandRunner is an implementation that actually runs commands.
@@ -161,12 +165,16 @@
 
   virtual std::vector<Edge*> GetActiveEdges() { return std::vector<Edge*>(); }
   virtual void Abort() {}
+
+  /// Creates the RealCommandRunner. \arg jobserver can be nullptr if there
+  /// is no jobserver pool to use.
+  static CommandRunner* factory(const BuildConfig& config,
+                                Jobserver::Client* jobserver);
 };
 
 /// Options (e.g. verbosity, parallelism) passed to a build.
 struct BuildConfig {
-  BuildConfig() : verbosity(NORMAL), dry_run(false), parallelism(1),
-                  failures_allowed(1), max_load_average(-0.0f) {}
+  BuildConfig() = default;
 
   enum Verbosity {
     QUIET,  // No output -- used when testing.
@@ -174,24 +182,29 @@
     NORMAL,  // regular output and status update
     VERBOSE
   };
-  Verbosity verbosity;
-  bool dry_run;
-  int parallelism;
-  int failures_allowed;
+  Verbosity verbosity = NORMAL;
+  bool dry_run = false;
+  int parallelism = 1;
+  bool disable_jobserver_client = false;
+  int failures_allowed = 1;
   /// The maximum load average we must not exceed. A negative value
   /// means that we do not have any limit.
-  double max_load_average;
+  double max_load_average = -0.0f;
   DepfileParserOptions depfile_parser_options;
 };
 
 /// Builder wraps the build process: starting commands, updating status.
 struct Builder {
-  Builder(State* state, const BuildConfig& config,
-          BuildLog* build_log, DepsLog* deps_log,
-          DiskInterface* disk_interface, Status* status,
+  Builder(State* state, const BuildConfig& config, BuildLog* build_log,
+          DepsLog* deps_log, DiskInterface* disk_interface, Status* status,
           int64_t start_time_millis);
   ~Builder();
 
+  /// Set Jobserver client instance for this builder.
+  void SetJobserverClient(std::unique_ptr<Jobserver::Client> jobserver_client) {
+    jobserver_ = std::move(jobserver_client);
+  }
+
   /// Clean up after interrupted commands by deleting output files.
   void Cleanup();
 
@@ -204,9 +217,9 @@
   /// Returns true if the build targets are already up to date.
   bool AlreadyUpToDate() const;
 
-  /// Run the build.  Returns false on error.
+  /// Run the build.  Returns ExitSuccess, or the exit code of the last failed job.
   /// It is an error to call this function when AlreadyUpToDate() is true.
-  bool Build(std::string* err);
+  ExitStatus Build(std::string* err);
 
   bool StartEdge(Edge* edge, std::string* err);
 
@@ -225,9 +238,14 @@
   State* state_;
   const BuildConfig& config_;
   Plan plan_;
+  std::unique_ptr<Jobserver::Client> jobserver_;
   std::unique_ptr<CommandRunner> command_runner_;
   Status* status_;
 
+  /// Returns the build's overall exit status: ExitSuccess, or the exit code
+  /// of the last failed job (which need not be a named ExitStatus value).
+  ExitStatus GetExitCode() const { return exit_code_; }
+
  private:
   bool ExtractDeps(CommandRunner::Result* result, const std::string& deps_type,
                    const std::string& deps_prefix,
@@ -242,8 +260,16 @@
 
   std::string lock_file_path_;
   DiskInterface* disk_interface_;
+
+  // Only create an Explanations class if '-d explain' is used.
+  std::unique_ptr<Explanations> explanations_;
+
   DependencyScan scan_;
 
+  /// Keep the global exit code for the build
+  ExitStatus exit_code_ = ExitSuccess;
+  void SetFailureCode(ExitStatus code);
+
   // Unimplemented copy ctor and operator= ensure we don't copy the auto_ptr.
   Builder(const Builder &other);        // DO NOT IMPLEMENT
   void operator=(const Builder &other); // DO NOT IMPLEMENT
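
Builder now tracks an overall exit code: Build() records each command's result.status via SetFailureCode and reports an ExitStatus instead of a bare bool. The latch itself is simple; here is a self-contained sketch of its behaviour (the enum values and the Tracker wrapper are illustrative stand-ins, only the SetFailureCode/GetExitCode logic comes from the diff):

    // Sketch of the "first failure sticks, success never overwrites it" rule
    // implemented by Builder::SetFailureCode / Builder::GetExitCode.
    #include <cassert>

    enum ExitStatus {        // illustrative stand-in for src/exit_status.h
      ExitSuccess = 0,
      ExitFailure = 1,
    };

    class Tracker {
     public:
      void SetFailureCode(ExitStatus code) {
        if (code != ExitSuccess)   // ExitSuccess must not overwrite an error
          exit_code_ = code;
      }
      ExitStatus GetExitCode() const { return exit_code_; }
     private:
      ExitStatus exit_code_ = ExitSuccess;
    };

    int main() {
      Tracker build;
      build.SetFailureCode(ExitSuccess);   // successful commands change nothing
      assert(build.GetExitCode() == ExitSuccess);
      build.SetFailureCode(ExitFailure);   // a command failed
      build.SetFailureCode(ExitSuccess);   // later successes keep the failure
      assert(build.GetExitCode() == ExitFailure);
      return 0;
    }
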
diff --git a/src/build_log.cc b/src/build_log.cc
index 792d1a3..890a3cd 100644
--- a/src/build_log.cc
+++ b/src/build_log.cc
@@ -41,8 +41,6 @@
 #define strtoll _strtoi64
 #endif
 
-using namespace std;
-
 // Implementation details:
 // Each run's log appends to the log file.
 // To load, we run through all log entries in series, throwing away
@@ -53,83 +51,31 @@
 namespace {
 
 const char kFileSignature[] = "# ninja log v%d\n";
-const int kOldestSupportedVersion = 6;
-const int kCurrentVersion = 6;
-
-// 64bit MurmurHash2, by Austin Appleby
-#if defined(_MSC_VER)
-#define BIG_CONSTANT(x) (x)
-#else   // defined(_MSC_VER)
-#define BIG_CONSTANT(x) (x##LLU)
-#endif // !defined(_MSC_VER)
-inline
-uint64_t MurmurHash64A(const void* key, size_t len) {
-  static const uint64_t seed = 0xDECAFBADDECAFBADull;
-  const uint64_t m = BIG_CONSTANT(0xc6a4a7935bd1e995);
-  const int r = 47;
-  uint64_t h = seed ^ (len * m);
-  const unsigned char* data = (const unsigned char*)key;
-  while (len >= 8) {
-    uint64_t k;
-    memcpy(&k, data, sizeof k);
-    k *= m;
-    k ^= k >> r;
-    k *= m;
-    h ^= k;
-    h *= m;
-    data += 8;
-    len -= 8;
-  }
-  switch (len & 7)
-  {
-  case 7: h ^= uint64_t(data[6]) << 48;
-          NINJA_FALLTHROUGH;
-  case 6: h ^= uint64_t(data[5]) << 40;
-          NINJA_FALLTHROUGH;
-  case 5: h ^= uint64_t(data[4]) << 32;
-          NINJA_FALLTHROUGH;
-  case 4: h ^= uint64_t(data[3]) << 24;
-          NINJA_FALLTHROUGH;
-  case 3: h ^= uint64_t(data[2]) << 16;
-          NINJA_FALLTHROUGH;
-  case 2: h ^= uint64_t(data[1]) << 8;
-          NINJA_FALLTHROUGH;
-  case 1: h ^= uint64_t(data[0]);
-          h *= m;
-  };
-  h ^= h >> r;
-  h *= m;
-  h ^= h >> r;
-  return h;
-}
-#undef BIG_CONSTANT
-
+const int kOldestSupportedVersion = 7;
+const int kCurrentVersion = 7;
 
 }  // namespace
 
 // static
 uint64_t BuildLog::LogEntry::HashCommand(StringPiece command) {
-  return MurmurHash64A(command.str_, command.len_);
+  return rapidhash(command.str_, command.len_);
 }
 
-BuildLog::LogEntry::LogEntry(const string& output)
-  : output(output) {}
+BuildLog::LogEntry::LogEntry(std::string output) : output(std::move(output)) {}
 
-BuildLog::LogEntry::LogEntry(const string& output, uint64_t command_hash,
-  int start_time, int end_time, TimeStamp mtime)
-  : output(output), command_hash(command_hash),
-    start_time(start_time), end_time(end_time), mtime(mtime)
-{}
+BuildLog::LogEntry::LogEntry(const std::string& output, uint64_t command_hash,
+                             int start_time, int end_time, TimeStamp mtime)
+    : output(output), command_hash(command_hash), start_time(start_time),
+      end_time(end_time), mtime(mtime) {}
 
-BuildLog::BuildLog()
-  : log_file_(NULL), needs_recompaction_(false) {}
+BuildLog::BuildLog() = default;
 
 BuildLog::~BuildLog() {
   Close();
 }
 
-bool BuildLog::OpenForWrite(const string& path, const BuildLogUser& user,
-                            string* err) {
+bool BuildLog::OpenForWrite(const std::string& path, const BuildLogUser& user,
+                            std::string* err) {
   if (needs_recompaction_) {
     if (!Recompact(path, user, err))
       return false;
@@ -143,18 +89,19 @@
 
 bool BuildLog::RecordCommand(Edge* edge, int start_time, int end_time,
                              TimeStamp mtime) {
-  string command = edge->EvaluateCommand(true);
+  std::string command = edge->EvaluateCommand(true);
   uint64_t command_hash = LogEntry::HashCommand(command);
-  for (vector<Node*>::iterator out = edge->outputs_.begin();
+  for (std::vector<Node*>::iterator out = edge->outputs_.begin();
        out != edge->outputs_.end(); ++out) {
-    const string& path = (*out)->path();
+    const std::string& path = (*out)->path();
     Entries::iterator i = entries_.find(path);
     LogEntry* log_entry;
     if (i != entries_.end()) {
-      log_entry = i->second;
+      log_entry = i->second.get();
     } else {
       log_entry = new LogEntry(path);
-      entries_.insert(Entries::value_type(log_entry->output, log_entry));
+      // Passes ownership of |log_entry| to the map, but keeps the pointer valid.
+      entries_.emplace(log_entry->output, std::unique_ptr<LogEntry>(log_entry));
     }
     log_entry->command_hash = command_hash;
     log_entry->start_time = start_time;
@@ -230,7 +177,7 @@
       line_start_ = line_end_ + 1;
     }
 
-    line_end_ = (char*)memchr(line_start_, '\n', buf_end_ - line_start_);
+    line_end_ = static_cast<char*>(memchr(line_start_, '\n', buf_end_ - line_start_));
     if (!line_end_) {
       // No newline. Move rest of data to start of buffer, fill rest.
       size_t already_consumed = line_start_ - buf_;
@@ -240,7 +187,7 @@
       size_t read = fread(buf_ + size_rest, 1, sizeof(buf_) - size_rest, file_);
       buf_end_ = buf_ + size_rest + read;
       line_start_ = buf_;
-      line_end_ = (char*)memchr(line_start_, '\n', buf_end_ - line_start_);
+      line_end_ = static_cast<char*>(memchr(line_start_, '\n', buf_end_ - line_start_));
     }
 
     *line_start = line_start_;
@@ -258,7 +205,7 @@
   char* line_end_;
 };
 
-LoadStatus BuildLog::Load(const string& path, string* err) {
+LoadStatus BuildLog::Load(const std::string& path, std::string* err) {
   METRIC_RECORD(".ninja_log load");
   FILE* file = fopen(path.c_str(), "r");
   if (!file) {
@@ -290,7 +237,7 @@
       }
       if (invalid_log_version) {
         fclose(file);
-        unlink(path.c_str());
+        platformAwareUnlink(path.c_str());
         // Don't report this as a failure. A missing build log will cause
         // us to rebuild the outputs anyway.
         return LOAD_NOT_FOUND;
@@ -304,7 +251,7 @@
     const char kFieldSeparator = '\t';
 
     char* start = line_start;
-    char* end = (char*)memchr(start, kFieldSeparator, line_end - start);
+    char* end = static_cast<char*>(memchr(start, kFieldSeparator, line_end - start));
     if (!end)
       continue;
     *end = 0;
@@ -315,24 +262,24 @@
     start_time = atoi(start);
     start = end + 1;
 
-    end = (char*)memchr(start, kFieldSeparator, line_end - start);
+    end = static_cast<char*>(memchr(start, kFieldSeparator, line_end - start));
     if (!end)
       continue;
     *end = 0;
     end_time = atoi(start);
     start = end + 1;
 
-    end = (char*)memchr(start, kFieldSeparator, line_end - start);
+    end = static_cast<char*>(memchr(start, kFieldSeparator, line_end - start));
     if (!end)
       continue;
     *end = 0;
     mtime = strtoll(start, NULL, 10);
     start = end + 1;
 
-    end = (char*)memchr(start, kFieldSeparator, line_end - start);
+    end = static_cast<char*>(memchr(start, kFieldSeparator, line_end - start));
     if (!end)
       continue;
-    string output = string(start, end - start);
+    std::string output(start, end - start);
 
     start = end + 1;
     end = line_end;
@@ -340,10 +287,11 @@
     LogEntry* entry;
     Entries::iterator i = entries_.find(output);
     if (i != entries_.end()) {
-      entry = i->second;
+      entry = i->second.get();
     } else {
-      entry = new LogEntry(output);
-      entries_.insert(Entries::value_type(entry->output, entry));
+      entry = new LogEntry(std::move(output));
+      // Passes ownership of |entry| to the map, but keeps the pointer valid.
+      entries_.emplace(entry->output, std::unique_ptr<LogEntry>(entry));
       ++unique_entry_count;
     }
     ++total_entry_count;
@@ -376,10 +324,10 @@
   return LOAD_SUCCESS;
 }
 
-BuildLog::LogEntry* BuildLog::LookupByOutput(const string& path) {
+BuildLog::LogEntry* BuildLog::LookupByOutput(const std::string& path) {
   Entries::iterator i = entries_.find(path);
   if (i != entries_.end())
-    return i->second;
+    return i->second.get();
   return NULL;
 }
 
@@ -389,12 +337,12 @@
           entry.output.c_str(), entry.command_hash) > 0;
 }
 
-bool BuildLog::Recompact(const string& path, const BuildLogUser& user,
-                         string* err) {
+bool BuildLog::Recompact(const std::string& path, const BuildLogUser& user,
+                         std::string* err) {
   METRIC_RECORD(".ninja_log recompact");
 
   Close();
-  string temp_path = path + ".recompact";
+  std::string temp_path = path + ".recompact";
   FILE* f = fopen(temp_path.c_str(), "wb");
   if (!f) {
     *err = strerror(errno);
@@ -407,25 +355,25 @@
     return false;
   }
 
-  vector<StringPiece> dead_outputs;
-  for (Entries::iterator i = entries_.begin(); i != entries_.end(); ++i) {
-    if (user.IsPathDead(i->first)) {
-      dead_outputs.push_back(i->first);
+  std::vector<StringPiece> dead_outputs;
+  for (const auto& pair : entries_) {
+    if (user.IsPathDead(pair.first)) {
+      dead_outputs.push_back(pair.first);
       continue;
     }
 
-    if (!WriteEntry(f, *i->second)) {
+    if (!WriteEntry(f, *pair.second)) {
       *err = strerror(errno);
       fclose(f);
       return false;
     }
   }
 
-  for (size_t i = 0; i < dead_outputs.size(); ++i)
-    entries_.erase(dead_outputs[i]);
+  for (StringPiece output : dead_outputs)
+    entries_.erase(output);
 
   fclose(f);
-  if (unlink(path.c_str()) < 0) {
+  if (platformAwareUnlink(path.c_str()) < 0) {
     *err = strerror(errno);
     return false;
   }
@@ -457,24 +405,24 @@
     fclose(f);
     return false;
   }
-  for (Entries::iterator i = entries_.begin(); i != entries_.end(); ++i) {
+  for (auto& pair : entries_) {
     bool skip = output_count > 0;
     for (int j = 0; j < output_count; ++j) {
-      if (i->second->output == outputs[j]) {
+      if (pair.second->output == outputs[j]) {
         skip = false;
         break;
       }
     }
     if (!skip) {
-      const TimeStamp mtime = disk_interface.Stat(i->second->output, err);
+      const TimeStamp mtime = disk_interface.Stat(pair.second->output, err);
       if (mtime == -1) {
         fclose(f);
         return false;
       }
-      i->second->mtime = mtime;
+      pair.second->mtime = mtime;
     }
 
-    if (!WriteEntry(f, *i->second)) {
+    if (!WriteEntry(f, *pair.second)) {
       *err = strerror(errno);
       fclose(f);
       return false;
@@ -482,7 +430,7 @@
   }
 
   fclose(f);
-  if (unlink(path.str_) < 0) {
+  if (platformAwareUnlink(path.str_) < 0) {
     *err = strerror(errno);
     return false;
   }
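
build_log.cc above swaps the command hash from the in-tree MurmurHash64A to rapidhash and, in the same change, bumps kOldestSupportedVersion/kCurrentVersion from 6 to 7: the log stores only 64-bit command hashes, so entries written with the old function can no longer be checked against freshly computed values. A toy illustration of why a mixed-hash log would be useless (both hash functions here are simple stand-ins, not the real MurmurHash64A or rapidhash):

    // Why the hash swap forces a log-version bump: an unchanged command
    // re-hashed with a different function no longer matches the recorded hash,
    // so every old entry would look dirty. Discarding logs older than v7 makes
    // that explicit instead of silently rebuilding everything.
    #include <cassert>
    #include <cstdint>
    #include <string>

    uint64_t OldHash(const std::string& s) {   // stand-in for MurmurHash64A
      uint64_t h = 0;
      for (unsigned char c : s) h = h * 131 + c;
      return h;
    }

    uint64_t NewHash(const std::string& s) {   // stand-in for rapidhash (FNV-1a)
      uint64_t h = 14695981039346656037ull;
      for (unsigned char c : s) { h ^= c; h *= 1099511628211ull; }
      return h;
    }

    int main() {
      const std::string command = "cc -c foo.c -o foo.o";
      const uint64_t recorded = OldHash(command);    // what a v6 log would hold
      // The command itself did not change, yet the check fails under the new
      // hash (the two functions disagree for essentially any input).
      assert(NewHash(command) != recorded);
      assert(NewHash(command) == NewHash(command));  // new-vs-new still works
      return 0;
    }
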
diff --git a/src/build_log.h b/src/build_log.h
index dd72c4c..61bd8ff 100644
--- a/src/build_log.h
+++ b/src/build_log.h
@@ -15,9 +15,11 @@
 #ifndef NINJA_BUILD_LOG_H_
 #define NINJA_BUILD_LOG_H_
 
-#include <string>
 #include <stdio.h>
 
+#include <memory>
+#include <string>
+
 #include "hash_map.h"
 #include "load_status.h"
 #include "timestamp.h"
@@ -57,21 +59,21 @@
 
   struct LogEntry {
     std::string output;
-    uint64_t command_hash;
-    int start_time;
-    int end_time;
-    TimeStamp mtime;
+    uint64_t command_hash = 0;
+    int start_time = 0;
+    int end_time = 0;
+    TimeStamp mtime = 0;
 
     static uint64_t HashCommand(StringPiece command);
 
     // Used by tests.
-    bool operator==(const LogEntry& o) {
+    bool operator==(const LogEntry& o) const {
       return output == o.output && command_hash == o.command_hash &&
           start_time == o.start_time && end_time == o.end_time &&
           mtime == o.mtime;
     }
 
-    explicit LogEntry(const std::string& output);
+    explicit LogEntry(std::string output);
     LogEntry(const std::string& output, uint64_t command_hash,
              int start_time, int end_time, TimeStamp mtime);
   };
@@ -90,7 +92,7 @@
   bool Restat(StringPiece path, const DiskInterface& disk_interface,
               int output_count, char** outputs, std::string* err);
 
-  typedef ExternalStringHashMap<LogEntry*>::Type Entries;
+  typedef ExternalStringHashMap<std::unique_ptr<LogEntry>>::Type Entries;
   const Entries& entries() const { return entries_; }
 
  private:
@@ -99,9 +101,9 @@
   bool OpenForWriteIfNeeded();
 
   Entries entries_;
-  FILE* log_file_;
+  FILE* log_file_ = nullptr;
   std::string log_file_path_;
-  bool needs_recompaction_;
+  bool needs_recompaction_ = false;
 };
 
 #endif // NINJA_BUILD_LOG_H_
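
With the Entries typedef now mapping to std::unique_ptr<LogEntry>, the build log owns its entries while RecordCommand and LookupByOutput keep handing plain LogEntry* back to callers. A minimal sketch of that pattern; the RecordCommand helper below is a simplified, hypothetical version of BuildLog::RecordCommand, and a plain std::unordered_map with string keys stands in for ninja's ExternalStringHashMap keyed by StringPiece:

    // Sketch: unique_ptr values in a node-based map give single ownership while
    // raw pointers handed out to callers stay valid, mirroring the
    // "Passes ownership of |log_entry| to the map" comments in build_log.cc.
    #include <cassert>
    #include <cstdint>
    #include <memory>
    #include <string>
    #include <unordered_map>

    struct LogEntry {
      std::string output;
      uint64_t command_hash = 0;
      explicit LogEntry(std::string o) : output(std::move(o)) {}
    };

    using Entries = std::unordered_map<std::string, std::unique_ptr<LogEntry>>;

    LogEntry* RecordCommand(Entries& entries, const std::string& path,
                            uint64_t hash) {
      LogEntry* entry;
      auto it = entries.find(path);
      if (it != entries.end()) {
        entry = it->second.get();               // reuse the existing entry
      } else {
        entry = new LogEntry(path);
        // Pass ownership to the map, but keep using the raw pointer.
        entries.emplace(entry->output, std::unique_ptr<LogEntry>(entry));
      }
      entry->command_hash = hash;
      return entry;
    }

    int main() {
      Entries entries;
      LogEntry* first = RecordCommand(entries, "obj/foo.o", 0x1234);
      LogEntry* again = RecordCommand(entries, "obj/foo.o", 0x5678);
      assert(first == again);                   // same entry, updated in place
      assert(entries.at("obj/foo.o")->command_hash == 0x5678);
      return 0;                                 // entries are freed automatically
    }
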
diff --git a/src/build_log_perftest.cc b/src/build_log_perftest.cc
index 5a93619..869112f 100644
--- a/src/build_log_perftest.cc
+++ b/src/build_log_perftest.cc
@@ -144,7 +144,7 @@
   printf("min %dms  max %dms  avg %.1fms\n",
          min, max, total / times.size());
 
-  unlink(kTestFilename);
+  platformAwareUnlink(kTestFilename);
 
   return 0;
 }
diff --git a/src/build_log_test.cc b/src/build_log_test.cc
index 12c2dc7..47de8b5 100644
--- a/src/build_log_test.cc
+++ b/src/build_log_test.cc
@@ -27,8 +27,6 @@
 #endif
 #include <cassert>
 
-using namespace std;
-
 namespace {
 
 const char kTestFilename[] = "BuildLogTest-tempfile";
@@ -36,10 +34,10 @@
 struct BuildLogTest : public StateTestWithBuiltinRules, public BuildLogUser {
   virtual void SetUp() {
     // In case a crashing test left a stale file behind.
-    unlink(kTestFilename);
+    platformAwareUnlink(kTestFilename);
   }
   virtual void TearDown() {
-    unlink(kTestFilename);
+    platformAwareUnlink(kTestFilename);
   }
   virtual bool IsPathDead(StringPiece s) const { return false; }
 };
@@ -50,7 +48,7 @@
 "build mid: cat in\n");
 
   BuildLog log1;
-  string err;
+  std::string err;
   EXPECT_TRUE(log1.OpenForWrite(kTestFilename, *this, &err));
   ASSERT_EQ("", err);
   log1.RecordCommand(state_.edges_[0], 15, 18);
@@ -77,7 +75,7 @@
   const size_t kVersionPos = strlen(kExpectedVersion) - 2;  // Points at 'X'.
 
   BuildLog log;
-  string contents, err;
+  std::string contents, err;
 
   EXPECT_TRUE(log.OpenForWrite(kTestFilename, *this, &err));
   ASSERT_EQ("", err);
@@ -104,14 +102,14 @@
 
 TEST_F(BuildLogTest, DoubleEntry) {
   FILE* f = fopen(kTestFilename, "wb");
-  fprintf(f, "# ninja log v6\n");
+  fprintf(f, "# ninja log v7\n");
   fprintf(f, "0\t1\t2\tout\t%" PRIx64 "\n",
       BuildLog::LogEntry::HashCommand("command abc"));
   fprintf(f, "0\t1\t2\tout\t%" PRIx64 "\n",
       BuildLog::LogEntry::HashCommand("command def"));
   fclose(f);
 
-  string err;
+  std::string err;
   BuildLog log;
   EXPECT_TRUE(log.Load(kTestFilename, &err));
   ASSERT_EQ("", err);
@@ -128,7 +126,7 @@
 
   {
     BuildLog log1;
-    string err;
+    std::string err;
     EXPECT_TRUE(log1.OpenForWrite(kTestFilename, *this, &err));
     ASSERT_EQ("", err);
     log1.RecordCommand(state_.edges_[0], 15, 18);
@@ -148,7 +146,7 @@
   // crash when parsing.
   for (off_t size = statbuf.st_size; size > 0; --size) {
     BuildLog log2;
-    string err;
+    std::string err;
     EXPECT_TRUE(log2.OpenForWrite(kTestFilename, *this, &err));
     ASSERT_EQ("", err);
     log2.RecordCommand(state_.edges_[0], 15, 18);
@@ -169,20 +167,20 @@
   fprintf(f, "123 456 0 out command\n");
   fclose(f);
 
-  string err;
+  std::string err;
   BuildLog log;
   EXPECT_TRUE(log.Load(kTestFilename, &err));
-  ASSERT_NE(err.find("version"), string::npos);
+  ASSERT_NE(err.find("version"), std::string::npos);
 }
 
 TEST_F(BuildLogTest, SpacesInOutput) {
   FILE* f = fopen(kTestFilename, "wb");
-  fprintf(f, "# ninja log v6\n");
+  fprintf(f, "# ninja log v7\n");
   fprintf(f, "123\t456\t456\tout with space\t%" PRIx64 "\n",
       BuildLog::LogEntry::HashCommand("command"));
   fclose(f);
 
-  string err;
+  std::string err;
   BuildLog log;
   EXPECT_TRUE(log.Load(kTestFilename, &err));
   ASSERT_EQ("", err);
@@ -200,15 +198,15 @@
   // build log on Windows. This shouldn't crash, and the second version header
   // should be ignored.
   FILE* f = fopen(kTestFilename, "wb");
-  fprintf(f, "# ninja log v6\n");
+  fprintf(f, "# ninja log v7\n");
   fprintf(f, "123\t456\t456\tout\t%" PRIx64 "\n",
       BuildLog::LogEntry::HashCommand("command"));
-  fprintf(f, "# ninja log v6\n");
+  fprintf(f, "# ninja log v7\n");
   fprintf(f, "456\t789\t789\tout2\t%" PRIx64 "\n",
       BuildLog::LogEntry::HashCommand("command2"));
   fclose(f);
 
-  string err;
+  std::string err;
   BuildLog log;
   EXPECT_TRUE(log.Load(kTestFilename, &err));
   ASSERT_EQ("", err);
@@ -229,22 +227,23 @@
 }
 
 struct TestDiskInterface : public DiskInterface {
-  virtual TimeStamp Stat(const string& path, string* err) const {
+  virtual TimeStamp Stat(const std::string& path, std::string* err) const {
     return 4;
   }
-  virtual bool WriteFile(const string& path, const string& contents) {
+  virtual bool WriteFile(const std::string& path, const std::string& contents) {
     assert(false);
     return true;
   }
-  virtual bool MakeDir(const string& path) {
+  virtual bool MakeDir(const std::string& path) {
     assert(false);
     return false;
   }
-  virtual Status ReadFile(const string& path, string* contents, string* err) {
+  virtual Status ReadFile(const std::string& path, std::string* contents,
+                          std::string* err) {
     assert(false);
     return NotFound;
   }
-  virtual int RemoveFile(const string& path) {
+  virtual int RemoveFile(const std::string& path) {
     assert(false);
     return 0;
   }
@@ -252,7 +251,7 @@
 
 TEST_F(BuildLogTest, Restat) {
   FILE* f = fopen(kTestFilename, "wb");
-  fprintf(f, "# ninja log v6\n"
+  fprintf(f, "# ninja log v7\n"
              "1\t2\t3\tout\tcommand\n");
   fclose(f);
   std::string err;
@@ -280,7 +279,7 @@
   // Ninja's build log buffer is currently 256kB. Lines longer than that are
   // silently ignored, but don't affect parsing of other lines.
   FILE* f = fopen(kTestFilename, "wb");
-  fprintf(f, "# ninja log v6\n");
+  fprintf(f, "# ninja log v7\n");
   fprintf(f, "123\t456\t456\tout\tcommand start");
   for (size_t i = 0; i < (512 << 10) / strlen(" more_command"); ++i)
     fputs(" more_command", f);
@@ -289,7 +288,7 @@
       BuildLog::LogEntry::HashCommand("command2"));
   fclose(f);
 
-  string err;
+  std::string err;
   BuildLog log;
   EXPECT_TRUE(log.Load(kTestFilename, &err));
   ASSERT_EQ("", err);
@@ -335,7 +334,7 @@
 "build out2: cat in\n");
 
   BuildLog log1;
-  string err;
+  std::string err;
   EXPECT_TRUE(log1.OpenForWrite(kTestFilename, *this, &err));
   ASSERT_EQ("", err);
   // Record the same edge several times, to trigger recompaction
diff --git a/src/build_test.cc b/src/build_test.cc
index 61b0b6a..856b036 100644
--- a/src/build_test.cc
+++ b/src/build_test.cc
@@ -20,8 +20,9 @@
 
 #include "build_log.h"
 #include "deps_log.h"
+#include "exit_status.h"
 #include "graph.h"
-#include "status.h"
+#include "status_printer.h"
 #include "test.h"
 
 using namespace std;
@@ -616,8 +617,8 @@
   command_runner_.commands_ran_.clear();
   builder.command_runner_.reset(&command_runner_);
   if (!builder.AlreadyUpToDate()) {
-    bool build_res = builder.Build(&err);
-    EXPECT_TRUE(build_res);
+    ExitStatus build_res = builder.Build(&err);
+    EXPECT_EQ(build_res, ExitSuccess);
   }
   builder.command_runner_.release();
 }
@@ -823,7 +824,7 @@
   string err;
   EXPECT_TRUE(builder_.AddTarget("cat1", &err));
   ASSERT_EQ("", err);
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   ASSERT_EQ("", err);
 
   ASSERT_EQ(1u, command_runner_.commands_ran_.size());
@@ -837,7 +838,7 @@
   string err;
   EXPECT_TRUE(builder_.AddTarget("cat1", &err));
   ASSERT_EQ("", err);
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
 
   ASSERT_EQ(1u, command_runner_.commands_ran_.size());
@@ -848,7 +849,7 @@
   string err;
   EXPECT_TRUE(builder_.AddTarget("cat12", &err));
   ASSERT_EQ("", err);
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
   ASSERT_EQ(3u, command_runner_.commands_ran_.size());
   // Depending on how the pointers work out, we could've ran
@@ -868,7 +869,7 @@
   state_.Reset();
   EXPECT_TRUE(builder_.AddTarget("cat12", &err));
   ASSERT_EQ("", err);
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   ASSERT_EQ("", err);
   ASSERT_EQ(5u, command_runner_.commands_ran_.size());
   EXPECT_EQ("cat in1 in2 > cat2", command_runner_.commands_ran_[3]);
@@ -886,7 +887,7 @@
   string err;
   EXPECT_TRUE(builder_.AddTarget("out1", &err));
   ASSERT_EQ("", err);
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
   ASSERT_EQ(1u, command_runner_.commands_ran_.size());
   EXPECT_EQ("touch out1 out2", command_runner_.commands_ran_[0]);
@@ -902,7 +903,7 @@
   string err;
   EXPECT_TRUE(builder_.AddTarget("out.imp", &err));
   ASSERT_EQ("", err);
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
   ASSERT_EQ(1u, command_runner_.commands_ran_.size());
   EXPECT_EQ("touch out out.imp", command_runner_.commands_ran_[0]);
@@ -924,7 +925,7 @@
   string err;
   EXPECT_TRUE(builder_.AddTarget("out", &err));
   ASSERT_EQ("", err);
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
 }
 
@@ -940,7 +941,7 @@
   string err;
   EXPECT_TRUE(builder_.AddTarget("c5", &err));
   ASSERT_EQ("", err);
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
   ASSERT_EQ(4u, command_runner_.commands_ran_.size());
 
@@ -960,7 +961,7 @@
   EXPECT_TRUE(builder_.AddTarget("c5", &err));
   ASSERT_EQ("", err);
   EXPECT_FALSE(builder_.AlreadyUpToDate());
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   ASSERT_EQ(2u, command_runner_.commands_ran_.size());  // 3->4, 4->5
 }
 
@@ -1001,7 +1002,7 @@
   EXPECT_TRUE(builder_.AddTarget("subdir/dir2/file", &err));
 
   EXPECT_EQ("", err);
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   ASSERT_EQ("", err);
   ASSERT_EQ(2u, fs_.directories_made_.size());
   EXPECT_EQ("subdir", fs_.directories_made_[0]);
@@ -1086,7 +1087,7 @@
   EXPECT_TRUE(builder_.AddTarget("a", &err));
   ASSERT_EQ("", err);
 
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   ASSERT_EQ("", err);
   ASSERT_EQ(2u, command_runner_.commands_ran_.size());
 }
@@ -1119,7 +1120,7 @@
   ASSERT_EQ("cc foo.c", edge->EvaluateCommand());
 
   // explicit dep dirty, expect a rebuild.
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   ASSERT_EQ("", err);
   ASSERT_EQ(1u, command_runner_.commands_ran_.size());
 
@@ -1134,7 +1135,7 @@
   command_runner_.commands_ran_.clear();
   state_.Reset();
   EXPECT_TRUE(builder_.AddTarget("foo.o", &err));
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   ASSERT_EQ("", err);
   ASSERT_EQ(1u, command_runner_.commands_ran_.size());
 
@@ -1156,7 +1157,7 @@
   command_runner_.commands_ran_.clear();
   state_.Reset();
   EXPECT_TRUE(builder_.AddTarget("foo.o", &err));
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   ASSERT_EQ("", err);
   ASSERT_EQ(1u, command_runner_.commands_ran_.size());
 }
@@ -1174,7 +1175,7 @@
 
   // foo.o and order-only dep dirty, build both.
   EXPECT_TRUE(builder_.AddTarget("foo.o", &err));
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   ASSERT_EQ("", err);
   ASSERT_EQ(2u, command_runner_.commands_ran_.size());
 
@@ -1190,7 +1191,7 @@
   command_runner_.commands_ran_.clear();
   state_.Reset();
   EXPECT_TRUE(builder_.AddTarget("foo.o", &err));
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   ASSERT_EQ("", err);
   ASSERT_EQ(1u, command_runner_.commands_ran_.size());
   ASSERT_EQ("cc oo.h.in", command_runner_.commands_ran_[0]);
@@ -1202,7 +1203,7 @@
   command_runner_.commands_ran_.clear();
   state_.Reset();
   EXPECT_TRUE(builder_.AddTarget("foo.o", &err));
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   ASSERT_EQ("", err);
   ASSERT_EQ(1u, command_runner_.commands_ran_.size());
   ASSERT_EQ("cc oo.h.in", command_runner_.commands_ran_[0]);
@@ -1251,7 +1252,7 @@
 
   // Only one command to run, because phony runs no command.
   EXPECT_FALSE(builder_.AlreadyUpToDate());
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   ASSERT_EQ("", err);
   ASSERT_EQ(1u, command_runner_.commands_ran_.size());
 }
@@ -1345,7 +1346,7 @@
   ASSERT_EQ("", err);
   EXPECT_TRUE(builder_.AddTarget("test6", &err));
   ASSERT_EQ("", err);
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   ASSERT_EQ("", err);
 
   string ci;
@@ -1363,8 +1364,9 @@
     // Build number 1
     EXPECT_TRUE(builder_.AddTarget("test" + ci, &err));
     ASSERT_EQ("", err);
-    if (!builder_.AlreadyUpToDate())
-      EXPECT_TRUE(builder_.Build(&err));
+    if (!builder_.AlreadyUpToDate()) {
+      EXPECT_EQ(builder_.Build(&err), ExitSuccess);
+    }
     ASSERT_EQ("", err);
 
     // Touch the input file
@@ -1378,7 +1380,7 @@
     // Second build, expect testN edge to be rebuilt
     // and phonyN node's mtime to be updated.
     EXPECT_FALSE(builder_.AlreadyUpToDate());
-    EXPECT_TRUE(builder_.Build(&err));
+    EXPECT_EQ(builder_.Build(&err), ExitSuccess);
     ASSERT_EQ("", err);
     ASSERT_EQ(1u, command_runner_.commands_ran_.size());
     EXPECT_EQ(string("touch test") + ci, command_runner_.commands_ran_[0]);
@@ -1404,7 +1406,7 @@
     EXPECT_TRUE(builder_.AddTarget("test" + ci, &err));
     ASSERT_EQ("", err);
     EXPECT_FALSE(builder_.AlreadyUpToDate());
-    EXPECT_TRUE(builder_.Build(&err));
+    EXPECT_EQ(builder_.Build(&err), ExitSuccess);
     ASSERT_EQ("", err);
     ASSERT_EQ(1u, command_runner_.commands_ran_.size());
     EXPECT_EQ("touch test" + ci, command_runner_.commands_ran_[0]);
@@ -1414,7 +1416,7 @@
     EXPECT_TRUE(builder_.AddTarget("test" + ci, &err));
     ASSERT_EQ("", err);
     EXPECT_FALSE(builder_.AlreadyUpToDate());
-    EXPECT_TRUE(builder_.Build(&err));
+    EXPECT_EQ(builder_.Build(&err), ExitSuccess);
     ASSERT_EQ("", err);
     ASSERT_EQ(1u, command_runner_.commands_ran_.size());
     EXPECT_EQ("touch test" + ci, command_runner_.commands_ran_[0]);
@@ -1438,7 +1440,7 @@
   EXPECT_TRUE(builder_.AddTarget("out1", &err));
   ASSERT_EQ("", err);
 
-  EXPECT_FALSE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitFailure);
   ASSERT_EQ(1u, command_runner_.commands_ran_.size());
   ASSERT_EQ("subcommand failed", err);
 }
@@ -1459,7 +1461,7 @@
   EXPECT_TRUE(builder_.AddTarget("all", &err));
   ASSERT_EQ("", err);
 
-  EXPECT_FALSE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitFailure);
   ASSERT_EQ(3u, command_runner_.commands_ran_.size());
   ASSERT_EQ("subcommands failed", err);
 }
@@ -1480,7 +1482,7 @@
   EXPECT_TRUE(builder_.AddTarget("final", &err));
   ASSERT_EQ("", err);
 
-  EXPECT_FALSE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitFailure);
   ASSERT_EQ(3u, command_runner_.commands_ran_.size());
   ASSERT_EQ("cannot make progress due to previous errors", err);
 }
@@ -1504,7 +1506,7 @@
   EXPECT_TRUE(builder_.AddTarget("final", &err));
   ASSERT_EQ("", err);
 
-  EXPECT_FALSE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitFailure);
   ASSERT_EQ(3u, command_runner_.commands_ran_.size());
   ASSERT_EQ("cannot make progress due to previous errors", err);
 }
@@ -1578,7 +1580,7 @@
   EXPECT_TRUE(builder_.AddTarget("out.imp", &err));
   EXPECT_FALSE(builder_.AlreadyUpToDate());
 
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_TRUE(builder_.AlreadyUpToDate());
 
   command_runner_.commands_ran_.clear();
@@ -1601,7 +1603,7 @@
   EXPECT_TRUE(builder_.AddTarget("out.imp", &err));
   EXPECT_FALSE(builder_.AlreadyUpToDate());
 
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_TRUE(builder_.AlreadyUpToDate());
 
   command_runner_.commands_ran_.clear();
@@ -1635,7 +1637,7 @@
   state_.Reset();
 
   EXPECT_TRUE(builder_.AddTarget("out1", &err));
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_TRUE(builder_.AlreadyUpToDate());
 }
 
@@ -1651,7 +1653,7 @@
 
   // Run once successfully to get out1 in the log
   EXPECT_TRUE(builder_.AddTarget("out1", &err));
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
   EXPECT_EQ(1u, command_runner_.commands_ran_.size());
 
@@ -1665,7 +1667,7 @@
 
   // Run again with a failure that updates the output file timestamp
   EXPECT_TRUE(builder_.AddTarget("out1", &err));
-  EXPECT_FALSE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitFailure);
   EXPECT_EQ("subcommand failed", err);
   EXPECT_EQ(1u, command_runner_.commands_ran_.size());
 
@@ -1679,7 +1681,7 @@
   // Run again, should rerun even though the output file is up to date on disk
   EXPECT_TRUE(builder_.AddTarget("out1", &err));
   EXPECT_FALSE(builder_.AlreadyUpToDate());
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ(1u, command_runner_.commands_ran_.size());
   EXPECT_EQ("", err);
 }
@@ -1697,7 +1699,7 @@
 
   EXPECT_TRUE(builder_.AddTarget("out1", &err));
   EXPECT_TRUE(builder_.AddTarget("out2", &err));
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
   EXPECT_EQ(2u, command_runner_.commands_ran_.size());
 
@@ -1710,7 +1712,7 @@
 
   EXPECT_TRUE(builder_.AddTarget("out1", &err));
   EXPECT_TRUE(builder_.AddTarget("out2", &err));
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
   EXPECT_EQ(1u, command_runner_.commands_ran_.size());
 }
@@ -1741,10 +1743,10 @@
   string err;
   EXPECT_TRUE(builder_.AddTarget("out3", &err));
   ASSERT_EQ("", err);
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   ASSERT_EQ("", err);
-  EXPECT_EQ(3u, command_runner_.commands_ran_.size());
-  EXPECT_EQ(3u, builder_.plan_.command_edge_count());
+  EXPECT_EQ(size_t(3), command_runner_.commands_ran_.size());
+  EXPECT_EQ(3, builder_.plan_.command_edge_count());
   command_runner_.commands_ran_.clear();
   state_.Reset();
 
@@ -1755,7 +1757,7 @@
   // touch out2, we should cancel the build of out3.
   EXPECT_TRUE(builder_.AddTarget("out3", &err));
   ASSERT_EQ("", err);
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   ASSERT_EQ(2u, command_runner_.commands_ran_.size());
 
   // If we run again, it should be a no-op, because the build log has recorded
@@ -1776,7 +1778,7 @@
   state_.Reset();
   EXPECT_TRUE(builder_.AddTarget("out3", &err));
   ASSERT_EQ("", err);
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   ASSERT_EQ(2u, command_runner_.commands_ran_.size());
 }
 
@@ -1803,7 +1805,7 @@
   string err;
   EXPECT_TRUE(builder_.AddTarget("out2", &err));
   ASSERT_EQ("", err);
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   ASSERT_EQ("", err);
   command_runner_.commands_ran_.clear();
   state_.Reset();
@@ -1817,7 +1819,7 @@
   // we shouldn't run the dependent build.
   EXPECT_TRUE(builder_.AddTarget("out2", &err));
   ASSERT_EQ("", err);
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   ASSERT_EQ(1u, command_runner_.commands_ran_.size());
 }
 
@@ -1839,7 +1841,7 @@
   string err;
   EXPECT_TRUE(builder_.AddTarget("out4", &err));
   ASSERT_EQ("", err);
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   ASSERT_EQ("", err);
   ASSERT_EQ(3u, command_runner_.commands_ran_.size());
 
@@ -1851,12 +1853,12 @@
   // out2 and out3 will be built even though "in" is not touched when built.
   // Then, since out2 is rebuilt, out4 should be rebuilt -- the restat on the
   // "true" rule should not lead to the "touch" edge writing out2 and out3 being
-  // cleard.
+  // cleared.
   command_runner_.commands_ran_.clear();
   state_.Reset();
   EXPECT_TRUE(builder_.AddTarget("out4", &err));
   ASSERT_EQ("", err);
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   ASSERT_EQ("", err);
   ASSERT_EQ(3u, command_runner_.commands_ran_.size());
 }
@@ -1888,7 +1890,7 @@
   string err;
   EXPECT_TRUE(builder_.AddTarget("out2", &err));
   ASSERT_EQ("", err);
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   ASSERT_EQ(2u, command_runner_.commands_ran_.size());
 
   // See that an entry in the logfile is created, capturing
@@ -1906,7 +1908,7 @@
   state_.Reset();
   EXPECT_TRUE(builder_.AddTarget("out2", &err));
   ASSERT_EQ("", err);
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   ASSERT_EQ(1u, command_runner_.commands_ran_.size());
 
   // Check that the logfile entry remains correctly set
@@ -1932,10 +1934,10 @@
   string err;
   EXPECT_TRUE(builder_.AddTarget("out1", &err));
   ASSERT_EQ("", err);
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   ASSERT_EQ("", err);
-  EXPECT_EQ(2u, command_runner_.commands_ran_.size());
-  EXPECT_EQ(2u, builder_.plan_.command_edge_count());
+  EXPECT_EQ(size_t(2), command_runner_.commands_ran_.size());
+  EXPECT_EQ(2, builder_.plan_.command_edge_count());
   BuildLog::LogEntry* log_entry = build_log_.LookupByOutput("out1");
   ASSERT_TRUE(NULL != log_entry);
   ASSERT_EQ(2u, log_entry->mtime);
@@ -1955,10 +1957,10 @@
   EXPECT_TRUE(builder_.AddTarget("out1", &err));
   ASSERT_EQ("", err);
   EXPECT_TRUE(!state_.GetNode("out1", 0)->dirty());
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   ASSERT_EQ("", err);
-  EXPECT_EQ(1u, command_runner_.commands_ran_.size());
-  EXPECT_EQ(1u, builder_.plan_.command_edge_count());
+  EXPECT_EQ(size_t(1), command_runner_.commands_ran_.size());
+  EXPECT_EQ(1, builder_.plan_.command_edge_count());
 }
 
 TEST_F(BuildWithLogTest, GeneratedPlainDepfileMtime) {
@@ -1976,7 +1978,7 @@
   EXPECT_TRUE(builder_.AddTarget("out", &err));
   EXPECT_FALSE(builder_.AlreadyUpToDate());
 
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_TRUE(builder_.AlreadyUpToDate());
 
   command_runner_.commands_ran_.clear();
@@ -2019,7 +2021,30 @@
   string err;
   EXPECT_TRUE(builder_.AddTarget("out3", &err));
   ASSERT_EQ("", err);
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
+  ASSERT_EQ(3u, command_runner_.commands_ran_.size());
+}
+
+TEST_F(BuildDryRun, WithDyndep) {
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule touch\n"
+"  command = touch $out\n"
+"rule cp\n"
+"  command = cp $in $out\n"
+"build dd: cp dd-in\n"
+"build out: touch || dd\n"
+"  dyndep = dd\n"
+"build out-copy: cp out\n"
+));
+  fs_.Create("dd-in",
+"ninja_dyndep_version = 1\n"
+"build out: dyndep\n"
+);
+
+  string err;
+  EXPECT_TRUE(builder_.AddTarget("out-copy", &err));
+  ASSERT_EQ("", err);
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   ASSERT_EQ(3u, command_runner_.commands_ran_.size());
 }
 
@@ -2062,7 +2087,7 @@
   size_t files_created = fs_.files_created_.size();
   size_t files_removed = fs_.files_removed_.size();
 
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   ASSERT_EQ(3u, command_runner_.commands_ran_.size());
 
   // The RSP files and temp file to acquire output mtimes were created
@@ -2099,7 +2124,7 @@
   size_t files_created = fs_.files_created_.size();
   size_t files_removed = fs_.files_removed_.size();
 
-  EXPECT_FALSE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitFailure);
   ASSERT_EQ("subcommand failed", err);
   ASSERT_EQ(1u, command_runner_.commands_ran_.size());
 
@@ -2137,7 +2162,7 @@
   ASSERT_EQ("", err);
 
   // 1. Build for the 1st time (-> populate log)
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   ASSERT_EQ(1u, command_runner_.commands_ran_.size());
 
   // 2. Build again (no change)
@@ -2160,7 +2185,7 @@
   state_.Reset();
   EXPECT_TRUE(builder_.AddTarget("out", &err));
   EXPECT_EQ("", err);
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ(1u, command_runner_.commands_ran_.size());
 }
 
@@ -2183,7 +2208,7 @@
   string err;
   EXPECT_TRUE(builder_.AddTarget("out1", &err));
   EXPECT_EQ("", err);
-  EXPECT_FALSE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitInterrupted);
   EXPECT_EQ("interrupted by user", err);
   builder_.Cleanup();
   EXPECT_GT(fs_.Stat("out1", &err), 0);
@@ -2192,7 +2217,7 @@
   // A touched output of an interrupted command should be deleted.
   EXPECT_TRUE(builder_.AddTarget("out2", &err));
   EXPECT_EQ("", err);
-  EXPECT_FALSE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitInterrupted);
   EXPECT_EQ("interrupted by user", err);
   builder_.Cleanup();
   EXPECT_EQ(0, fs_.Stat("out2", &err));
@@ -2234,7 +2259,7 @@
   state_.Reset();
   EXPECT_TRUE(builder_.AddTarget("out2", &err));
   ASSERT_EQ("", err);
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
   ASSERT_EQ(1u, command_runner_.commands_ran_.size());
 }
@@ -2252,7 +2277,7 @@
   ASSERT_EQ("", err);
   EXPECT_FALSE(builder_.AlreadyUpToDate());
 
-  EXPECT_FALSE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitFailure);
   ASSERT_EQ("subcommand failed", err);
   ASSERT_EQ(1u, command_runner_.commands_ran_.size());
 }
@@ -2300,7 +2325,7 @@
   // path to the left of the colon.
   fs_.Create("in1.d", "AAA BBB");
 
-  EXPECT_FALSE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitFailure);
   EXPECT_EQ("subcommand failed", err);
 }
 
@@ -2340,7 +2365,7 @@
   std::string err;
   EXPECT_TRUE(builder_.AddTarget("out1", &err));
   ASSERT_EQ("", err);
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
   ASSERT_EQ(1u, command_runner_.commands_ran_.size());
   EXPECT_EQ("echo 'using in1' && for file in out1 out2; do cp in1 $file; done", command_runner_.commands_ran_[0]);
@@ -2369,7 +2394,7 @@
   EXPECT_TRUE(builder_.AddTarget("out1", &err));
   ASSERT_EQ("", err);
   fs_.Create("in.d", "out1 out2: in1 in2");
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
   ASSERT_EQ(1u, command_runner_.commands_ran_.size());
   EXPECT_EQ("echo 'out1 out2: in1 in2' > in.d && for file in out1 out2; do cp in1 $file; done", command_runner_.commands_ran_[0]);
@@ -2400,7 +2425,7 @@
   EXPECT_TRUE(builder_.AddTarget("out1", &err));
   ASSERT_EQ("", err);
   fs_.Create("in.d", "out1 out2: in1\nout1 out2: in2");
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
   ASSERT_EQ(1u, command_runner_.commands_ran_.size());
   EXPECT_EQ("echo 'out1 out2: in1\\nout1 out2: in2' > in.d && for file in out1 out2; do cp in1 $file; done", command_runner_.commands_ran_[0]);
@@ -2431,7 +2456,7 @@
   EXPECT_TRUE(builder_.AddTarget("out1", &err));
   ASSERT_EQ("", err);
   fs_.Create("in.d", "out1: in1 in2\nout2: in1 in2");
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
   ASSERT_EQ(1u, command_runner_.commands_ran_.size());
   EXPECT_EQ("echo 'out1: in1 in2\\nout2: in1 in2' > in.d && for file in out1 out2; do cp in1 $file; done", command_runner_.commands_ran_[0]);
@@ -2462,7 +2487,7 @@
   EXPECT_TRUE(builder_.AddTarget("out1", &err));
   ASSERT_EQ("", err);
   fs_.Create("in.d", "out1: in1 in2");
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
   ASSERT_EQ(1u, command_runner_.commands_ran_.size());
   EXPECT_EQ("echo 'out1: in1 in2' > in.d && for file in out1 out2; do cp in1 $file; done", command_runner_.commands_ran_[0]);
@@ -2495,7 +2520,7 @@
   EXPECT_TRUE(builder_.AddTarget("out1", &err));
   ASSERT_EQ("", err);
   fs_.Create("in.d", "out2: in1 in2");
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
   ASSERT_EQ(1u, command_runner_.commands_ran_.size());
   EXPECT_EQ("echo 'out2: in1 in2' > in.d && for file in out1 out2; do cp in1 $file; done", command_runner_.commands_ran_[0]);
@@ -2563,7 +2588,7 @@
     EXPECT_TRUE(builder.AddTarget("out", &err));
     ASSERT_EQ("", err);
     fs_.Create("in1.d", "out: in2");
-    EXPECT_TRUE(builder.Build(&err));
+    EXPECT_EQ(builder.Build(&err), ExitSuccess);
     EXPECT_EQ("", err);
 
     // The deps file should have been removed.
@@ -2593,7 +2618,7 @@
     command_runner_.commands_ran_.clear();
     EXPECT_TRUE(builder.AddTarget("out", &err));
     ASSERT_EQ("", err);
-    EXPECT_TRUE(builder.Build(&err));
+    EXPECT_EQ(builder.Build(&err), ExitSuccess);
     EXPECT_EQ("", err);
 
     // We should have rebuilt the output due to in2 being
@@ -2633,7 +2658,7 @@
     builder.command_runner_.reset(&command_runner_);
     EXPECT_TRUE(builder.AddTarget("out", &err));
     ASSERT_EQ("", err);
-    EXPECT_TRUE(builder.Build(&err));
+    EXPECT_EQ(builder.Build(&err), ExitSuccess);
     EXPECT_EQ("", err);
 
     deps_log.Close();
@@ -2667,7 +2692,7 @@
     // Recreate the deps file here because the build expects them to exist.
     fs_.Create("in1.d", "out: ");
 
-    EXPECT_TRUE(builder.Build(&err));
+    EXPECT_EQ(builder.Build(&err), ExitSuccess);
     EXPECT_EQ("", err);
 
     // We should have rebuilt the output due to the deps being
@@ -2701,7 +2726,7 @@
   string err;
   EXPECT_TRUE(builder.AddTarget("out", &err));
   ASSERT_EQ("", err);
-  EXPECT_TRUE(builder.Build(&err));
+  EXPECT_EQ(builder.Build(&err), ExitSuccess);
   ASSERT_EQ(1u, command_runner_.commands_ran_.size());
 
   builder.command_runner_.release();
@@ -2736,7 +2761,7 @@
     // Run the build, out gets built, dep file is created
     EXPECT_TRUE(builder.AddTarget("out", &err));
     ASSERT_EQ("", err);
-    EXPECT_TRUE(builder.Build(&err));
+    EXPECT_EQ(builder.Build(&err), ExitSuccess);
     ASSERT_EQ(1u, command_runner_.commands_ran_.size());
 
     // See that an entry in the logfile is created. the input_mtime is 1 since that was
@@ -2760,7 +2785,7 @@
     state.Reset();
     EXPECT_TRUE(builder.AddTarget("out", &err));
     ASSERT_EQ("", err);
-    EXPECT_TRUE(builder.Build(&err));
+    EXPECT_EQ(builder.Build(&err), ExitSuccess);
     ASSERT_EQ(1u, command_runner_.commands_ran_.size());
 
     // Check that the logfile entry is still correct
@@ -2816,7 +2841,7 @@
     // Run the build, out gets built, dep file is created
     EXPECT_TRUE(builder.AddTarget("out", &err));
     ASSERT_EQ("", err);
-    EXPECT_TRUE(builder.Build(&err));
+    EXPECT_EQ(builder.Build(&err), ExitSuccess);
     ASSERT_EQ(1u, command_runner_.commands_ran_.size());
 
     // See that an entry in the logfile is created. the mtime is 1 due to the command
@@ -2838,7 +2863,7 @@
     state.Reset();
     EXPECT_TRUE(builder.AddTarget("out", &err));
     ASSERT_EQ("", err);
-    EXPECT_TRUE(builder.Build(&err));
+    EXPECT_EQ(builder.Build(&err), ExitSuccess);
     ASSERT_EQ(1u, command_runner_.commands_ran_.size());
 
     builder.command_runner_.release();
@@ -2873,7 +2898,7 @@
     state.Reset();
     EXPECT_TRUE(builder.AddTarget("out", &err));
     ASSERT_EQ("", err);
-    EXPECT_TRUE(builder.Build(&err));
+    EXPECT_EQ(builder.Build(&err), ExitSuccess);
     ASSERT_EQ(1u, command_runner_.commands_ran_.size());
 
     builder.command_runner_.release();
@@ -2889,7 +2914,7 @@
     state.Reset();
     EXPECT_TRUE(builder.AddTarget("out", &err));
     ASSERT_EQ("", err);
-    EXPECT_TRUE(builder.Build(&err));
+    EXPECT_EQ(builder.Build(&err), ExitSuccess);
     ASSERT_EQ(1u, command_runner_.commands_ran_.size());
 
     builder.command_runner_.release();
@@ -2929,7 +2954,7 @@
   string err;
   EXPECT_TRUE(builder_.AddTarget("out", &err));
   ASSERT_EQ("", err);
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
 }
 
@@ -2961,7 +2986,7 @@
     EXPECT_TRUE(builder.AddTarget("out", &err));
     ASSERT_EQ("", err);
     fs_.Create("in1.d", "out: header.h");
-    EXPECT_TRUE(builder.Build(&err));
+    EXPECT_EQ(builder.Build(&err), ExitSuccess);
     EXPECT_EQ("", err);
 
     deps_log.Close();
@@ -2987,7 +3012,7 @@
     command_runner_.commands_ran_.clear();
     EXPECT_TRUE(builder.AddTarget("out", &err));
     ASSERT_EQ("", err);
-    EXPECT_TRUE(builder.Build(&err));
+    EXPECT_EQ(builder.Build(&err), ExitSuccess);
     EXPECT_EQ("", err);
 
     // Rule "true" should have run again, but the build of "out" should have
@@ -3020,7 +3045,7 @@
     EXPECT_TRUE(builder.AddTarget("fo o.o", &err));
     ASSERT_EQ("", err);
     fs_.Create("fo o.o.d", "fo\\ o.o: blah.h bar.h\n");
-    EXPECT_TRUE(builder.Build(&err));
+    EXPECT_EQ(builder.Build(&err), ExitSuccess);
     EXPECT_EQ("", err);
 
     deps_log.Close();
@@ -3091,7 +3116,7 @@
     EXPECT_TRUE(builder.AddTarget("out2", &err));
     EXPECT_FALSE(builder.AlreadyUpToDate());
 
-    EXPECT_TRUE(builder.Build(&err));
+    EXPECT_EQ(builder.Build(&err), ExitSuccess);
     EXPECT_TRUE(builder.AlreadyUpToDate());
 
     deps_log.Close();
@@ -3115,7 +3140,7 @@
     EXPECT_TRUE(builder.AddTarget("out2", &err));
     EXPECT_FALSE(builder.AlreadyUpToDate());
 
-    EXPECT_TRUE(builder.Build(&err));
+    EXPECT_EQ(builder.Build(&err), ExitSuccess);
     EXPECT_TRUE(builder.AlreadyUpToDate());
 
     deps_log.Close();
@@ -3168,7 +3193,7 @@
     // Note, different slashes from manifest.
     fs_.Create("a/b\\c\\d/e/fo o.o.d",
                "a\\b\\c\\d\\e\\fo\\ o.o: blah.h bar.h\n");
-    EXPECT_TRUE(builder.Build(&err));
+    EXPECT_EQ(builder.Build(&err), ExitSuccess);
     EXPECT_EQ("", err);
 
     deps_log.Close();
@@ -3260,7 +3285,7 @@
 
   // Touch 'header.in', blank dependencies log (create a different one).
   // Building header.h triggers 'restat' outputs cleanup.
-  // Validate that out is rebuilt netherless, as deps are missing.
+  // Validate that out is rebuilt nevertheless, as deps are missing.
   fs_.Tick();
   fs_.Create("header.in", "");
 
@@ -3319,7 +3344,7 @@
   string err;
   EXPECT_TRUE(builder_.AddTarget("cons", &err));
   ASSERT_EQ("", err);
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
   ASSERT_EQ(1u, command_runner_.commands_ran_.size());
 }
@@ -3360,7 +3385,7 @@
   string err;
   EXPECT_TRUE(builder_.AddTarget("out", &err));
   ASSERT_EQ("", err);
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
   ASSERT_EQ(2u, command_runner_.commands_ran_.size());
   EXPECT_EQ("touch tmp tmp.imp", command_runner_.commands_ran_[0]);
@@ -3427,7 +3452,7 @@
   EXPECT_EQ("", err);
 
   size_t files_created = fs_.files_created_.size();
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
 
   ASSERT_EQ(2u, command_runner_.commands_ran_.size());
@@ -3490,7 +3515,7 @@
   EXPECT_TRUE(builder_.AddTarget("out", &err));
   EXPECT_EQ("", err);
 
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
   ASSERT_EQ(3u, command_runner_.commands_ran_.size());
   EXPECT_EQ("cp dd-in dd", command_runner_.commands_ran_[0]);
@@ -3522,7 +3547,7 @@
   EXPECT_TRUE(builder_.AddTarget("out", &err));
   EXPECT_EQ("", err);
 
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
   ASSERT_EQ(2u, command_runner_.commands_ran_.size());
   EXPECT_EQ("cp dd-in dd", command_runner_.commands_ran_[0]);
@@ -3624,7 +3649,7 @@
   EXPECT_TRUE(builder_.AddTarget("out", &err));
   EXPECT_EQ("", err);
 
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
   ASSERT_EQ(3u, command_runner_.commands_ran_.size());
   EXPECT_EQ("cp dd-in dd", command_runner_.commands_ran_[0]);
@@ -3684,7 +3709,7 @@
   EXPECT_TRUE(builder_.AddTarget("out", &err));
   EXPECT_EQ("", err);
 
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
   ASSERT_EQ(4u, command_runner_.commands_ran_.size());
   EXPECT_EQ("cp dd-in dd", command_runner_.commands_ran_[0]);
@@ -3717,7 +3742,7 @@
   string err;
   EXPECT_TRUE(builder_.AddTarget("out", &err));
   ASSERT_EQ("", err);
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
   ASSERT_EQ(3u, command_runner_.commands_ran_.size());
   EXPECT_EQ("cp dd-in dd", command_runner_.commands_ran_[0]);
@@ -3753,7 +3778,7 @@
   // Loading the depfile did not give tmp.imp a phony input edge.
   ASSERT_FALSE(GetNode("tmp.imp")->in_edge());
 
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
 
   // Loading the dyndep file gave tmp.imp a real input edge.
@@ -3792,7 +3817,7 @@
   string err;
   EXPECT_TRUE(builder_.AddTarget("out", &err));
   ASSERT_EQ("", err);
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
   ASSERT_EQ(3u, command_runner_.commands_ran_.size());
   EXPECT_EQ("cp dd-in dd", command_runner_.commands_ran_[0]);
@@ -3823,7 +3848,7 @@
   string err;
   EXPECT_TRUE(builder_.AddTarget("out", &err));
   ASSERT_EQ("", err);
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
   ASSERT_EQ(3u, command_runner_.commands_ran_.size());
   EXPECT_EQ("cp dd-in dd", command_runner_.commands_ran_[0]);
@@ -3894,7 +3919,7 @@
   string err;
   EXPECT_TRUE(builder_.AddTarget("out2", &err));
   ASSERT_EQ("", err);
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   ASSERT_EQ("", err);
   ASSERT_EQ(3u, command_runner_.commands_ran_.size());
   EXPECT_EQ("cp dd-in dd", command_runner_.commands_ran_[0]);
@@ -3910,7 +3935,7 @@
   // touch "out1", we should cancel the build of "out2".
   EXPECT_TRUE(builder_.AddTarget("out2", &err));
   ASSERT_EQ("", err);
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   ASSERT_EQ(1u, command_runner_.commands_ran_.size());
   EXPECT_EQ("true", command_runner_.commands_ran_[0]);
 }
@@ -3951,7 +3976,7 @@
   EXPECT_TRUE(builder_.AddTarget("out1", &err));
   EXPECT_TRUE(builder_.AddTarget("out2", &err));
   ASSERT_EQ("", err);
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
   ASSERT_EQ(3u, command_runner_.commands_ran_.size());
   // Depending on how the pointers in Plan::ready_ work out, the first
@@ -4002,7 +4027,7 @@
   string err;
   EXPECT_TRUE(builder_.AddTarget("out2", &err));
   ASSERT_EQ("", err);
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
   ASSERT_EQ(3u, command_runner_.commands_ran_.size());
   EXPECT_EQ("cp dd1-in dd1", command_runner_.commands_ran_[0]);
@@ -4047,7 +4072,7 @@
   string err;
   EXPECT_TRUE(builder_.AddTarget("out2", &err));
   ASSERT_EQ("", err);
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
   ASSERT_EQ(3u, command_runner_.commands_ran_.size());
   EXPECT_EQ("cp dd1-in dd1", command_runner_.commands_ran_[0]);
@@ -4087,7 +4112,7 @@
   EXPECT_TRUE(builder_.AddTarget("out", &err));
   EXPECT_EQ("", err);
 
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
   ASSERT_EQ(4u, command_runner_.commands_ran_.size());
   EXPECT_EQ("cp dd1-in dd1", command_runner_.commands_ran_[0]);
@@ -4127,7 +4152,7 @@
   EXPECT_TRUE(builder_.AddTarget("out", &err));
   EXPECT_EQ("", err);
 
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
   ASSERT_EQ(5u, command_runner_.commands_ran_.size());
   EXPECT_EQ("cp dd1-in dd1", command_runner_.commands_ran_[0]);
@@ -4149,7 +4174,7 @@
   EXPECT_TRUE(builder_.AddTarget("out", &err));
   EXPECT_EQ("", err);
 
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
 
   EXPECT_EQ(2u, command_runner_.commands_ran_.size());
@@ -4165,7 +4190,7 @@
   EXPECT_TRUE(builder_.AddTarget("out", &err));
   ASSERT_EQ("", err);
 
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
 
   ASSERT_EQ(1u, command_runner_.commands_ran_.size());
@@ -4182,7 +4207,7 @@
   EXPECT_TRUE(builder_.AddTarget("out", &err));
   ASSERT_EQ("", err);
 
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
 
   ASSERT_EQ(1u, command_runner_.commands_ran_.size());
@@ -4201,7 +4226,7 @@
   EXPECT_TRUE(builder_.AddTarget("out", &err));
   EXPECT_EQ("", err);
 
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
 
   EXPECT_EQ(2u, command_runner_.commands_ran_.size());
@@ -4216,7 +4241,7 @@
   EXPECT_TRUE(builder_.AddTarget("out", &err));
   ASSERT_EQ("", err);
 
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
 
   EXPECT_EQ(2u, command_runner_.commands_ran_.size());
@@ -4232,7 +4257,7 @@
   EXPECT_TRUE(builder_.AddTarget("out", &err));
   ASSERT_EQ("", err);
 
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
 
   ASSERT_EQ(1u, command_runner_.commands_ran_.size());
@@ -4269,11 +4294,11 @@
     EXPECT_TRUE(builder.AddTarget("out2", &err));
     ASSERT_EQ("", err);
 
-    EXPECT_TRUE(builder.Build(&err));
+    EXPECT_EQ(builder.Build(&err), ExitSuccess);
     EXPECT_EQ("", err);
 
     // On the first build, only the out2 command is run.
-    ASSERT_EQ(command_runner_.commands_ran_.size(), 1);
+    ASSERT_EQ(command_runner_.commands_ran_.size(), size_t(1));
     EXPECT_EQ("cat in3 > out2", command_runner_.commands_ran_[0]);
 
     // The deps file should have been removed.
@@ -4305,11 +4330,11 @@
     EXPECT_TRUE(builder.AddTarget("out2", &err));
     ASSERT_EQ("", err);
 
-    EXPECT_TRUE(builder.Build(&err));
+    EXPECT_EQ(builder.Build(&err), ExitSuccess);
     EXPECT_EQ("", err);
 
     // The out and validate actions should have been run as well as out2.
-    ASSERT_EQ(command_runner_.commands_ran_.size(), 3);
+    ASSERT_EQ(command_runner_.commands_ran_.size(), size_t(3));
     // out has to run first, as both out2 and validate depend on it.
     EXPECT_EQ("cat in > out", command_runner_.commands_ran_[0]);
 
@@ -4330,7 +4355,7 @@
   EXPECT_TRUE(builder_.AddTarget("out", &err));
   EXPECT_EQ("", err);
 
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
 
   EXPECT_EQ(2u, command_runner_.commands_ran_.size());
@@ -4345,7 +4370,7 @@
   EXPECT_TRUE(builder_.AddTarget("out", &err));
   ASSERT_EQ("", err);
 
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
 
   ASSERT_EQ(1u, command_runner_.commands_ran_.size());
@@ -4361,7 +4386,7 @@
   EXPECT_TRUE(builder_.AddTarget("out", &err));
   ASSERT_EQ("", err);
 
-  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ(builder_.Build(&err), ExitSuccess);
   EXPECT_EQ("", err);
 
   ASSERT_EQ(1u, command_runner_.commands_ran_.size());
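
The build_test.cc hunks above track an upstream API change: Builder::Build() now reports an exit status instead of a plain bool, which is why the assertions compare against ExitSuccess, ExitFailure and ExitInterrupted. A minimal sketch of the new calling pattern, assuming the ExitStatus enum from src/exit_status.h and a configured builder_ as in the test fixture:

    std::string err;
    ExitStatus result = builder_.Build(&err);
    if (result != ExitSuccess) {
      // `err` carries the reason, e.g. "subcommand failed" or
      // "interrupted by user", matching the expectations above.
      Error("build did not succeed: %s", err.c_str());
    }
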
diff --git a/src/clean_test.cc b/src/clean_test.cc
index e99909c..39aede3 100644
--- a/src/clean_test.cc
+++ b/src/clean_test.cc
@@ -469,11 +469,11 @@
 struct CleanDeadTest : public CleanTest, public BuildLogUser{
   virtual void SetUp() {
     // In case a crashing test left a stale file behind.
-    unlink(kTestFilename);
+    platformAwareUnlink(kTestFilename);
     CleanTest::SetUp();
   }
   virtual void TearDown() {
-    unlink(kTestFilename);
+    platformAwareUnlink(kTestFilename);
   }
   virtual bool IsPathDead(StringPiece) const { return false; }
 };
diff --git a/src/command_collector.h b/src/command_collector.h
new file mode 100644
index 0000000..003af9f
--- /dev/null
+++ b/src/command_collector.h
@@ -0,0 +1,65 @@
+// Copyright 2024 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_COMMAND_COLLECTOR_H_
+#define NINJA_COMMAND_COLLECTOR_H_
+
+#include <cassert>
+#include <unordered_set>
+#include <vector>
+
+#include "graph.h"
+
+/// Collects the transitive set of edges that lead into a given set
+/// of starting nodes. Used to implement the `compdb-targets` tool.
+///
+/// When collecting inputs, phony edges are never added to the result,
+/// but the dependency walk still follows through their inputs.
+///
+/// Usage is:
+/// - Create instance.
+/// - Call CollectFrom() for each root node to collect edges from.
+/// - Read |in_edges| to retrieve the list of collected edges.
+///
+struct CommandCollector {
+  void CollectFrom(const Node* node) {
+    assert(node);
+
+    if (!visited_nodes_.insert(node).second)
+      return;
+
+    Edge* edge = node->in_edge();
+    if (!edge || !visited_edges_.insert(edge).second)
+      return;
+
+    for (Node* input_node : edge->inputs_)
+      CollectFrom(input_node);
+
+    if (!edge->is_phony())
+      in_edges.push_back(edge);
+  }
+
+ private:
+  std::unordered_set<const Node*> visited_nodes_;
+  std::unordered_set<Edge*> visited_edges_;
+
+  /// We use a vector to preserve order from prerequisites to their dependents.
+  /// This may help LSP server performance in languages that support modules,
+  /// but it also ensures that the output of `-t compdb-targets foo` is
+  /// consistent, which is useful in regression tests.
+ public:
+  std::vector<Edge*> in_edges;
+};
+
+#endif  //  NINJA_COMMAND_COLLECTOR_H_
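
A hedged usage sketch for the collector above, roughly how a tool like `-t compdb-targets` might drive it; the `targets` vector and the printing step are illustrative assumptions, not code from this change:

    // Collect every non-phony edge reachable from the requested targets,
    // in dependency order (prerequisites before their dependents).
    CommandCollector collector;
    for (const Node* target : targets)
      collector.CollectFrom(target);
    for (Edge* edge : collector.in_edges)
      printf("%s\n", edge->EvaluateCommand().c_str());
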
diff --git a/src/debug_flags.cc b/src/debug_flags.cc
index 44b14c4..c83abb4 100644
--- a/src/debug_flags.cc
+++ b/src/debug_flags.cc
@@ -12,6 +12,13 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+#include <stdio.h>
+#include <map>
+#include <vector>
+#include <string>
+
+#include "graph.h"
+
 bool g_explaining = false;
 
 bool g_keep_depfile = false;
diff --git a/src/debug_flags.h b/src/debug_flags.h
index e08a43b..fe73a52 100644
--- a/src/debug_flags.h
+++ b/src/debug_flags.h
@@ -17,10 +17,8 @@
 
 #include <stdio.h>
 
-#define EXPLAIN(fmt, ...) {                                             \
-  if (g_explaining)                                                     \
-    fprintf(stderr, "ninja explain: " fmt "\n", __VA_ARGS__);           \
-}
+struct Edge;
+struct Node;
 
 extern bool g_explaining;
 
diff --git a/src/depfile_parser.cc b/src/depfile_parser.cc
index 7ce7290..73ba69c 100644
--- a/src/depfile_parser.cc
+++ b/src/depfile_parser.cc
@@ -74,10 +74,10 @@
           0,   0,   0,   0,   0,   0,   0,   0, 
           0,   0,   0,   0,   0,   0,   0,   0, 
           0,   0,   0,   0,   0,   0,   0,   0, 
-          0, 128,   0,   0,   0, 128,   0,   0, 
+          0, 128, 128,   0,   0, 128, 128, 128, 
         128, 128,   0, 128, 128, 128, 128, 128, 
         128, 128, 128, 128, 128, 128, 128, 128, 
-        128, 128, 128,   0,   0, 128,   0,   0, 
+        128, 128, 128,   0,   0, 128,   0, 128, 
         128, 128, 128, 128, 128, 128, 128, 128, 
         128, 128, 128, 128, 128, 128, 128, 128, 
         128, 128, 128, 128, 128, 128, 128, 128, 
@@ -105,55 +105,55 @@
       };
       yych = *in;
       if (yybm[0+yych] & 128) {
-        goto yy9;
+        goto yy5;
       }
       if (yych <= '\r') {
         if (yych <= '\t') {
-          if (yych >= 0x01) goto yy4;
+          if (yych >= 0x01) goto yy1;
         } else {
-          if (yych <= '\n') goto yy6;
-          if (yych <= '\f') goto yy4;
-          goto yy8;
+          if (yych <= '\n') goto yy3;
+          if (yych <= '\f') goto yy1;
+          goto yy4;
         }
       } else {
         if (yych <= '$') {
-          if (yych <= '#') goto yy4;
-          goto yy12;
+          if (yych <= '#') goto yy1;
+          goto yy7;
         } else {
-          if (yych <= '?') goto yy4;
-          if (yych <= '\\') goto yy13;
-          goto yy4;
+          if (yych <= '>') goto yy1;
+          if (yych <= '\\') goto yy8;
+          goto yy1;
         }
       }
       ++in;
       {
         break;
       }
-yy4:
+yy1:
       ++in;
-yy5:
+yy2:
       {
         // For any other character (e.g. whitespace), swallow it here,
         // allowing the outer logic to loop around again.
         break;
       }
-yy6:
+yy3:
       ++in;
       {
         // A newline ends the current file name and the current rule.
         have_newline = true;
         break;
       }
-yy8:
+yy4:
       yych = *++in;
-      if (yych == '\n') goto yy6;
-      goto yy5;
-yy9:
+      if (yych == '\n') goto yy3;
+      goto yy2;
+yy5:
       yych = *++in;
       if (yybm[0+yych] & 128) {
-        goto yy9;
+        goto yy5;
       }
-yy11:
+yy6:
       {
         // Got a span of plain text.
         int len = (int)(in - start);
@@ -163,54 +163,54 @@
         out += len;
         continue;
       }
-yy12:
+yy7:
       yych = *++in;
-      if (yych == '$') goto yy14;
-      goto yy5;
-yy13:
+      if (yych == '$') goto yy9;
+      goto yy2;
+yy8:
       yych = *(yymarker = ++in);
       if (yych <= ' ') {
         if (yych <= '\n') {
-          if (yych <= 0x00) goto yy5;
-          if (yych <= '\t') goto yy16;
-          goto yy17;
+          if (yych <= 0x00) goto yy2;
+          if (yych <= '\t') goto yy10;
+          goto yy11;
         } else {
-          if (yych == '\r') goto yy19;
-          if (yych <= 0x1F) goto yy16;
-          goto yy21;
+          if (yych == '\r') goto yy12;
+          if (yych <= 0x1F) goto yy10;
+          goto yy13;
         }
       } else {
         if (yych <= '9') {
-          if (yych == '#') goto yy23;
-          goto yy16;
+          if (yych == '#') goto yy14;
+          goto yy10;
         } else {
-          if (yych <= ':') goto yy25;
-          if (yych == '\\') goto yy27;
-          goto yy16;
+          if (yych <= ':') goto yy15;
+          if (yych == '\\') goto yy17;
+          goto yy10;
         }
       }
-yy14:
+yy9:
       ++in;
       {
         // De-escape dollar character.
         *out++ = '$';
         continue;
       }
-yy16:
+yy10:
       ++in;
-      goto yy11;
-yy17:
+      goto yy6;
+yy11:
       ++in;
       {
         // A line continuation ends the current file name.
         break;
       }
-yy19:
+yy12:
       yych = *++in;
-      if (yych == '\n') goto yy17;
+      if (yych == '\n') goto yy11;
       in = yymarker;
-      goto yy5;
-yy21:
+      goto yy2;
+yy13:
       ++in;
       {
         // 2N+1 backslashes plus space -> N backslashes plus space.
@@ -222,7 +222,7 @@
         *out++ = ' ';
         continue;
       }
-yy23:
+yy14:
       ++in;
       {
         // De-escape hash sign, but preserve other leading backslashes.
@@ -233,17 +233,17 @@
         *out++ = '#';
         continue;
       }
-yy25:
+yy15:
       yych = *++in;
       if (yych <= '\f') {
-        if (yych <= 0x00) goto yy28;
-        if (yych <= 0x08) goto yy26;
-        if (yych <= '\n') goto yy28;
+        if (yych <= 0x00) goto yy18;
+        if (yych <= 0x08) goto yy16;
+        if (yych <= '\n') goto yy18;
       } else {
-        if (yych <= '\r') goto yy28;
-        if (yych == ' ') goto yy28;
+        if (yych <= '\r') goto yy18;
+        if (yych == ' ') goto yy18;
       }
-yy26:
+yy16:
       {
         // De-escape colon sign, but preserve other leading backslashes.
         // Regular expression uses lookahead to make sure that no whitespace
@@ -255,29 +255,29 @@
         *out++ = ':';
         continue;
       }
-yy27:
+yy17:
       yych = *++in;
       if (yych <= ' ') {
         if (yych <= '\n') {
-          if (yych <= 0x00) goto yy11;
-          if (yych <= '\t') goto yy16;
-          goto yy11;
+          if (yych <= 0x00) goto yy6;
+          if (yych <= '\t') goto yy10;
+          goto yy6;
         } else {
-          if (yych == '\r') goto yy11;
-          if (yych <= 0x1F) goto yy16;
-          goto yy30;
+          if (yych == '\r') goto yy6;
+          if (yych <= 0x1F) goto yy10;
+          goto yy19;
         }
       } else {
         if (yych <= '9') {
-          if (yych == '#') goto yy23;
-          goto yy16;
+          if (yych == '#') goto yy14;
+          goto yy10;
         } else {
-          if (yych <= ':') goto yy25;
-          if (yych == '\\') goto yy32;
-          goto yy16;
+          if (yych <= ':') goto yy15;
+          if (yych == '\\') goto yy20;
+          goto yy10;
         }
       }
-yy28:
+yy18:
       ++in;
       {
         // Backslash followed by : and whitespace.
@@ -291,7 +291,7 @@
           have_newline = true;
         break;
       }
-yy30:
+yy19:
       ++in;
       {
         // 2N backslashes plus space -> 2N backslashes, end of filename.
@@ -301,26 +301,26 @@
         out += len - 1;
         break;
       }
-yy32:
+yy20:
       yych = *++in;
       if (yych <= ' ') {
         if (yych <= '\n') {
-          if (yych <= 0x00) goto yy11;
-          if (yych <= '\t') goto yy16;
-          goto yy11;
+          if (yych <= 0x00) goto yy6;
+          if (yych <= '\t') goto yy10;
+          goto yy6;
         } else {
-          if (yych == '\r') goto yy11;
-          if (yych <= 0x1F) goto yy16;
-          goto yy21;
+          if (yych == '\r') goto yy6;
+          if (yych <= 0x1F) goto yy10;
+          goto yy13;
         }
       } else {
         if (yych <= '9') {
-          if (yych == '#') goto yy23;
-          goto yy16;
+          if (yych == '#') goto yy14;
+          goto yy10;
         } else {
-          if (yych <= ':') goto yy25;
-          if (yych == '\\') goto yy27;
-          goto yy16;
+          if (yych <= ':') goto yy15;
+          if (yych == '\\') goto yy17;
+          goto yy10;
         }
       }
     }
diff --git a/src/depfile_parser.in.cc b/src/depfile_parser.in.cc
index 4b5f5fe..69da366 100644
--- a/src/depfile_parser.in.cc
+++ b/src/depfile_parser.in.cc
@@ -134,7 +134,7 @@
         *out++ = '$';
         continue;
       }
-      '\\'+ [^\000\r\n] | [a-zA-Z0-9+,/_:.~()}{%=@\x5B\x5D!\x80-\xFF-]+ {
+      '\\'+ [^\000\r\n] | [a-zA-Z0-9+?"'&,/_:.~()}{%=@\x5B\x5D!\x80-\xFF-]+ {
         // Got a span of plain text.
         int len = (int)(in - start);
         // Need to shift it over if we're overwriting backslashes.
diff --git a/src/depfile_parser_test.cc b/src/depfile_parser_test.cc
index 947ae76..8d6724b 100644
--- a/src/depfile_parser_test.cc
+++ b/src/depfile_parser_test.cc
@@ -62,6 +62,30 @@
   EXPECT_EQ(2u, parser_.ins_.size());
 }
 
+TEST_F(DepfileParserTest, WindowsDrivePaths) {
+  string err;
+  EXPECT_TRUE(Parse("foo.o: //?/c:/bar.h\n", &err));
+  ASSERT_EQ("", err);
+  ASSERT_EQ(1u, parser_.outs_.size());
+  EXPECT_EQ("foo.o", parser_.outs_[0].AsString());
+  EXPECT_EQ(1u, parser_.ins_.size());
+  EXPECT_EQ("//?/c:/bar.h", parser_.ins_[0].AsString());
+}
+
+TEST_F(DepfileParserTest, AmpersandsAndQuotes) {
+  string err;
+  EXPECT_TRUE(Parse("foo&bar.o foo'bar.o foo\"bar.o: foo&bar.h foo'bar.h foo\"bar.h\n", &err));
+  ASSERT_EQ("", err);
+  ASSERT_EQ(3u, parser_.outs_.size());
+  EXPECT_EQ("foo&bar.o", parser_.outs_[0].AsString());
+  EXPECT_EQ("foo'bar.o", parser_.outs_[1].AsString());
+  EXPECT_EQ("foo\"bar.o", parser_.outs_[2].AsString());
+  EXPECT_EQ(3u, parser_.ins_.size());
+  EXPECT_EQ("foo&bar.h", parser_.ins_[0].AsString());
+  EXPECT_EQ("foo'bar.h", parser_.ins_[1].AsString());
+  EXPECT_EQ("foo\"bar.h", parser_.ins_[2].AsString());
+}
+
 TEST_F(DepfileParserTest, CarriageReturnContinuation) {
   string err;
   EXPECT_TRUE(Parse(
diff --git a/src/deps_log.cc b/src/deps_log.cc
index e32a7a9..fa59ef2 100644
--- a/src/deps_log.cc
+++ b/src/deps_log.cc
@@ -15,8 +15,9 @@
 #include "deps_log.h"
 
 #include <assert.h>
-#include <stdio.h>
 #include <errno.h>
+#include <stdint.h>
+#include <stdio.h>
 #include <string.h>
 #ifndef _WIN32
 #include <unistd.h>
@@ -34,12 +35,14 @@
 
 // The version is stored as 4 bytes after the signature and also serves as a
 // byte order mark. Signature and version combined are 16 bytes long.
-const char kFileSignature[] = "# ninjadeps\n";
-const int kCurrentVersion = 4;
+static const char kFileSignature[] = "# ninjadeps\n";
+static const size_t kFileSignatureSize = sizeof(kFileSignature) - 1u;
+
+static const int32_t kCurrentVersion = 4;
 
 // Record size is currently limited to less than the full 32 bit, due to
 // internal buffers having to have this size.
-const unsigned kMaxRecordSize = (1 << 19) - 1;
+static constexpr size_t kMaxRecordSize = (1 << 19) - 1;
 
 DepsLog::~DepsLog() {
   Close();
@@ -59,12 +62,11 @@
 
 bool DepsLog::RecordDeps(Node* node, TimeStamp mtime,
                          const vector<Node*>& nodes) {
-  return RecordDeps(node, mtime, nodes.size(),
-                    nodes.empty() ? NULL : (Node**)&nodes.front());
+  return RecordDeps(node, mtime, nodes.size(), nodes.data());
 }
 
-bool DepsLog::RecordDeps(Node* node, TimeStamp mtime,
-                         int node_count, Node** nodes) {
+bool DepsLog::RecordDeps(Node* node, TimeStamp mtime, int node_count,
+                         Node* const* nodes) {
   // Track whether there's any new data to be recorded.
   bool made_change = false;
 
@@ -160,36 +162,36 @@
     return LOAD_ERROR;
   }
 
-  bool valid_header = true;
-  int version = 0;
-  if (!fgets(buf, sizeof(buf), f) || fread(&version, 4, 1, f) < 1)
-    valid_header = false;
+  bool valid_header = fread(buf, kFileSignatureSize, 1, f) == 1 &&
+                      !memcmp(buf, kFileSignature, kFileSignatureSize);
+
+  int32_t version = 0;
+  bool valid_version =
+      fread(&version, 4, 1, f) == 1 && version == kCurrentVersion;
+
   // Note: For version differences, this should migrate to the new format.
   // But the v1 format could sometimes (rarely) end up with invalid data, so
   // don't migrate v1 to v3 to force a rebuild. (v2 only existed for a few days,
   // and there was no release with it, so pretend that it never happened.)
-  if (!valid_header || strcmp(buf, kFileSignature) != 0 ||
-      version != kCurrentVersion) {
+  if (!valid_header || !valid_version) {
     if (version == 1)
       *err = "deps log version change; rebuilding";
     else
       *err = "bad deps log signature or version; starting over";
     fclose(f);
-    unlink(path.c_str());
+    platformAwareUnlink(path.c_str());
     // Don't report this as a failure.  An empty deps log will cause
     // us to rebuild the outputs anyway.
     return LOAD_SUCCESS;
   }
 
-  long offset;
+  long offset = ftell(f);
   bool read_failed = false;
   int unique_dep_record_count = 0;
   int total_dep_record_count = 0;
   for (;;) {
-    offset = ftell(f);
-
     unsigned size;
-    if (fread(&size, 4, 1, f) < 1) {
+    if (fread(&size, sizeof(size), 1, f) < 1) {
       if (!feof(f))
         read_failed = true;
       break;
@@ -201,9 +203,13 @@
       read_failed = true;
       break;
     }
+    offset += size + sizeof(size);
 
     if (is_deps) {
-      assert(size % 4 == 0);
+      if ((size % 4) != 0) {
+        read_failed = true;
+        break;
+      }
       int* deps_data = reinterpret_cast<int*>(buf);
       int out_id = deps_data[0];
       TimeStamp mtime;
@@ -212,10 +218,18 @@
       deps_data += 3;
       int deps_count = (size / 4) - 3;
 
+      for (int i = 0; i < deps_count; ++i) {
+        int node_id = deps_data[i];
+        if (node_id >= (int)nodes_.size() || !nodes_[node_id]) {
+          read_failed = true;
+          break;
+        }
+      }
+      if (read_failed)
+        break;
+
       Deps* deps = new Deps(mtime, deps_count);
       for (int i = 0; i < deps_count; ++i) {
-        assert(deps_data[i] < (int)nodes_.size());
-        assert(nodes_[deps_data[i]]);
         deps->nodes[i] = nodes_[deps_data[i]];
       }
 
@@ -224,7 +238,10 @@
         ++unique_dep_record_count;
     } else {
       int path_size = size - 4;
-      assert(path_size > 0);  // CanonicalizePath() rejects empty paths.
+      if (path_size <= 0) {
+        read_failed = true;
+        break;
+      }
       // There can be up to 3 bytes of padding.
       if (buf[path_size - 1] == '\0') --path_size;
       if (buf[path_size - 1] == '\0') --path_size;
@@ -244,12 +261,10 @@
       unsigned checksum = *reinterpret_cast<unsigned*>(buf + size - 4);
       int expected_id = ~checksum;
       int id = nodes_.size();
-      if (id != expected_id) {
+      if (id != expected_id || node->id() >= 0) {
         read_failed = true;
         break;
       }
-
-      assert(node->id() < 0);
       node->set_id(id);
       nodes_.push_back(node);
     }
@@ -316,7 +331,7 @@
 
   // OpenForWrite() opens for append.  Make sure it's not appending to a
   // left-over file from a previous recompaction attempt that crashed somehow.
-  unlink(temp_path.c_str());
+  platformAwareUnlink(temp_path.c_str());
 
   DepsLog new_log;
   if (!new_log.OpenForWrite(temp_path, err))
@@ -348,7 +363,7 @@
   deps_.swap(new_log.deps_);
   nodes_.swap(new_log.nodes_);
 
-  if (unlink(path.c_str()) < 0) {
+  if (platformAwareUnlink(path.c_str()) < 0) {
     *err = strerror(errno);
     return false;
   }
@@ -384,6 +399,7 @@
 
 bool DepsLog::RecordId(Node* node) {
   int path_size = node->path().size();
+  assert(path_size > 0 && "Trying to record empty path Node!");
   int padding = (4 - path_size % 4) % 4;  // Pad path to 4 byte boundary.
 
   unsigned size = path_size + padding + 4;
@@ -398,7 +414,6 @@
   if (fwrite(&size, 4, 1, file_) < 1)
     return false;
   if (fwrite(node->path().data(), path_size, 1, file_) < 1) {
-    assert(!node->path().empty());
     return false;
   }
   if (padding && fwrite("\0\0", padding, 1, file_) < 1)
diff --git a/src/deps_log.h b/src/deps_log.h
index 2a1b188..cb82c25 100644
--- a/src/deps_log.h
+++ b/src/deps_log.h
@@ -72,7 +72,8 @@
   // Writing (build-time) interface.
   bool OpenForWrite(const std::string& path, std::string* err);
   bool RecordDeps(Node* node, TimeStamp mtime, const std::vector<Node*>& nodes);
-  bool RecordDeps(Node* node, TimeStamp mtime, int node_count, Node** nodes);
+  bool RecordDeps(Node* node, TimeStamp mtime, int node_count,
+                  Node* const* nodes);
   void Close();
 
   // Reading (startup-time) interface.
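
The RecordDeps overload above now takes `Node* const*` instead of `Node**`, so callers can pass the data of a const vector directly (as the deps_log.cc hunk does with nodes.data()). A small sketch under assumed variables (deps_log, out_node, mtime, state):

    std::vector<Node*> deps;
    deps.push_back(state.GetNode("foo.h", 0));
    // The vector overload and the pointer overload are now equivalent
    // for read-only node arrays.
    deps_log.RecordDeps(out_node, mtime, deps);
    deps_log.RecordDeps(out_node, mtime, static_cast<int>(deps.size()),
                        deps.data());
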
diff --git a/src/deps_log_test.cc b/src/deps_log_test.cc
index cb1c925..61a9dcf 100644
--- a/src/deps_log_test.cc
+++ b/src/deps_log_test.cc
@@ -19,6 +19,7 @@
 #include <unistd.h>
 #endif
 
+#include "disk_interface.h"
 #include "graph.h"
 #include "util.h"
 #include "test.h"
@@ -32,11 +33,9 @@
 struct DepsLogTest : public testing::Test {
   virtual void SetUp() {
     // In case a crashing test left a stale file behind.
-    unlink(kTestFilename);
+    platformAwareUnlink(kTestFilename);
   }
-  virtual void TearDown() {
-    unlink(kTestFilename);
-  }
+  virtual void TearDown() { platformAwareUnlink(kTestFilename); }
 };
 
 TEST_F(DepsLogTest, WriteRead) {
@@ -542,4 +541,139 @@
   EXPECT_TRUE(rev_deps == state.GetNode("out.o", 0));
 }
 
+TEST_F(DepsLogTest, MalformedDepsLog) {
+  std::string err;
+  {
+    State state;
+    DepsLog log;
+    EXPECT_TRUE(log.OpenForWrite(kTestFilename, &err));
+    ASSERT_EQ("", err);
+
+    // First, create a valid log file.
+    std::vector<Node*> deps;
+    deps.push_back(state.GetNode("foo.hh", 0));
+    deps.push_back(state.GetNode("bar.hpp", 0));
+    log.RecordDeps(state.GetNode("out.o", 0), 1, deps);
+    log.Close();
+  }
+
+  // Now read its value, validate it a little.
+  RealDiskInterface disk;
+
+  std::string original_contents;
+  ASSERT_EQ(FileReader::Okay, disk.ReadFile(kTestFilename,
+                                          &original_contents, &err));
+
+  const size_t version_offset = 12;
+  ASSERT_EQ("# ninjadeps\n", original_contents.substr(0, version_offset));
+  ASSERT_EQ('\x04', original_contents[version_offset + 0]);
+  ASSERT_EQ('\x00', original_contents[version_offset + 1]);
+  ASSERT_EQ('\x00', original_contents[version_offset + 2]);
+  ASSERT_EQ('\x00', original_contents[version_offset + 3]);
+
+  // clang-format off
+  static const uint8_t kFirstRecord[] = {
+    // size field == 0x0000000c
+    0x0c, 0x00, 0x00, 0x00,
+    // name field = 'out.o' + 3 bytes of padding.
+    'o', 'u', 't', '.', 'o', 0x00, 0x00, 0x00,
+    // checksum = ~0
+    0xff, 0xff, 0xff, 0xff,
+  };
+  // clang-format on
+  const size_t kFirstRecordLen = sizeof(kFirstRecord);
+  const size_t first_offset = version_offset + 4;
+
+#define COMPARE_RECORD(start_pos, reference, len)  \
+  ASSERT_EQ(original_contents.substr(start_pos, len), std::string(reinterpret_cast<const char*>(reference), len))
+
+  COMPARE_RECORD(first_offset, kFirstRecord, kFirstRecordLen);
+
+  const size_t second_offset = first_offset + kFirstRecordLen;
+  // clang-format off
+  static const uint8_t kSecondRecord[] = {
+    // size field == 0x0000000c
+    0x0c, 0x00, 0x00, 0x00,
+    // name field = 'foo.hh' + 2 bytes of padding.
+    'f', 'o', 'o', '.', 'h', 'h', 0x00, 0x00,
+    // checksum = ~1
+    0xfe, 0xff, 0xff, 0xff,
+  };
+  // clang-format on
+  const size_t kSecondRecordLen = sizeof(kSecondRecord);
+  COMPARE_RECORD(second_offset, kSecondRecord, kSecondRecordLen);
+
+  // Then start generating corrupted versions and trying to load them.
+  const char kBadLogFile[] = "DepsLogTest-corrupted.tempfile";
+
+  // Helper lambda to rewrite the bad log file with new content.
+  auto write_bad_log_file =
+      [&disk, &kBadLogFile](const std::string& bad_contents) -> bool {
+    (void)disk.RemoveFile(kBadLogFile);
+    return disk.WriteFile(kBadLogFile, bad_contents);
+  };
+
+  // First, corrupt the header.
+  std::string bad_contents = original_contents;
+  bad_contents[0] = '@';
+
+  ASSERT_TRUE(write_bad_log_file(bad_contents)) << strerror(errno);
+  {
+    State state;
+    DepsLog log;
+    err.clear();
+    ASSERT_EQ(LOAD_SUCCESS, log.Load(kBadLogFile, &state, &err));
+    ASSERT_EQ("bad deps log signature or version; starting over", err);
+  }
+
+  // Second, truncate the version.
+  bad_contents = original_contents.substr(0, version_offset + 3);
+  ASSERT_TRUE(write_bad_log_file(bad_contents)) << strerror(errno);
+  {
+    State state;
+    DepsLog log;
+    err.clear();
+    ASSERT_EQ(LOAD_SUCCESS, log.Load(kBadLogFile, &state, &err));
+    ASSERT_EQ("bad deps log signature or version; starting over", err);
+  }
+
+  // Truncate first record's |size| field. The loader should recover.
+  bad_contents = original_contents.substr(0, version_offset + 4 + 3);
+  ASSERT_TRUE(write_bad_log_file(bad_contents)) << strerror(errno);
+  {
+    State state;
+    DepsLog log;
+    err.clear();
+    ASSERT_EQ(LOAD_SUCCESS, log.Load(kBadLogFile, &state, &err));
+    ASSERT_EQ("", err);
+  }
+
+  // Corrupt first record |size| value.
+  bad_contents = original_contents;
+  bad_contents[first_offset + 0] = '\x55';
+  bad_contents[first_offset + 1] = '\xaa';
+  bad_contents[first_offset + 2] = '\xff';
+  bad_contents[first_offset + 3] = '\xff';
+  ASSERT_TRUE(write_bad_log_file(bad_contents)) << strerror(errno);
+  {
+    State state;
+    DepsLog log;
+    err.clear();
+    ASSERT_EQ(LOAD_SUCCESS, log.Load(kBadLogFile, &state, &err));
+    ASSERT_EQ("premature end of file; recovering", err);
+  }
+
+  // Make first record |size| less than 4.
+  bad_contents = original_contents;
+  bad_contents[first_offset] = '\x01';
+  ASSERT_TRUE(write_bad_log_file(bad_contents)) << strerror(errno);
+  {
+    State state;
+    DepsLog log;
+    err.clear();
+    ASSERT_EQ(LOAD_SUCCESS, log.Load(kBadLogFile, &state, &err));
+    ASSERT_EQ("premature end of file; recovering", err);
+  }
+}
+
 }  // anonymous namespace
diff --git a/src/disk_interface.cc b/src/disk_interface.cc
index 8102a0e..7f86e19 100644
--- a/src/disk_interface.cc
+++ b/src/disk_interface.cc
@@ -158,14 +158,14 @@
 }
 
 // RealDiskInterface -----------------------------------------------------------
-RealDiskInterface::RealDiskInterface() 
+RealDiskInterface::RealDiskInterface()
 #ifdef _WIN32
 : use_cache_(false), long_paths_enabled_(false) {
   // Probe ntdll.dll for RtlAreLongPathsEnabled, and call it if it exists.
   HINSTANCE ntdll_lib = ::GetModuleHandleW(L"ntdll");
   if (ntdll_lib) {
     typedef BOOLEAN(WINAPI FunctionType)();
-    auto* func_ptr = reinterpret_cast<FunctionType*>(
+    auto* func_ptr = FunctionCast<FunctionType*>(
         ::GetProcAddress(ntdll_lib, "RtlAreLongPathsEnabled"));
     if (func_ptr) {
       long_paths_enabled_ = (*func_ptr)();
@@ -246,7 +246,7 @@
 }
 
 bool RealDiskInterface::WriteFile(const string& path, const string& contents) {
-  FILE* fp = fopen(path.c_str(), "w");
+  FILE* fp = fopen(path.c_str(), "wb");
   if (fp == NULL) {
     Error("WriteFile(%s): Unable to create file. %s",
           path.c_str(), strerror(errno));
@@ -306,7 +306,7 @@
     SetFileAttributesA(path.c_str(), attributes & ~FILE_ATTRIBUTE_READONLY);
   }
   if (attributes & FILE_ATTRIBUTE_DIRECTORY) {
-    // remove() deletes both files and directories. On Windows we have to 
+    // remove() deletes both files and directories. On Windows we have to
     // select the correct function (DeleteFile will yield Permission Denied when
     // used on a directory)
     // This fixes the behavior of ninja -t clean in some cases
diff --git a/src/disk_interface_test.cc b/src/disk_interface_test.cc
index e8d869c..b09ed04 100644
--- a/src/disk_interface_test.cc
+++ b/src/disk_interface_test.cc
@@ -259,7 +259,7 @@
 
 struct StatTest : public StateTestWithBuiltinRules,
                   public DiskInterface {
-  StatTest() : scan_(&state_, NULL, NULL, this, NULL) {}
+  StatTest() : scan_(&state_, NULL, NULL, this, NULL, NULL) {}
 
   // DiskInterface implementation.
   virtual TimeStamp Stat(const string& path, string* err) const;
diff --git a/src/dyndep.cc b/src/dyndep.cc
index a0d699d..15e6984 100644
--- a/src/dyndep.cc
+++ b/src/dyndep.cc
@@ -20,6 +20,7 @@
 #include "debug_flags.h"
 #include "disk_interface.h"
 #include "dyndep_parser.h"
+#include "explanations.h"
 #include "graph.h"
 #include "state.h"
 #include "util.h"
@@ -37,15 +38,14 @@
   node->set_dyndep_pending(false);
 
   // Load the dyndep information from the file.
-  EXPLAIN("loading dyndep file '%s'", node->path().c_str());
+  explanations_.Record(node, "loading dyndep file '%s'", node->path().c_str());
+
   if (!LoadDyndepFile(node, ddf, err))
     return false;
 
   // Update each edge that specified this node as its dyndep binding.
   std::vector<Edge*> const& out_edges = node->out_edges();
-  for (std::vector<Edge*>::const_iterator oe = out_edges.begin();
-       oe != out_edges.end(); ++oe) {
-    Edge* const edge = *oe;
+  for (Edge* edge : out_edges) {
     if (edge->dyndep_ != node)
       continue;
 
@@ -65,10 +65,9 @@
   }
 
   // Reject extra outputs in dyndep file.
-  for (DyndepFile::const_iterator oe = ddf->begin(); oe != ddf->end();
-       ++oe) {
-    if (!oe->second.used_) {
-      Edge* const edge = oe->first;
+  for (const auto& dyndep_output : *ddf) {
+    if (!dyndep_output.second.used_) {
+      Edge* const edge = dyndep_output.first;
       *err = ("dyndep file '" + node->path() + "' mentions output "
               "'" + edge->outputs_[0]->path() + "' whose build statement "
               "does not have a dyndep binding for the file");
@@ -94,15 +93,13 @@
   edge->implicit_outs_ += dyndeps->implicit_outputs_.size();
 
   // Add this edge as incoming to each new output.
-  for (std::vector<Node*>::const_iterator i =
-           dyndeps->implicit_outputs_.begin();
-       i != dyndeps->implicit_outputs_.end(); ++i) {
-    if ((*i)->in_edge()) {
+  for (Node* node : dyndeps->implicit_outputs_) {
+    if (node->in_edge()) {
       // This node already has an edge producing it.
-      *err = "multiple rules generate " + (*i)->path();
+      *err = "multiple rules generate " + node->path();
       return false;
     }
-    (*i)->set_in_edge(edge);
+    node->set_in_edge(edge);
   }
 
   // Add the dyndep-discovered inputs to the edge.
@@ -112,10 +109,8 @@
   edge->implicit_deps_ += dyndeps->implicit_inputs_.size();
 
   // Add this edge as outgoing from each new input.
-  for (std::vector<Node*>::const_iterator i =
-           dyndeps->implicit_inputs_.begin();
-       i != dyndeps->implicit_inputs_.end(); ++i)
-    (*i)->AddOutEdge(edge);
+  for (Node* node : dyndeps->implicit_inputs_)
+    node->AddOutEdge(edge);
 
   return true;
 }
diff --git a/src/dyndep.h b/src/dyndep.h
index 907f921..5999800 100644
--- a/src/dyndep.h
+++ b/src/dyndep.h
@@ -19,6 +19,8 @@
 #include <string>
 #include <vector>
 
+#include "explanations.h"
+
 struct DiskInterface;
 struct Edge;
 struct Node;
@@ -42,8 +44,10 @@
 /// DyndepLoader loads dynamically discovered dependencies, as
 /// referenced via the "dyndep" attribute in build files.
 struct DyndepLoader {
-  DyndepLoader(State* state, DiskInterface* disk_interface)
-      : state_(state), disk_interface_(disk_interface) {}
+  DyndepLoader(State* state, DiskInterface* disk_interface,
+               Explanations* explanations = nullptr)
+      : state_(state), disk_interface_(disk_interface),
+        explanations_(explanations) {}
 
   /// Load a dyndep file from the given node's path and update the
   /// build graph with the new information.  One overload accepts
@@ -59,6 +63,7 @@
 
   State* state_;
   DiskInterface* disk_interface_;
+  mutable OptionalExplanations explanations_;
 };
 
 #endif  // NINJA_DYNDEP_LOADER_H_
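
DyndepLoader now accepts an optional Explanations pointer so that dyndep file loads can be reported through the `-d explain` machinery; omitting it (or passing nullptr) keeps the previous behavior. A sketch under the assumption that Explanations from explanations.h is default-constructible here:

    Explanations explanations;  // e.g. only created when `-d explain` is active
    DyndepLoader loader(&state, &disk_interface, &explanations);
    // DyndepLoader loader(&state, &disk_interface);  // no explanation output
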
diff --git a/src/dyndep_parser.cc b/src/dyndep_parser.cc
index 1b4dddd..f35f48d 100644
--- a/src/dyndep_parser.cc
+++ b/src/dyndep_parser.cc
@@ -88,7 +88,6 @@
   if (major != 1 || minor != 0) {
     return lexer_.Error(
       string("unsupported 'ninja_dyndep_version = ") + version + "'", err);
-    return false;
   }
   return true;
 }
@@ -96,11 +95,7 @@
 bool DyndepParser::ParseLet(string* key, EvalString* value, string* err) {
   if (!lexer_.ReadIdent(key))
     return lexer_.Error("expected variable name", err);
-  if (!ExpectToken(Lexer::EQUALS, err))
-    return false;
-  if (!lexer_.ReadVarValue(value, err))
-    return false;
-  return true;
+  return (ExpectToken(Lexer::EQUALS, err) && lexer_.ReadVarValue(value, err));
 }
 
 bool DyndepParser::ParseEdge(string* err) {
@@ -200,8 +195,8 @@
   }
 
   dyndeps->implicit_inputs_.reserve(ins.size());
-  for (vector<EvalString>::iterator i = ins.begin(); i != ins.end(); ++i) {
-    string path = i->Evaluate(&env_);
+  for (const EvalString& in : ins) {
+    string path = in.Evaluate(&env_);
     if (path.empty())
       return lexer_.Error("empty path", err);
     uint64_t slash_bits;
@@ -211,11 +206,10 @@
   }
 
   dyndeps->implicit_outputs_.reserve(outs.size());
-  for (vector<EvalString>::iterator i = outs.begin(); i != outs.end(); ++i) {
-    string path = i->Evaluate(&env_);
+  for (const EvalString& out : outs) {
+    string path = out.Evaluate(&env_);
     if (path.empty())
       return lexer_.Error("empty path", err);
-    string path_err;
     uint64_t slash_bits;
     CanonicalizePath(&path, &slash_bits);
     Node* n = state_->GetNode(path, slash_bits);
diff --git a/src/elide_middle.cc b/src/elide_middle.cc
new file mode 100644
index 0000000..cf17da5
--- /dev/null
+++ b/src/elide_middle.cc
@@ -0,0 +1,276 @@
+// Copyright 2024 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "elide_middle.h"
+
+#include <assert.h>
+#include <string.h>
+
+// Convenience class used to iterate over the ANSI color sequences
+// of an input string. Note that this ignores non-color related
+// ANSI sequences. Usage is:
+//
+//  - Create instance, passing the input string to the constructor.
+//  - Loop over each sequence with:
+//
+//        AnsiColorSequenceIterator iter;
+//        while (iter.HasSequence()) {
+//          .. use iter.SequenceStart() and iter.SequenceEnd()
+//          iter.NextSequence();
+//        }
+//
+struct AnsiColorSequenceIterator {
+  // Constructor takes the input string.
+  AnsiColorSequenceIterator(const std::string& input)
+      : input_(input.data()), input_end_(input_ + input.size()) {
+    FindNextSequenceFrom(input_);
+  }
+
+  // Return true if an ANSI sequence was found.
+  bool HasSequence() const { return cur_end_ != 0; }
+
+  // Start of the current sequence.
+  size_t SequenceStart() const { return cur_start_; }
+
+  // End of the current sequence (index of the first character
+  // following the sequence).
+  size_t SequenceEnd() const { return cur_end_; }
+
+  // Size of the current sequence in characters.
+  size_t SequenceSize() const { return cur_end_ - cur_start_; }
+
+  // Returns true if |input_index| belongs to the current sequence.
+  bool SequenceContains(size_t input_index) const {
+    return (input_index >= cur_start_ && input_index < cur_end_);
+  }
+
+  // Find the next sequence, if any, from the input.
+  // Returns false if there are no more sequences.
+  bool NextSequence() {
+    if (FindNextSequenceFrom(input_ + cur_end_))
+      return true;
+
+    cur_start_ = 0;
+    cur_end_ = 0;
+    return false;
+  }
+
+  // Reset iterator to start of input.
+  void Reset() {
+    cur_start_ = cur_end_ = 0;
+    FindNextSequenceFrom(input_);
+  }
+
+ private:
+  // Find the next sequence from the input. |from| is the starting position
+  // for the search and must be in the [input_, input_end_] interval. On
+  // success, returns true after setting cur_start_ and cur_end_; on failure,
+  // returns false.
+  bool FindNextSequenceFrom(const char* from) {
+    assert(from >= input_ && from <= input_end_);
+    auto* seq =
+        static_cast<const char*>(::memchr(from, '\x1b', input_end_ - from));
+    if (!seq)
+      return false;
+
+    // The smallest possible color sequence is '\x1b[0m' and has four
+    // characters.
+    if (seq + 4 > input_end_)
+      return false;
+
+    if (seq[1] != '[')
+      return FindNextSequenceFrom(seq + 1);
+
+    // Skip parameters (digits + ; separator)
+    auto is_parameter_char = [](char ch) -> bool {
+      return (ch >= '0' && ch <= '9') || ch == ';';
+    };
+
+    const char* end = seq + 2;
+    while (is_parameter_char(end[0])) {
+      if (++end == input_end_)
+        return false;  // Incomplete sequence (no command).
+    }
+
+    if (*end++ != 'm') {
+      // Not a color sequence. Restart the search after the first
+      // character following the [, in case this was a 3-char ANSI
+      // sequence (which is ignored here).
+      return FindNextSequenceFrom(seq + 3);
+    }
+
+    // Found it!
+    cur_start_ = seq - input_;
+    cur_end_ = end - input_;
+    return true;
+  }
+
+  size_t cur_start_ = 0;
+  size_t cur_end_ = 0;
+  const char* input_;
+  const char* input_end_;
+};
+
+// A class used to iterate over all characters of an input string,
+// and return its visible position in the terminal, and whether that
+// specific character is visible (or otherwise part of an ANSI color sequence).
+//
+// Example sequence and iterations, where 'ANSI' represents an ANSI Color
+// sequence, and | is used to express concatenation
+//
+//   |abcd|ANSI|efgh|ANSI|ijk|      input string
+//
+//                11 1111 111
+//    0123 4567 8901 2345 678       input indices
+//
+//                          1
+//    0123 4444 4567 8888 890       visible positions
+//
+//    TTTT FFFF TTTT FFFF TTT       is_visible
+//
+// Usage is:
+//
+//     VisibleInputCharsIterator iter(input);
+//     while (iter.HasChar()) {
+//       ... use iter.InputIndex() to get input index of current char.
+//       ... use iter.VisiblePosition() to get its visible position.
+//       ... use iter.IsVisible() to check whether the current char is visible.
+//
+//       NextChar();
+//     }
+//
+struct VisibleInputCharsIterator {
+  VisibleInputCharsIterator(const std::string& input)
+      : input_size_(input.size()), ansi_iter_(input) {}
+
+  // Return true if there is a character in the sequence.
+  bool HasChar() const { return input_index_ < input_size_; }
+
+  // Return current input index.
+  size_t InputIndex() const { return input_index_; }
+
+  // Return current visible position.
+  size_t VisiblePosition() const { return visible_pos_; }
+
+  // Return true if the current input character is visible
+  // (i.e. not part of an ANSI color sequence).
+  bool IsVisible() const { return !ansi_iter_.SequenceContains(input_index_); }
+
+  // Find next character from the input.
+  void NextChar() {
+    visible_pos_ += IsVisible();
+    if (++input_index_ == ansi_iter_.SequenceEnd()) {
+      ansi_iter_.NextSequence();
+    }
+  }
+
+ private:
+  size_t input_size_;
+  size_t input_index_ = 0;
+  size_t visible_pos_ = 0;
+  AnsiColorSequenceIterator ansi_iter_;
+};
+
+void ElideMiddleInPlace(std::string& str, size_t max_width) {
+  if (str.size() <= max_width) {
+    return;
+  }
+  // Look for an ESC character. If there is none, use a fast path
+  // that avoids any intermediate allocations.
+  if (str.find('\x1b') == std::string::npos) {
+    const int ellipsis_width = 3;  // Space for "...".
+
+    // If max width is too small, do not keep anything from the input.
+    if (max_width <= ellipsis_width) {
+      str.assign("...", max_width);
+      return;
+    }
+
+    // Keep only |max_width - ellipsis_size| visible characters from the input
+    // which will be split into two spans separated by "...".
+    const size_t remaining_size = max_width - ellipsis_width;
+    const size_t left_span_size = remaining_size / 2;
+    const size_t right_span_size = remaining_size - left_span_size;
+
+    // Replace the gap in the input between the spans with "..."
+    const size_t gap_start = left_span_size;
+    const size_t gap_end = str.size() - right_span_size;
+    str.replace(gap_start, gap_end - gap_start, "...");
+    return;
+  }
+
+  // Compute visible width.
+  size_t visible_width = str.size();
+  for (AnsiColorSequenceIterator ansi(str); ansi.HasSequence();
+       ansi.NextSequence()) {
+    visible_width -= ansi.SequenceSize();
+  }
+
+  if (visible_width <= max_width)
+    return;
+
+  // Compute the widths of the ellipsis, left span and right span
+  // visible space.
+  const size_t ellipsis_width = max_width < 3 ? max_width : 3;
+  const size_t visible_left_span_size = (max_width - ellipsis_width) / 2;
+  const size_t visible_right_span_size =
+      (max_width - ellipsis_width) - visible_left_span_size;
+
+  // Compute the gap of visible characters that will be replaced by
+  // the ellipsis in visible space.
+  const size_t visible_gap_start = visible_left_span_size;
+  const size_t visible_gap_end = visible_width - visible_right_span_size;
+
+  std::string result;
+  result.reserve(str.size());
+
+  // Parse the input chars info to:
+  //
+  // 1) Append any characters belonging to the left span (visible or not).
+  //
+  // 2) Add the ellipsis ("..." truncated to ellipsis_width).
+  //    Note that its color is inherited from the left span chars
+  //    which will never end with an ANSI sequence.
+  //
+  // 3) Append any ANSI sequence that appears inside the gap. This
+  //    ensures the characters after the ellipsis appear with
+  //    the right color.
+  //
+  // 4) Append any remaining characters (visible or not) to the result.
+  //
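+  // For example (values from elide_middle_test.cc), "012345" followed by a
+  // magenta ANSI sequence and "67890123456789", elided to a width of 10,
+  // becomes "012..." + <magenta sequence> + "6789": ten visible characters,
+  // with the color sequence preserved after the ellipsis.
+  //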
+  VisibleInputCharsIterator iter(str);
+
+  // Step 1 - determine left span length in input chars.
+  for (; iter.HasChar(); iter.NextChar()) {
+    if (iter.VisiblePosition() == visible_gap_start)
+      break;
+  }
+  result.append(str.begin(), str.begin() + iter.InputIndex());
+
+  // Step 2 - Append the possibly-truncated ellipsis.
+  result.append("...", ellipsis_width);
+
+  // Step 3 - Append elided ANSI sequences to the result.
+  for (; iter.HasChar(); iter.NextChar()) {
+    if (iter.VisiblePosition() == visible_gap_end)
+      break;
+    if (!iter.IsVisible())
+      result.push_back(str[iter.InputIndex()]);
+  }
+
+  // Step 4 - Append anything else.
+  result.append(str.begin() + iter.InputIndex(), str.end());
+
+  str = std::move(result);
+}
diff --git a/src/elide_middle.h b/src/elide_middle.h
new file mode 100644
index 0000000..128a997
--- /dev/null
+++ b/src/elide_middle.h
@@ -0,0 +1,27 @@
+// Copyright 2024 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_ELIDE_MIDDLE_H_
+#define NINJA_ELIDE_MIDDLE_H_
+
+#include <cstddef>
+#include <string>
+
+/// Elide the given string @a str with '...' in the middle if the length
+/// exceeds @a max_width. Note that this handles ANSI color sequences
+/// properly (non-color related sequences are ignored, but using them
+/// would wreck the cursor position or terminal state anyway).
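+///
+/// A minimal usage sketch (expected result taken from elide_middle_test.cc):
+///
+///   std::string status = "01234567890123456789";
+///   ElideMiddleInPlace(status, 10);   // status is now "012...6789"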
+void ElideMiddleInPlace(std::string& str, size_t max_width);
+
+#endif  // NINJA_ELIDE_MIDDLE_H_
diff --git a/src/elide_middle_perftest.cc b/src/elide_middle_perftest.cc
new file mode 100644
index 0000000..94a8ccb
--- /dev/null
+++ b/src/elide_middle_perftest.cc
@@ -0,0 +1,72 @@
+// Copyright 2024 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <vector>
+
+#include "elide_middle.h"
+#include "metrics.h"
+
+static const char* kTestInputs[] = {
+  "01234567890123456789",
+  "012345\x1B[0;35m67890123456789",
+  "abcd\x1b[1;31mefg\x1b[0mhlkmnopqrstuvwxyz",
+};
+
+int main() {
+  std::vector<int> times;
+
+  int64_t kMaxTimeMillis = 5 * 1000;
+  int64_t base_time = GetTimeMillis();
+
+  const int kRuns = 100;
+  for (int j = 0; j < kRuns; ++j) {
+    int64_t start = GetTimeMillis();
+    if (start >= base_time + kMaxTimeMillis)
+      break;
+
+    const int kNumRepetitions = 2000;
+    for (int count = kNumRepetitions; count > 0; --count) {
+      for (const char* input : kTestInputs) {
+        size_t input_len = ::strlen(input);
+        for (size_t max_width = input_len; max_width > 0; --max_width) {
+          std::string str(input, input_len);
+          ElideMiddleInPlace(str, max_width);
+        }
+      }
+    }
+
+    int delta = (int)(GetTimeMillis() - start);
+    times.push_back(delta);
+  }
+
+  int min = times[0];
+  int max = times[0];
+  float total = 0;
+  for (size_t i = 0; i < times.size(); ++i) {
+    total += times[i];
+    if (times[i] < min)
+      min = times[i];
+    else if (times[i] > max)
+      max = times[i];
+  }
+
+  printf("min %dms  max %dms  avg %.1fms\n", min, max, total / times.size());
+
+  return 0;
+}
diff --git a/src/elide_middle_test.cc b/src/elide_middle_test.cc
new file mode 100644
index 0000000..ed80e4e
--- /dev/null
+++ b/src/elide_middle_test.cc
@@ -0,0 +1,101 @@
+// Copyright 2024 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "elide_middle.h"
+
+#include "test.h"
+
+namespace {
+
+std::string ElideMiddle(const std::string& str, size_t width) {
+  std::string result = str;
+  ElideMiddleInPlace(result, width);
+  return result;
+}
+
+}  // namespace
+
+
+TEST(ElideMiddle, NothingToElide) {
+  std::string input = "Nothing to elide in this short string.";
+  EXPECT_EQ(input, ElideMiddle(input, 80));
+  EXPECT_EQ(input, ElideMiddle(input, 38));
+  EXPECT_EQ("", ElideMiddle(input, 0));
+  EXPECT_EQ(".", ElideMiddle(input, 1));
+  EXPECT_EQ("..", ElideMiddle(input, 2));
+  EXPECT_EQ("...", ElideMiddle(input, 3));
+}
+
+TEST(ElideMiddle, ElideInTheMiddle) {
+  std::string input = "01234567890123456789";
+  EXPECT_EQ("...9", ElideMiddle(input, 4));
+  EXPECT_EQ("0...9", ElideMiddle(input, 5));
+  EXPECT_EQ("012...789", ElideMiddle(input, 9));
+  EXPECT_EQ("012...6789", ElideMiddle(input, 10));
+  EXPECT_EQ("0123...6789", ElideMiddle(input, 11));
+  EXPECT_EQ("01234567...23456789", ElideMiddle(input, 19));
+  EXPECT_EQ("01234567890123456789", ElideMiddle(input, 20));
+}
+
+// A few ANSI escape sequences. These macros make the following
+// test easier to read and understand.
+#define MAGENTA "\x1B[0;35m"
+#define NOTHING "\33[m"
+#define RED "\x1b[1;31m"
+#define RESET "\x1b[0m"
+
+TEST(ElideMiddle, ElideAnsiEscapeCodes) {
+  std::string input = "012345" MAGENTA "67890123456789";
+  EXPECT_EQ("012..." MAGENTA "6789", ElideMiddle(input, 10));
+  EXPECT_EQ("012345" MAGENTA "67...23456789", ElideMiddle(input, 19));
+
+  EXPECT_EQ("Nothing " NOTHING " string.",
+            ElideMiddle("Nothing " NOTHING " string.", 18));
+  EXPECT_EQ("0" NOTHING "12...6789",
+            ElideMiddle("0" NOTHING "1234567890123456789", 10));
+
+  input = "abcd" RED "efg" RESET "hlkmnopqrstuvwxyz";
+  EXPECT_EQ("" RED RESET, ElideMiddle(input, 0));
+  EXPECT_EQ("." RED RESET, ElideMiddle(input, 1));
+  EXPECT_EQ(".." RED RESET, ElideMiddle(input, 2));
+  EXPECT_EQ("..." RED RESET, ElideMiddle(input, 3));
+  EXPECT_EQ("..." RED RESET "z", ElideMiddle(input, 4));
+  EXPECT_EQ("a..." RED RESET "z", ElideMiddle(input, 5));
+  EXPECT_EQ("a..." RED RESET "yz", ElideMiddle(input, 6));
+  EXPECT_EQ("ab..." RED RESET "yz", ElideMiddle(input, 7));
+  EXPECT_EQ("ab..." RED RESET "xyz", ElideMiddle(input, 8));
+  EXPECT_EQ("abc..." RED RESET "xyz", ElideMiddle(input, 9));
+  EXPECT_EQ("abc..." RED RESET "wxyz", ElideMiddle(input, 10));
+  EXPECT_EQ("abcd..." RED RESET "wxyz", ElideMiddle(input, 11));
+  EXPECT_EQ("abcd..." RED RESET "vwxyz", ElideMiddle(input, 12));
+
+  EXPECT_EQ("abcd" RED "ef..." RESET "uvwxyz", ElideMiddle(input, 15));
+  EXPECT_EQ("abcd" RED "ef..." RESET "tuvwxyz", ElideMiddle(input, 16));
+  EXPECT_EQ("abcd" RED "efg..." RESET "tuvwxyz", ElideMiddle(input, 17));
+  EXPECT_EQ("abcd" RED "efg..." RESET "stuvwxyz", ElideMiddle(input, 18));
+  EXPECT_EQ("abcd" RED "efg" RESET "h...stuvwxyz", ElideMiddle(input, 19));
+
+  input = "abcdef" RED "A" RESET "BC";
+  EXPECT_EQ("..." RED RESET "C", ElideMiddle(input, 4));
+  EXPECT_EQ("a..." RED RESET "C", ElideMiddle(input, 5));
+  EXPECT_EQ("a..." RED RESET "BC", ElideMiddle(input, 6));
+  EXPECT_EQ("ab..." RED RESET "BC", ElideMiddle(input, 7));
+  EXPECT_EQ("ab..." RED "A" RESET "BC", ElideMiddle(input, 8));
+  EXPECT_EQ("abcdef" RED "A" RESET "BC", ElideMiddle(input, 9));
+}
+
+#undef RESET
+#undef RED
+#undef NOTHING
+#undef MAGENTA
diff --git a/src/eval_env.cc b/src/eval_env.cc
index 796a326..9f6a5dd 100644
--- a/src/eval_env.cc
+++ b/src/eval_env.cc
@@ -31,22 +31,22 @@
   bindings_[key] = val;
 }
 
-void BindingEnv::AddRule(const Rule* rule) {
+void BindingEnv::AddRule(std::unique_ptr<const Rule> rule) {
   assert(LookupRuleCurrentScope(rule->name()) == NULL);
-  rules_[rule->name()] = rule;
+  rules_[rule->name()] = std::move(rule);
 }
 
 const Rule* BindingEnv::LookupRuleCurrentScope(const string& rule_name) {
-  map<string, const Rule*>::iterator i = rules_.find(rule_name);
+  auto i = rules_.find(rule_name);
   if (i == rules_.end())
     return NULL;
-  return i->second;
+  return i->second.get();
 }
 
 const Rule* BindingEnv::LookupRule(const string& rule_name) {
-  map<string, const Rule*>::iterator i = rules_.find(rule_name);
+  auto i = rules_.find(rule_name);
   if (i != rules_.end())
-    return i->second;
+    return i->second.get();
   if (parent_)
     return parent_->LookupRule(rule_name);
   return NULL;
@@ -63,6 +63,16 @@
   return &i->second;
 }
 
+std::unique_ptr<Rule> Rule::Phony() {
+  auto rule = std::unique_ptr<Rule>(new Rule("phony"));
+  rule->phony_ = true;
+  return rule;
+}
+
+bool Rule::IsPhony() const {
+  return phony_;
+}
+
 // static
 bool Rule::IsReservedBinding(const string& var) {
   return var == "command" ||
@@ -78,7 +88,7 @@
       var == "msvc_deps_prefix";
 }
 
-const map<string, const Rule*>& BindingEnv::GetRules() const {
+const map<string, std::unique_ptr<const Rule>>& BindingEnv::GetRules() const {
   return rules_;
 }
 
@@ -99,6 +109,10 @@
 }
 
 string EvalString::Evaluate(Env* env) const {
+  if (parsed_.empty()) {
+    return single_token_;
+  }
+
   string result;
   for (TokenList::const_iterator i = parsed_.begin(); i != parsed_.end(); ++i) {
     if (i->second == RAW)
@@ -110,40 +124,57 @@
 }
 
 void EvalString::AddText(StringPiece text) {
-  // Add it to the end of an existing RAW token if possible.
-  if (!parsed_.empty() && parsed_.back().second == RAW) {
-    parsed_.back().first.append(text.str_, text.len_);
+  if (parsed_.empty()) {
+    single_token_.append(text.begin(), text.end());
+  } else if (!parsed_.empty() && parsed_.back().second == RAW) {
+    parsed_.back().first.append(text.begin(), text.end());
   } else {
-    parsed_.push_back(make_pair(text.AsString(), RAW));
+    parsed_.push_back(std::make_pair(text.AsString(), RAW));
   }
 }
+
 void EvalString::AddSpecial(StringPiece text) {
-  parsed_.push_back(make_pair(text.AsString(), SPECIAL));
+  if (parsed_.empty() && !single_token_.empty()) {
+    // Going from one to two tokens, so we can no longer apply
+    // our single_token_ optimization and need to push everything
+    // onto the vector.
+    parsed_.push_back(std::make_pair(std::move(single_token_), RAW));
+  }
+  parsed_.push_back(std::make_pair(text.AsString(), SPECIAL));
 }
 
 string EvalString::Serialize() const {
   string result;
-  for (TokenList::const_iterator i = parsed_.begin();
-       i != parsed_.end(); ++i) {
+  if (parsed_.empty() && !single_token_.empty()) {
     result.append("[");
-    if (i->second == SPECIAL)
-      result.append("$");
-    result.append(i->first);
+    result.append(single_token_);
     result.append("]");
+  } else {
+    for (const auto& pair : parsed_) {
+      result.append("[");
+      if (pair.second == SPECIAL)
+        result.append("$");
+      result.append(pair.first.begin(), pair.first.end());
+      result.append("]");
+    }
   }
   return result;
 }
 
 string EvalString::Unparse() const {
   string result;
-  for (TokenList::const_iterator i = parsed_.begin();
-       i != parsed_.end(); ++i) {
-    bool special = (i->second == SPECIAL);
-    if (special)
-      result.append("${");
-    result.append(i->first);
-    if (special)
-      result.append("}");
+  if (parsed_.empty() && !single_token_.empty()) {
+    result.append(single_token_.begin(), single_token_.end());
+  } else {
+    for (TokenList::const_iterator i = parsed_.begin();
+         i != parsed_.end(); ++i) {
+      bool special = (i->second == SPECIAL);
+      if (special)
+        result.append("${");
+      result.append(i->first.begin(), i->first.end());
+      if (special)
+        result.append("}");
+    }
   }
   return result;
 }
diff --git a/src/eval_env.h b/src/eval_env.h
index 677dc21..84c87fc 100644
--- a/src/eval_env.h
+++ b/src/eval_env.h
@@ -16,6 +16,7 @@
 #define NINJA_EVAL_ENV_H_
 
 #include <map>
+#include <memory>
 #include <string>
 #include <vector>
 
@@ -39,8 +40,8 @@
   /// @return The string with variables not expanded.
   std::string Unparse() const;
 
-  void Clear() { parsed_.clear(); }
-  bool empty() const { return parsed_.empty(); }
+  void Clear() { parsed_.clear(); single_token_.clear(); }
+  bool empty() const { return parsed_.empty() && single_token_.empty(); }
 
   void AddText(StringPiece text);
   void AddSpecial(StringPiece text);
@@ -53,12 +54,22 @@
   enum TokenType { RAW, SPECIAL };
   typedef std::vector<std::pair<std::string, TokenType> > TokenList;
   TokenList parsed_;
+
+  // If we hold only a single RAW token, then we keep it here instead of
+  // pushing it on TokenList. This saves a bunch of allocations for
+  // what is a common case. If parsed_ is nonempty, then this value
+  // must be ignored.
+  std::string single_token_;
 };
 
 /// An invocable build command and associated metadata (description, etc.).
 struct Rule {
   explicit Rule(const std::string& name) : name_(name) {}
 
+  static std::unique_ptr<Rule> Phony();
+
+  bool IsPhony() const;
+
   const std::string& name() const { return name_; }
 
   void AddBinding(const std::string& key, const EvalString& val);
@@ -74,6 +85,7 @@
   std::string name_;
   typedef std::map<std::string, EvalString> Bindings;
   Bindings bindings_;
+  bool phony_ = false;
 };
 
 /// An Env which contains a mapping of variables to values
@@ -85,10 +97,10 @@
   virtual ~BindingEnv() {}
   virtual std::string LookupVariable(const std::string& var);
 
-  void AddRule(const Rule* rule);
+  void AddRule(std::unique_ptr<const Rule> rule);
   const Rule* LookupRule(const std::string& rule_name);
   const Rule* LookupRuleCurrentScope(const std::string& rule_name);
-  const std::map<std::string, const Rule*>& GetRules() const;
+  const std::map<std::string, std::unique_ptr<const Rule>>& GetRules() const;
 
   void AddBinding(const std::string& key, const std::string& val);
 
@@ -102,7 +114,7 @@
 
 private:
   std::map<std::string, std::string> bindings_;
-  std::map<std::string, const Rule*> rules_;
+  std::map<std::string, std::unique_ptr<const Rule>> rules_;
   BindingEnv* parent_;
 };
 
diff --git a/src/exit_status.h b/src/exit_status.h
index a714ece..73f9470 100644
--- a/src/exit_status.h
+++ b/src/exit_status.h
@@ -15,10 +15,19 @@
 #ifndef NINJA_EXIT_STATUS_H_
 #define NINJA_EXIT_STATUS_H_
 
-enum ExitStatus {
-  ExitSuccess,
+// The underlying type of the ExitStatus enum, used to represent a platform-specific
+// process exit code.
+#ifdef _WIN32
+#define EXIT_STATUS_TYPE unsigned long
+#else  // !_WIN32
+#define EXIT_STATUS_TYPE int
+#endif  // !_WIN32
+
+
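+// Note: ExitInterrupted uses 130, which matches the common shell convention
+// of reporting 128 + signal number (SIGINT == 2) for interrupted processes.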
+enum ExitStatus : EXIT_STATUS_TYPE {
+  ExitSuccess=0,
   ExitFailure,
-  ExitInterrupted
+  ExitInterrupted=130,
 };
 
 #endif  // NINJA_EXIT_STATUS_H_
diff --git a/src/explanations.h b/src/explanations.h
new file mode 100644
index 0000000..375b29f
--- /dev/null
+++ b/src/explanations.h
@@ -0,0 +1,88 @@
+// Copyright 2024 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include <stdarg.h>
+#include <stdio.h>
+
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+/// A class used to record a list of explanation strings associated
+/// with a given 'item' pointer. This is used to implement the
+/// `-d explain` feature.
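+///
+/// A short usage sketch (illustrative; here |node| is a Node* used as the
+/// item key, as in graph.cc):
+///
+///   Explanations explanations;
+///   explanations.Record(node, "output %s doesn't exist",
+///                       node->path().c_str());
+///
+///   std::vector<std::string> lines;
+///   explanations.LookupAndAppend(node, &lines);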
+struct Explanations {
+ public:
+  /// Record an explanation for |item| if this instance is enabled.
+  void Record(const void* item, const char* fmt, ...) {
+    va_list args;
+    va_start(args, fmt);
+    RecordArgs(item, fmt, args);
+    va_end(args);
+  }
+
+  /// Same as Record(), but uses a va_list to pass formatting arguments.
+  void RecordArgs(const void* item, const char* fmt, va_list args) {
+    char buffer[1024];
+    vsnprintf(buffer, sizeof(buffer), fmt, args);
+    map_[item].emplace_back(buffer);
+  }
+
+  /// Lookup the explanations recorded for |item|, and append them
+  /// to |*out|, if any.
+  void LookupAndAppend(const void* item, std::vector<std::string>* out) {
+    auto it = map_.find(item);
+    if (it == map_.end())
+      return;
+
+    for (const auto& explanation : it->second)
+      out->push_back(explanation);
+  }
+
+ private:
+  std::unordered_map<const void*, std::vector<std::string>> map_;
+};
+
+/// Convenience wrapper for an Explanations pointer, which can be null
+/// if no explanations need to be recorded.
+struct OptionalExplanations {
+  OptionalExplanations(Explanations* explanations)
+      : explanations_(explanations) {}
+
+  void Record(const void* item, const char* fmt, ...) {
+    if (explanations_) {
+      va_list args;
+      va_start(args, fmt);
+      explanations_->RecordArgs(item, fmt, args);
+      va_end(args);
+    }
+  }
+
+  void RecordArgs(const void* item, const char* fmt, va_list args) {
+    if (explanations_)
+      explanations_->RecordArgs(item, fmt, args);
+  }
+
+  void LookupAndAppend(const void* item, std::vector<std::string>* out) {
+    if (explanations_)
+      explanations_->LookupAndAppend(item, out);
+  }
+
+  Explanations* ptr() const { return explanations_; }
+
+ private:
+  Explanations* explanations_;
+};
diff --git a/src/explanations_test.cc b/src/explanations_test.cc
new file mode 100644
index 0000000..e46600f
--- /dev/null
+++ b/src/explanations_test.cc
@@ -0,0 +1,97 @@
+// Copyright 2024 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "explanations.h"
+
+#include "test.h"
+
+namespace {
+
+const void* MakeItem(size_t v) {
+  return reinterpret_cast<const void*>(v);
+}
+
+}  // namespace
+
+TEST(Explanations, Explanations) {
+  Explanations exp;
+
+  exp.Record(MakeItem(1), "first explanation");
+  exp.Record(MakeItem(1), "second explanation");
+  exp.Record(MakeItem(2), "third explanation");
+  exp.Record(MakeItem(2), "fourth %s", "explanation");
+
+  std::vector<std::string> list;
+
+  exp.LookupAndAppend(MakeItem(0), &list);
+  ASSERT_TRUE(list.empty());
+
+  exp.LookupAndAppend(MakeItem(1), &list);
+  ASSERT_EQ(2u, list.size());
+  EXPECT_EQ(list[0], "first explanation");
+  EXPECT_EQ(list[1], "second explanation");
+
+  exp.LookupAndAppend(MakeItem(2), &list);
+  ASSERT_EQ(4u, list.size());
+  EXPECT_EQ(list[0], "first explanation");
+  EXPECT_EQ(list[1], "second explanation");
+  EXPECT_EQ(list[2], "third explanation");
+  EXPECT_EQ(list[3], "fourth explanation");
+}
+
+TEST(Explanations, OptionalExplanationsNonNull) {
+  Explanations parent;
+  OptionalExplanations exp(&parent);
+
+  exp.Record(MakeItem(1), "first explanation");
+  exp.Record(MakeItem(1), "second explanation");
+  exp.Record(MakeItem(2), "third explanation");
+  exp.Record(MakeItem(2), "fourth %s", "explanation");
+
+  std::vector<std::string> list;
+
+  exp.LookupAndAppend(MakeItem(0), &list);
+  ASSERT_TRUE(list.empty());
+
+  exp.LookupAndAppend(MakeItem(1), &list);
+  ASSERT_EQ(2u, list.size());
+  EXPECT_EQ(list[0], "first explanation");
+  EXPECT_EQ(list[1], "second explanation");
+
+  exp.LookupAndAppend(MakeItem(2), &list);
+  ASSERT_EQ(4u, list.size());
+  EXPECT_EQ(list[0], "first explanation");
+  EXPECT_EQ(list[1], "second explanation");
+  EXPECT_EQ(list[2], "third explanation");
+  EXPECT_EQ(list[3], "fourth explanation");
+}
+
+TEST(Explanations, OptionalExplanationsWithNullPointer) {
+  OptionalExplanations exp(nullptr);
+
+  exp.Record(MakeItem(1), "first explanation");
+  exp.Record(MakeItem(1), "second explanation");
+  exp.Record(MakeItem(2), "third explanation");
+  exp.Record(MakeItem(2), "fourth %s", "explanation");
+
+  std::vector<std::string> list;
+  exp.LookupAndAppend(MakeItem(0), &list);
+  ASSERT_TRUE(list.empty());
+
+  exp.LookupAndAppend(MakeItem(1), &list);
+  ASSERT_TRUE(list.empty());
+
+  exp.LookupAndAppend(MakeItem(2), &list);
+  ASSERT_TRUE(list.empty());
+}
diff --git a/src/graph.cc b/src/graph.cc
index 31b109a..84c6006 100644
--- a/src/graph.cc
+++ b/src/graph.cc
@@ -91,7 +91,8 @@
     if (!node->StatIfNecessary(disk_interface_, err))
       return false;
     if (!node->exists())
-      EXPLAIN("%s has no in-edge and is missing", node->path().c_str());
+      explanations_.Record(node, "%s has no in-edge and is missing",
+                           node->path().c_str());
     node->set_dirty(!node->exists());
     return true;
   }
@@ -151,7 +152,7 @@
       if (!err->empty())
         return false;
       // Failed to load dependency info: rebuild to regenerate it.
-      // LoadDeps() did EXPLAIN() already, no need to do it here.
+      // LoadDeps() did explanations_.Record() already, no need to do it here.
       dirty = edge->deps_missing_ = true;
     }
   }
@@ -182,7 +183,7 @@
       // If a regular input is dirty (or missing), we're dirty.
       // Otherwise consider mtime.
       if ((*i)->dirty()) {
-        EXPLAIN("%s is dirty", (*i)->path().c_str());
+        explanations_.Record(node, "%s is dirty", (*i)->path().c_str());
         dirty = true;
       } else {
         if (!most_recent_input || (*i)->mtime() > most_recent_input->mtime()) {
@@ -282,8 +283,9 @@
     // Phony edges don't write any output.  Outputs are only dirty if
     // there are no inputs and we're missing the output.
     if (edge->inputs_.empty() && !output->exists()) {
-      EXPLAIN("output %s of phony edge with no inputs doesn't exist",
-              output->path().c_str());
+      explanations_.Record(
+          output, "output %s of phony edge with no inputs doesn't exist",
+          output->path().c_str());
       return true;
     }
 
@@ -299,7 +301,8 @@
 
   // Dirty if we're missing the output.
   if (!output->exists()) {
-    EXPLAIN("output %s doesn't exist", output->path().c_str());
+    explanations_.Record(output, "output %s doesn't exist",
+                         output->path().c_str());
     return true;
   }
 
@@ -319,11 +322,12 @@
 
   // Dirty if the output is older than the input.
   if (!used_restat && most_recent_input && output->mtime() < most_recent_input->mtime()) {
-    EXPLAIN("output %s older than most recent input %s "
-            "(%" PRId64 " vs %" PRId64 ")",
-            output->path().c_str(),
-            most_recent_input->path().c_str(),
-            output->mtime(), most_recent_input->mtime());
+    explanations_.Record(output,
+                         "output %s older than most recent input %s "
+                         "(%" PRId64 " vs %" PRId64 ")",
+                         output->path().c_str(),
+                         most_recent_input->path().c_str(), output->mtime(),
+                         most_recent_input->mtime());
     return true;
   }
 
@@ -335,7 +339,8 @@
         // May also be dirty due to the command changing since the last build.
         // But if this is a generator rule, the command changing does not make us
         // dirty.
-        EXPLAIN("command line changed for %s", output->path().c_str());
+        explanations_.Record(output, "command line changed for %s",
+                             output->path().c_str());
         return true;
       }
       if (most_recent_input && entry->mtime < most_recent_input->mtime()) {
@@ -345,14 +350,18 @@
         // exited with an error or was interrupted. If this was a restat rule,
         // then we only check the recorded mtime against the most recent input
         // mtime and ignore the actual output's mtime above.
-        EXPLAIN("recorded mtime of %s older than most recent input %s (%" PRId64 " vs %" PRId64 ")",
-                output->path().c_str(), most_recent_input->path().c_str(),
-                entry->mtime, most_recent_input->mtime());
+        explanations_.Record(
+            output,
+            "recorded mtime of %s older than most recent input %s (%" PRId64
+            " vs %" PRId64 ")",
+            output->path().c_str(), most_recent_input->path().c_str(),
+            entry->mtime, most_recent_input->mtime());
         return true;
       }
     }
     if (!entry && !generator) {
-      EXPLAIN("command line not found in log for %s", output->path().c_str());
+      explanations_.Record(output, "command line not found in log for %s",
+                           output->path().c_str());
       return true;
     }
   }
@@ -487,28 +496,6 @@
   return result;
 }
 
-void Edge::CollectInputs(bool shell_escape,
-                         std::vector<std::string>* out) const {
-  for (std::vector<Node*>::const_iterator it = inputs_.begin();
-       it != inputs_.end(); ++it) {
-    std::string path = (*it)->PathDecanonicalized();
-    if (shell_escape) {
-      std::string unescaped;
-      unescaped.swap(path);
-#ifdef _WIN32
-      GetWin32EscapedString(unescaped, &path);
-#else
-      GetShellEscapedString(unescaped, &path);
-#endif
-    }
-#if __cplusplus >= 201103L
-    out->push_back(std::move(path));
-#else
-    out->push_back(path);
-#endif
-  }
-}
-
 std::string Edge::EvaluateCommand(const bool incl_rsp_file) const {
   string command = GetBinding("command");
   if (incl_rsp_file) {
@@ -572,7 +559,7 @@
 }
 
 bool Edge::is_phony() const {
-  return rule_ == &State::kPhonyRule;
+  return rule_->IsPhony();
 }
 
 bool Edge::use_console() const {
@@ -666,8 +653,9 @@
     return false;
   }
   // On a missing depfile: return false and empty *err.
+  Node* first_output = edge->outputs_[0];
   if (content.empty()) {
-    EXPLAIN("depfile '%s' is missing", path.c_str());
+    explanations_.Record(first_output, "depfile '%s' is missing", path.c_str());
     return false;
   }
 
@@ -692,11 +680,12 @@
 
   // Check that this depfile matches the edge's output, if not return false to
   // mark the edge as dirty.
-  Node* first_output = edge->outputs_[0];
   StringPiece opath = StringPiece(first_output->path());
   if (opath != *primary_out) {
-    EXPLAIN("expected depfile '%s' to mention '%s', got '%s'", path.c_str(),
-            first_output->path().c_str(), primary_out->AsString().c_str());
+    explanations_.Record(first_output,
+                         "expected depfile '%s' to mention '%s', got '%s'",
+                         path.c_str(), first_output->path().c_str(),
+                         primary_out->AsString().c_str());
     return false;
   }
 
@@ -737,23 +726,27 @@
   Node* output = edge->outputs_[0];
   DepsLog::Deps* deps = deps_log_ ? deps_log_->GetDeps(output) : NULL;
   if (!deps) {
-    EXPLAIN("deps for '%s' are missing", output->path().c_str());
+    explanations_.Record(output, "deps for '%s' are missing",
+                         output->path().c_str());
     return false;
   }
 
   // Deps are invalid if the output is newer than the deps.
   if (output->mtime() > deps->mtime) {
-    EXPLAIN("stored deps info out of date for '%s' (%" PRId64 " vs %" PRId64 ")",
-            output->path().c_str(), deps->mtime, output->mtime());
+    explanations_.Record(output,
+                         "stored deps info out of date for '%s' (%" PRId64
+                         " vs %" PRId64 ")",
+                         output->path().c_str(), deps->mtime, output->mtime());
     return false;
   }
 
-  vector<Node*>::iterator implicit_dep =
-      PreallocateSpace(edge, deps->node_count);
-  for (int i = 0; i < deps->node_count; ++i, ++implicit_dep) {
-    Node* node = deps->nodes[i];
-    *implicit_dep = node;
-    node->AddOutEdge(edge);
+  Node** nodes = deps->nodes;
+  size_t node_count = deps->node_count;
+  edge->inputs_.insert(edge->inputs_.end() - edge->order_only_deps_,
+                       nodes, nodes + node_count);
+  edge->implicit_deps_ += node_count;
+  for (size_t i = 0; i < node_count; ++i) {
+    nodes[i]->AddOutEdge(edge);
   }
   return true;
 }
@@ -765,3 +758,47 @@
   edge->implicit_deps_ += count;
   return edge->inputs_.end() - edge->order_only_deps_ - count;
 }
+
+void InputsCollector::VisitNode(const Node* node) {
+  const Edge* edge = node->in_edge();
+
+  if (!edge)  // A source file.
+    return;
+
+  // Add inputs of the producing edge to the result,
+  // except if they are themselves produced by a phony
+  // edge.
+  for (const Node* input : edge->inputs_) {
+    if (!visited_nodes_.insert(input).second)
+      continue;
+
+    VisitNode(input);
+
+    const Edge* input_edge = input->in_edge();
+    if (!(input_edge && input_edge->is_phony())) {
+      inputs_.push_back(input);
+    }
+  }
+}
+
+std::vector<std::string> InputsCollector::GetInputsAsStrings(
+    bool shell_escape) const {
+  std::vector<std::string> result;
+  result.reserve(inputs_.size());
+
+  for (const Node* input : inputs_) {
+    std::string unescaped = input->PathDecanonicalized();
+    if (shell_escape) {
+      std::string path;
+#ifdef _WIN32
+      GetWin32EscapedString(unescaped, &path);
+#else
+      GetShellEscapedString(unescaped, &path);
+#endif
+      result.push_back(std::move(path));
+    } else {
+      result.push_back(std::move(unescaped));
+    }
+  }
+  return result;
+}
diff --git a/src/graph.h b/src/graph.h
index 820a265..d98f1f9 100644
--- a/src/graph.h
+++ b/src/graph.h
@@ -16,13 +16,15 @@
 #define NINJA_GRAPH_H_
 
 #include <algorithm>
+#include <queue>
 #include <set>
 #include <string>
 #include <vector>
-#include <queue>
 
 #include "dyndep.h"
 #include "eval_env.h"
+#include "explanations.h"
+#include "jobserver.h"
 #include "timestamp.h"
 #include "util.h"
 
@@ -200,9 +202,6 @@
 
   void Dump(const char* prefix="") const;
 
-  // Append all edge explicit inputs to |*out|. Possibly with shell escaping.
-  void CollectInputs(bool shell_escape, std::vector<std::string>* out) const;
-
   // critical_path_weight is the priority during build scheduling. The
   // "critical path" between this edge's inputs and any target node is
   // the path which maximises the sum of weights along that path.
@@ -265,6 +264,9 @@
   bool use_console() const;
   bool maybe_phonycycle_diagnostic() const;
 
+  /// A Jobserver slot instance. Invalid by default.
+  Jobserver::Slot job_slot_;
+
   // Historical info: how long did this edge take last time,
   // as per .ninja_log, if known? Defaults to -1 if unknown.
   int64_t prev_elapsed_time_millis = -1;
@@ -283,9 +285,11 @@
 struct ImplicitDepLoader {
   ImplicitDepLoader(State* state, DepsLog* deps_log,
                     DiskInterface* disk_interface,
-                    DepfileParserOptions const* depfile_parser_options)
+                    DepfileParserOptions const* depfile_parser_options,
+                    Explanations* explanations)
       : state_(state), disk_interface_(disk_interface), deps_log_(deps_log),
-        depfile_parser_options_(depfile_parser_options) {}
+        depfile_parser_options_(depfile_parser_options),
+        explanations_(explanations) {}
 
   /// Load implicit dependencies for \a edge.
   /// @return false on error (without filling \a err if info is just missing
@@ -319,6 +323,7 @@
   DiskInterface* disk_interface_;
   DepsLog* deps_log_;
   DepfileParserOptions const* depfile_parser_options_;
+  OptionalExplanations explanations_;
 };
 
 
@@ -327,11 +332,12 @@
 struct DependencyScan {
   DependencyScan(State* state, BuildLog* build_log, DepsLog* deps_log,
                  DiskInterface* disk_interface,
-                 DepfileParserOptions const* depfile_parser_options)
-      : build_log_(build_log),
-        disk_interface_(disk_interface),
-        dep_loader_(state, deps_log, disk_interface, depfile_parser_options),
-        dyndep_loader_(state, disk_interface) {}
+                 DepfileParserOptions const* depfile_parser_options,
+                 Explanations* explanations)
+      : build_log_(build_log), disk_interface_(disk_interface),
+        dep_loader_(state, deps_log, disk_interface, depfile_parser_options,
+                    explanations),
+        dyndep_loader_(state, disk_interface), explanations_(explanations) {}
 
   /// Update the |dirty_| state of the given nodes by transitively inspecting
   /// their input edges.
@@ -375,10 +381,13 @@
   bool RecomputeOutputDirty(const Edge* edge, const Node* most_recent_input,
                             const std::string& command, Node* output);
 
+  void RecordExplanation(const Node* node, const char* fmt, ...);
+
   BuildLog* build_log_;
   DiskInterface* disk_interface_;
   ImplicitDepLoader dep_loader_;
   DyndepLoader dyndep_loader_;
+  OptionalExplanations explanations_;
 };
 
 // Implements a less comparison for edges by priority, where highest
@@ -417,4 +426,41 @@
   }
 };
 
+/// A class used to collect the transitive set of inputs from a given set
+/// of starting nodes. Used to implement the `inputs` tool.
+///
+/// When collecting inputs, the outputs of phony edges are always omitted
+/// from the result, but the dependency walk still follows them.
+///
+/// Usage is:
+/// - Create instance.
+/// - Call VisitNode() for each root node to collect inputs from.
+/// - Call inputs() to retrieve the list of input node pointers.
+/// - Call GetInputsAsStrings() to retrieve the list of inputs as a string
+/// vector.
+///
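+/// A minimal sketch of that sequence (assuming |target| is a Node* for a
+/// root target):
+///
+///   InputsCollector collector;
+///   collector.VisitNode(target);
+///   std::vector<std::string> inputs = collector.GetInputsAsStrings();
+///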
+struct InputsCollector {
+  /// Visit a single @arg node during this collection.
+  void VisitNode(const Node* node);
+
+  /// Retrieve list of visited input nodes. A dependency always appears
+  /// before its dependents in the result, but final order depends on the
+  /// order of the VisitNode() calls performed before this.
+  const std::vector<const Node*>& inputs() const { return inputs_; }
+
+  /// Same as inputs(), but returns the list of visited nodes as a list of
+  /// strings, with optional shell escaping.
+  std::vector<std::string> GetInputsAsStrings(bool shell_escape = false) const;
+
+  /// Reset collector state.
+  void Reset() {
+    inputs_.clear();
+    visited_nodes_.clear();
+  }
+
+ private:
+  std::vector<const Node*> inputs_;
+  std::set<const Node*> visited_nodes_;
+};
+
 #endif  // NINJA_GRAPH_H_
diff --git a/src/graph_test.cc b/src/graph_test.cc
index fb0513c..d29118a 100644
--- a/src/graph_test.cc
+++ b/src/graph_test.cc
@@ -13,14 +13,15 @@
 // limitations under the License.
 
 #include "graph.h"
-#include "build.h"
 
+#include "build.h"
+#include "command_collector.h"
 #include "test.h"
 
 using namespace std;
 
 struct GraphTest : public StateTestWithBuiltinRules {
-  GraphTest() : scan_(&state_, NULL, NULL, &fs_, NULL) {}
+  GraphTest() : scan_(&state_, NULL, NULL, &fs_, NULL, NULL) {}
 
   VirtualFileSystem fs_;
   DependencyScan scan_;
@@ -108,7 +109,7 @@
 "build out | out.imp: cat in\n"));
 
   Edge* edge = GetNode("out")->in_edge();
-  EXPECT_EQ(2, edge->outputs_.size());
+  EXPECT_EQ(size_t(2), edge->outputs_.size());
   EXPECT_EQ("out", edge->outputs_[0]->path());
   EXPECT_EQ("out.imp", edge->outputs_[1]->path());
   EXPECT_EQ(1, edge->implicit_outs_);
@@ -150,7 +151,7 @@
 "build | out.imp: cat in\n"));
 
   Edge* edge = GetNode("out.imp")->in_edge();
-  EXPECT_EQ(1, edge->outputs_.size());
+  EXPECT_EQ(size_t(1), edge->outputs_.size());
   EXPECT_EQ("out.imp", edge->outputs_[0]->path());
   EXPECT_EQ(1, edge->implicit_outs_);
   EXPECT_EQ(edge, GetNode("out.imp")->in_edge());
@@ -215,28 +216,90 @@
   }
 }
 
-TEST_F(GraphTest, CollectInputs) {
+TEST_F(GraphTest, InputsCollector) {
+  // Build plan for the following graph:
+  //
+  //      in1
+  //       |___________
+  //       |           |
+  //      ===         ===
+  //       |           |
+  //      out1        mid1
+  //       |       ____|_____
+  //       |      |          |
+  //       |     ===      =======
+  //       |      |       |     |
+  //       |     out2    out3  out4
+  //       |      |       |
+  //      =======phony======
+  //              |
+  //             all
+  //
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+                                      "build out1: cat in1\n"
+                                      "build mid1: cat in1\n"
+                                      "build out2: cat mid1\n"
+                                      "build out3 out4: cat mid1\n"
+                                      "build all: phony out1 out2 out3\n"));
+
+  InputsCollector collector;
+
+  // Start visit from out1, this should add in1 to the inputs.
+  collector.Reset();
+  collector.VisitNode(GetNode("out1"));
+  auto inputs = collector.GetInputsAsStrings();
+  ASSERT_EQ(1u, inputs.size());
+  EXPECT_EQ("in1", inputs[0]);
+
+  // Add a visit from out2, this should add mid1.
+  collector.VisitNode(GetNode("out2"));
+  inputs = collector.GetInputsAsStrings();
+  ASSERT_EQ(2u, inputs.size());
+  EXPECT_EQ("in1", inputs[0]);
+  EXPECT_EQ("mid1", inputs[1]);
+
+  // Another visit from all, this should add out1, out2 and out3,
+  // but not out4.
+  collector.VisitNode(GetNode("all"));
+  inputs = collector.GetInputsAsStrings();
+  ASSERT_EQ(5u, inputs.size());
+  EXPECT_EQ("in1", inputs[0]);
+  EXPECT_EQ("mid1", inputs[1]);
+  EXPECT_EQ("out1", inputs[2]);
+  EXPECT_EQ("out2", inputs[3]);
+  EXPECT_EQ("out3", inputs[4]);
+
+  collector.Reset();
+
+  // Starting directly from all will add out1 before mid1, compared
+  // to the previous example above.
+  collector.VisitNode(GetNode("all"));
+  inputs = collector.GetInputsAsStrings();
+  ASSERT_EQ(5u, inputs.size());
+  EXPECT_EQ("in1", inputs[0]);
+  EXPECT_EQ("out1", inputs[1]);
+  EXPECT_EQ("mid1", inputs[2]);
+  EXPECT_EQ("out2", inputs[3]);
+  EXPECT_EQ("out3", inputs[4]);
+}
+
+TEST_F(GraphTest, InputsCollectorWithEscapes) {
   ASSERT_NO_FATAL_FAILURE(AssertParse(
       &state_,
       "build out$ 1: cat in1 in2 in$ with$ space | implicit || order_only\n"));
 
-  std::vector<std::string> inputs;
-  Edge* edge = GetNode("out 1")->in_edge();
-
-  // Test without shell escaping.
-  inputs.clear();
-  edge->CollectInputs(false, &inputs);
-  EXPECT_EQ(5u, inputs.size());
+  InputsCollector collector;
+  collector.VisitNode(GetNode("out 1"));
+  auto inputs = collector.GetInputsAsStrings();
+  ASSERT_EQ(5u, inputs.size());
   EXPECT_EQ("in1", inputs[0]);
   EXPECT_EQ("in2", inputs[1]);
   EXPECT_EQ("in with space", inputs[2]);
   EXPECT_EQ("implicit", inputs[3]);
   EXPECT_EQ("order_only", inputs[4]);
 
-  // Test with shell escaping.
-  inputs.clear();
-  edge->CollectInputs(true, &inputs);
-  EXPECT_EQ(5u, inputs.size());
+  inputs = collector.GetInputsAsStrings(true);
+  ASSERT_EQ(5u, inputs.size());
   EXPECT_EQ("in1", inputs[0]);
   EXPECT_EQ("in2", inputs[1]);
 #ifdef _WIN32
@@ -248,6 +311,54 @@
   EXPECT_EQ("order_only", inputs[4]);
 }
 
+TEST_F(GraphTest, CommandCollector) {
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+                                      "build out1: cat in1\n"
+                                      "build mid1: cat in1\n"
+                                      "build out2: cat mid1\n"
+                                      "build out3 out4: cat mid1\n"
+                                      "build all: phony out1 out2 out3\n"));
+  {
+    CommandCollector collector;
+    auto& edges = collector.in_edges;
+
+    // Start visit from out2; this should add `build mid1` and `build out2` to
+    // the edge list.
+    collector.CollectFrom(GetNode("out2"));
+    ASSERT_EQ(2u, edges.size());
+    EXPECT_EQ("cat in1 > mid1", edges[0]->EvaluateCommand());
+    EXPECT_EQ("cat mid1 > out2", edges[1]->EvaluateCommand());
+
+    // Add a visit from out1, this should append `build out1`
+    collector.CollectFrom(GetNode("out1"));
+    ASSERT_EQ(3u, edges.size());
+    EXPECT_EQ("cat in1 > out1", edges[2]->EvaluateCommand());
+
+    // Another visit from all; this should add edges for out1, out2 and out3,
+    // but not all (because it's phony).
+    collector.CollectFrom(GetNode("all"));
+    ASSERT_EQ(4u, edges.size());
+    EXPECT_EQ("cat in1 > mid1", edges[0]->EvaluateCommand());
+    EXPECT_EQ("cat mid1 > out2", edges[1]->EvaluateCommand());
+    EXPECT_EQ("cat in1 > out1", edges[2]->EvaluateCommand());
+    EXPECT_EQ("cat mid1 > out3 out4", edges[3]->EvaluateCommand());
+  }
+
+  {
+    CommandCollector collector;
+    auto& edges = collector.in_edges;
+
+    // Starting directly from all will add `build out1` before `build mid1`,
+    // compared to the previous example above.
+    collector.CollectFrom(GetNode("all"));
+    ASSERT_EQ(4u, edges.size());
+    EXPECT_EQ("cat in1 > out1", edges[0]->EvaluateCommand());
+    EXPECT_EQ("cat in1 > mid1", edges[1]->EvaluateCommand());
+    EXPECT_EQ("cat mid1 > out2", edges[2]->EvaluateCommand());
+    EXPECT_EQ("cat mid1 > out3 out4", edges[3]->EvaluateCommand());
+  }
+}
+
 TEST_F(GraphTest, VarInOutPathEscaping) {
   ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
 "build a$ b: cat no'space with$ space$$ no\"space2\n"));
@@ -438,7 +549,7 @@
   // but the depfile also adds b as an input), the deps should have been loaded
   // only once:
   Edge* edge = GetNode("a")->in_edge();
-  EXPECT_EQ(1, edge->inputs_.size());
+  EXPECT_EQ(size_t(1), edge->inputs_.size());
   EXPECT_EQ("b", edge->inputs_[0]->path());
 }
 
@@ -463,7 +574,7 @@
   // but c's in_edge has b as input but the depfile also adds |edge| as
   // output)), the deps should have been loaded only once:
   Edge* edge = GetNode("a")->in_edge();
-  EXPECT_EQ(1, edge->inputs_.size());
+  EXPECT_EQ(size_t(1), edge->inputs_.size());
   EXPECT_EQ("c", edge->inputs_[0]->path());
 }
 
@@ -490,7 +601,7 @@
   // but c's in_edge has b as input but the depfile also adds |edge| as
   // output)), the deps should have been loaded only once:
   Edge* edge = GetNode("a")->in_edge();
-  EXPECT_EQ(1, edge->inputs_.size());
+  EXPECT_EQ(size_t(1), edge->inputs_.size());
   EXPECT_EQ("c", edge->inputs_[0]->path());
 }
 
@@ -534,13 +645,13 @@
   EXPECT_FALSE(GetNode("dd")->dyndep_pending());
 
   Edge* edge = GetNode("out")->in_edge();
-  ASSERT_EQ(1u, edge->outputs_.size());
+  ASSERT_EQ(size_t(1), edge->outputs_.size());
   EXPECT_EQ("out", edge->outputs_[0]->path());
-  ASSERT_EQ(2u, edge->inputs_.size());
+  ASSERT_EQ(size_t(2), edge->inputs_.size());
   EXPECT_EQ("in", edge->inputs_[0]->path());
   EXPECT_EQ("dd", edge->inputs_[1]->path());
-  EXPECT_EQ(0u, edge->implicit_deps_);
-  EXPECT_EQ(1u, edge->order_only_deps_);
+  EXPECT_EQ(0, edge->implicit_deps_);
+  EXPECT_EQ(1, edge->order_only_deps_);
   EXPECT_FALSE(edge->GetBindingBool("restat"));
 }
 
@@ -564,14 +675,14 @@
   EXPECT_FALSE(GetNode("dd")->dyndep_pending());
 
   Edge* edge = GetNode("out1")->in_edge();
-  ASSERT_EQ(1u, edge->outputs_.size());
+  ASSERT_EQ(size_t(1), edge->outputs_.size());
   EXPECT_EQ("out1", edge->outputs_[0]->path());
-  ASSERT_EQ(3u, edge->inputs_.size());
+  ASSERT_EQ(size_t(3), edge->inputs_.size());
   EXPECT_EQ("in", edge->inputs_[0]->path());
   EXPECT_EQ("out2", edge->inputs_[1]->path());
   EXPECT_EQ("dd", edge->inputs_[2]->path());
-  EXPECT_EQ(1u, edge->implicit_deps_);
-  EXPECT_EQ(1u, edge->order_only_deps_);
+  EXPECT_EQ(1, edge->implicit_deps_);
+  EXPECT_EQ(1, edge->order_only_deps_);
   EXPECT_FALSE(edge->GetBindingBool("restat"));
 }
 
@@ -697,35 +808,35 @@
   EXPECT_FALSE(GetNode("dd")->dyndep_pending());
 
   Edge* edge1 = GetNode("out1")->in_edge();
-  ASSERT_EQ(2u, edge1->outputs_.size());
+  ASSERT_EQ(size_t(2), edge1->outputs_.size());
   EXPECT_EQ("out1", edge1->outputs_[0]->path());
   EXPECT_EQ("out1imp", edge1->outputs_[1]->path());
-  EXPECT_EQ(1u, edge1->implicit_outs_);
-  ASSERT_EQ(3u, edge1->inputs_.size());
+  EXPECT_EQ(1, edge1->implicit_outs_);
+  ASSERT_EQ(size_t(3), edge1->inputs_.size());
   EXPECT_EQ("in1", edge1->inputs_[0]->path());
   EXPECT_EQ("in1imp", edge1->inputs_[1]->path());
   EXPECT_EQ("dd", edge1->inputs_[2]->path());
-  EXPECT_EQ(1u, edge1->implicit_deps_);
-  EXPECT_EQ(1u, edge1->order_only_deps_);
+  EXPECT_EQ(1, edge1->implicit_deps_);
+  EXPECT_EQ(1, edge1->order_only_deps_);
   EXPECT_FALSE(edge1->GetBindingBool("restat"));
   EXPECT_EQ(edge1, GetNode("out1imp")->in_edge());
   Node* in1imp = GetNode("in1imp");
-  ASSERT_EQ(1u, in1imp->out_edges().size());
+  ASSERT_EQ(size_t(1), in1imp->out_edges().size());
   EXPECT_EQ(edge1, in1imp->out_edges()[0]);
 
   Edge* edge2 = GetNode("out2")->in_edge();
-  ASSERT_EQ(1u, edge2->outputs_.size());
+  ASSERT_EQ(size_t(1), edge2->outputs_.size());
   EXPECT_EQ("out2", edge2->outputs_[0]->path());
-  EXPECT_EQ(0u, edge2->implicit_outs_);
-  ASSERT_EQ(3u, edge2->inputs_.size());
+  EXPECT_EQ(0, edge2->implicit_outs_);
+  ASSERT_EQ(size_t(3), edge2->inputs_.size());
   EXPECT_EQ("in2", edge2->inputs_[0]->path());
   EXPECT_EQ("in2imp", edge2->inputs_[1]->path());
   EXPECT_EQ("dd", edge2->inputs_[2]->path());
-  EXPECT_EQ(1u, edge2->implicit_deps_);
-  EXPECT_EQ(1u, edge2->order_only_deps_);
+  EXPECT_EQ(1, edge2->implicit_deps_);
+  EXPECT_EQ(1, edge2->order_only_deps_);
   EXPECT_TRUE(edge2->GetBindingBool("restat"));
   Node* in2imp = GetNode("in2imp");
-  ASSERT_EQ(1u, in2imp->out_edges().size());
+  ASSERT_EQ(size_t(1), in2imp->out_edges().size());
   EXPECT_EQ(edge2, in2imp->out_edges()[0]);
 }
 
@@ -915,12 +1026,12 @@
 
   // Verify that "out.d" was loaded exactly once despite
   // circular reference discovered from dyndep file.
-  ASSERT_EQ(3u, edge->inputs_.size());
+  ASSERT_EQ(size_t(3), edge->inputs_.size());
   EXPECT_EQ("in", edge->inputs_[0]->path());
   EXPECT_EQ("inimp", edge->inputs_[1]->path());
   EXPECT_EQ("dd", edge->inputs_[2]->path());
-  EXPECT_EQ(1u, edge->implicit_deps_);
-  EXPECT_EQ(1u, edge->order_only_deps_);
+  EXPECT_EQ(1, edge->implicit_deps_);
+  EXPECT_EQ(1, edge->order_only_deps_);
 }
 
 TEST_F(GraphTest, Validation) {
@@ -934,7 +1045,7 @@
   EXPECT_TRUE(scan_.RecomputeDirty(GetNode("out"), &validation_nodes, &err));
   ASSERT_EQ("", err);
 
-  ASSERT_EQ(validation_nodes.size(), 1);
+  ASSERT_EQ(validation_nodes.size(), size_t(1));
   EXPECT_EQ(validation_nodes[0]->path(), "validate");
 
   EXPECT_TRUE(GetNode("out")->dirty());
@@ -1006,7 +1117,7 @@
     queue.push(edges[i]);
   }
 
-  EXPECT_EQ(queue.size(), n_edges);
+  EXPECT_EQ(queue.size(), static_cast<size_t>(n_edges));
   for (int i = 0; i < n_edges; ++i) {
     EXPECT_EQ(queue.top(), edges[n_edges - 1 - i]);
     queue.pop();
diff --git a/src/hash_map.h b/src/hash_map.h
index 4353609..4361c80 100644
--- a/src/hash_map.h
+++ b/src/hash_map.h
@@ -20,40 +20,8 @@
 #include "string_piece.h"
 #include "util.h"
 
-// MurmurHash2, by Austin Appleby
-static inline
-unsigned int MurmurHash2(const void* key, size_t len) {
-  static const unsigned int seed = 0xDECAFBAD;
-  const unsigned int m = 0x5bd1e995;
-  const int r = 24;
-  unsigned int h = seed ^ len;
-  const unsigned char* data = (const unsigned char*)key;
-  while (len >= 4) {
-    unsigned int k;
-    memcpy(&k, data, sizeof k);
-    k *= m;
-    k ^= k >> r;
-    k *= m;
-    h *= m;
-    h ^= k;
-    data += 4;
-    len -= 4;
-  }
-  switch (len) {
-  case 3: h ^= data[2] << 16;
-          NINJA_FALLTHROUGH;
-  case 2: h ^= data[1] << 8;
-          NINJA_FALLTHROUGH;
-  case 1: h ^= data[0];
-    h *= m;
-  };
-  h ^= h >> 13;
-  h *= m;
-  h ^= h >> 15;
-  return h;
-}
-
-#include <unordered_map>
+#include "third_party/emhash/hash_table8.hpp"
+#include "third_party/rapidhash/rapidhash.h"
 
 namespace std {
 template<>
@@ -62,7 +30,7 @@
   typedef size_t result_type;
 
   size_t operator()(StringPiece key) const {
-    return MurmurHash2(key.str_, key.len_);
+    return rapidhash(key.str_, key.len_);
   }
 };
 }
@@ -73,7 +41,7 @@
 /// mapping StringPiece => Foo*.
 template<typename V>
 struct ExternalStringHashMap {
-  typedef std::unordered_map<StringPiece, V> Type;
+  typedef emhash8::HashMap<StringPiece, V> Type;
 };
 
 #endif // NINJA_MAP_H_
diff --git a/src/includes_normalize_test.cc b/src/includes_normalize_test.cc
index 12965f9..659d170 100644
--- a/src/includes_normalize_test.cc
+++ b/src/includes_normalize_test.cc
@@ -135,7 +135,8 @@
   }
 
   kExactlyMaxPath[_MAX_PATH] = '\0';
-  EXPECT_EQ(strlen(kExactlyMaxPath), _MAX_PATH);
+  // This is a relatively safe cast, as we can expect that _MAX_PATH will
+  // never be negative.
+  EXPECT_EQ(strlen(kExactlyMaxPath), static_cast<size_t>(_MAX_PATH));
 
   string forward_slashes(kExactlyMaxPath);
   replace(forward_slashes.begin(), forward_slashes.end(), '\\', '/');
@@ -161,7 +162,7 @@
       kExactlyMaxPath[i] = 'a';
   }
   kExactlyMaxPath[_MAX_PATH] = '\0';
-  EXPECT_EQ(strlen(kExactlyMaxPath), _MAX_PATH);
+  EXPECT_EQ(strlen(kExactlyMaxPath), static_cast<size_t>(_MAX_PATH));
 
   // Make sure a path that's exactly _MAX_PATH long fails with a proper error.
   EXPECT_FALSE(normalizer.Normalize(kExactlyMaxPath, &result, &err));
diff --git a/src/jobserver-posix.cc b/src/jobserver-posix.cc
new file mode 100644
index 0000000..d907380
--- /dev/null
+++ b/src/jobserver-posix.cc
@@ -0,0 +1,131 @@
+// Copyright 2024 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include <string>
+
+#include "jobserver.h"
+#include "util.h"
+
+namespace {
+
+// Return true if |fd| is a fifo or pipe descriptor.
+bool IsFifoDescriptor(int fd) {
+  struct stat info;
+  int ret = ::fstat(fd, &info);
+  return (ret == 0) && ((info.st_mode & S_IFMT) == S_IFIFO);
+}
+
+// Implementation of Jobserver::Client for Posix systems
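+//
+// Illustrative use of the Jobserver::Client interface (any valid slot that
+// was acquired is expected to be returned through Release() once the job
+// finishes):
+//
+//   Jobserver::Slot slot = client->TryAcquire();
+//   if (slot.IsValid()) {
+//     // ... run one job ...
+//     client->Release(std::move(slot));
+//   }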
+class PosixJobserverClient : public Jobserver::Client {
+ public:
+  virtual ~PosixJobserverClient() {
+    if (write_fd_ >= 0)
+      ::close(write_fd_);
+    if (read_fd_ >= 0)
+      ::close(read_fd_);
+  }
+
+  Jobserver::Slot TryAcquire() override {
+    if (has_implicit_slot_) {
+      has_implicit_slot_ = false;
+      return Jobserver::Slot::CreateImplicit();
+    }
+    uint8_t slot_char = '\0';
+    int ret;
+    do {
+      ret = ::read(read_fd_, &slot_char, 1);
+    } while (ret < 0 && errno == EINTR);
+    if (ret == 1) {
+      return Jobserver::Slot::CreateExplicit(slot_char);
+    }
+    return Jobserver::Slot();
+  }
+
+  void Release(Jobserver::Slot slot) override {
+    if (!slot.IsValid())
+      return;
+
+    if (slot.IsImplicit()) {
+      assert(!has_implicit_slot_ && "Implicit slot cannot be released twice!");
+      has_implicit_slot_ = true;
+      return;
+    }
+
+    uint8_t slot_char = slot.GetExplicitValue();
+    int ret;
+    do {
+      ret = ::write(write_fd_, &slot_char, 1);
+    } while (ret < 0 && errno == EINTR);
+    (void)ret;  // Nothing can be done in case of error here.
+  }
+
+  // Initialize with FIFO file path.
+  bool InitWithFifo(const std::string& fifo_path, std::string* error) {
+    if (fifo_path.empty()) {
+      *error = "Empty fifo path";
+      return false;
+    }
+    read_fd_ = ::open(fifo_path.c_str(), O_RDONLY | O_NONBLOCK | O_CLOEXEC);
+    if (read_fd_ < 0) {
+      *error =
+          std::string("Error opening fifo for reading: ") + strerror(errno);
+      return false;
+    }
+    if (!IsFifoDescriptor(read_fd_)) {
+      *error = "Not a fifo path: " + fifo_path;
+      // Let destructor close read_fd_.
+      return false;
+    }
+    write_fd_ = ::open(fifo_path.c_str(), O_WRONLY | O_NONBLOCK | O_CLOEXEC);
+    if (write_fd_ < 0) {
+      *error =
+          std::string("Error opening fifo for writing: ") + strerror(errno);
+      // Let destructor close read_fd_
+      return false;
+    }
+    return true;
+  }
+
+ private:
+  // Set to true if the implicit slot has not been acquired yet.
+  bool has_implicit_slot_ = true;
+
+  // read and write descriptors.
+  int read_fd_ = -1;
+  int write_fd_ = -1;
+};
+
+}  // namespace
+
+// static
+std::unique_ptr<Jobserver::Client> Jobserver::Client::Create(
+    const Jobserver::Config& config, std::string* error) {
+  bool success = false;
+  auto client = std::unique_ptr<PosixJobserverClient>(new PosixJobserverClient);
+  if (config.mode == Jobserver::Config::kModePosixFifo) {
+    success = client->InitWithFifo(config.path, error);
+  } else {
+    *error = "Unsupported jobserver mode";
+  }
+  if (!success)
+    client.reset();
+  return client;
+}
diff --git a/src/jobserver-win32.cc b/src/jobserver-win32.cc
new file mode 100644
index 0000000..bc11d9a
--- /dev/null
+++ b/src/jobserver-win32.cc
@@ -0,0 +1,105 @@
+// Copyright 2024 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <assert.h>
+#include <windows.h>
+
+#include "jobserver.h"
+#include "util.h"
+
+namespace {
+
+// Implementation of Jobserver::Client for Win32 systems.
+// At the moment, only the semaphore scheme is supported,
+// even when running under Cygwin which could support the
+// pipe version, in theory.
+class Win32JobserverClient : public Jobserver::Client {
+ public:
+  virtual ~Win32JobserverClient() {
+    // NOTE: OpenSemaphore() returns NULL on failure.
+    if (IsValid()) {
+      ::CloseHandle(handle_);
+    }
+  }
+
+  Jobserver::Slot TryAcquire() override {
+    if (IsValid()) {
+      if (has_implicit_slot_) {
+        has_implicit_slot_ = false;
+        return Jobserver::Slot::CreateImplicit();
+      }
+
+      DWORD ret = ::WaitForSingleObject(handle_, 0);
+      if (ret == WAIT_OBJECT_0) {
+        // Hard-code value 1 for the explicit slot value.
+        return Jobserver::Slot::CreateExplicit(1);
+      }
+    }
+    return Jobserver::Slot();
+  }
+
+  void Release(Jobserver::Slot slot) override {
+    if (!slot.IsValid())
+      return;
+
+    if (slot.IsImplicit()) {
+      assert(!has_implicit_slot_ && "Implicit slot cannot be released twice!");
+      has_implicit_slot_ = true;
+      return;
+    }
+
+    // Nothing can be done in case of error here.
+    (void)::ReleaseSemaphore(handle_, 1, NULL);
+  }
+
+  bool InitWithSemaphore(const std::string& name, std::string* error) {
+    handle_ = ::OpenSemaphoreA(SYNCHRONIZE | SEMAPHORE_MODIFY_STATE, FALSE,
+                               name.c_str());
+    if (handle_ == NULL) {
+      *error = "Error opening semaphore: " + GetLastErrorString();
+      return false;
+    }
+    return true;
+  }
+
+ protected:
+  bool IsValid() const {
+    // NOTE: OpenSemaphore() returns NULL on failure, not INVALID_HANDLE_VALUE.
+    return handle_ != NULL;
+  }
+
+  // Set to true if the implicit slot has not been acquired yet.
+  bool has_implicit_slot_ = true;
+
+  // Semaphore handle. NULL means not in use.
+  HANDLE handle_ = NULL;
+};
+
+}  // namespace
+
+// static
+std::unique_ptr<Jobserver::Client> Jobserver::Client::Create(
+    const Jobserver::Config& config, std::string* error) {
+  bool success = false;
+  auto client =
+      std::unique_ptr<Win32JobserverClient>(new Win32JobserverClient());
+  if (config.mode == Jobserver::Config::kModeWin32Semaphore) {
+    success = client->InitWithSemaphore(config.path, error);
+  } else {
+    *error = "Unsupported jobserver mode";
+  }
+  if (!success)
+    client.reset();
+  return client;
+}
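(Editorial note, not part of the patch.) For reference, the named semaphore that OpenSemaphoreA() attaches to above is created by the parent jobserver with an initial and maximum count equal to the number of explicit job slots. A hedged sketch of that pool side, with a hypothetical helper name:

#include <windows.h>

// Clients decrement the count via WaitForSingleObject() and give slots back
// via ReleaseSemaphore(), as Win32JobserverClient does above.
HANDLE CreateJobserverSemaphore(const char* name, LONG job_count) {
  return ::CreateSemaphoreA(/*lpSemaphoreAttributes=*/NULL,
                            /*lInitialCount=*/job_count,
                            /*lMaximumCount=*/job_count, name);
}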
diff --git a/src/jobserver.cc b/src/jobserver.cc
new file mode 100644
index 0000000..e195517
--- /dev/null
+++ b/src/jobserver.cc
@@ -0,0 +1,208 @@
+// Copyright 2024 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "jobserver.h"
+
+#include <assert.h>
+#include <stdio.h>
+
+#include <vector>
+
+#include "string_piece.h"
+
+namespace {
+
+// If |input| starts with |prefix|, return true and set |*value| to the rest
+// of the input. Otherwise return false.
+bool GetPrefixedValue(StringPiece input, StringPiece prefix,
+                      StringPiece* value) {
+  assert(prefix.len_ > 0);
+  if (input.len_ < prefix.len_ || memcmp(prefix.str_, input.str_, prefix.len_))
+    return false;
+
+  *value = StringPiece(input.str_ + prefix.len_, input.len_ - prefix.len_);
+  return true;
+}
+
+// Try to read a comma-separated pair of file descriptors from |input|.
+// On success return true and set |config->mode| accordingly. Otherwise return
+// false if the input doesn't follow the appropriate format. Note that the
+// values are not saved since pipe mode is not supported.
+bool GetFileDescriptorPair(StringPiece input, Jobserver::Config* config) {
+  int read_fd = 1, write_fd = -1;
+  std::string pair = input.AsString();
+  if (sscanf(pair.c_str(), "%d,%d", &read_fd, &write_fd) != 2)
+    return false;
+
+  // From
+  // https://www.gnu.org/software/make/manual/html_node/POSIX-Jobserver.html
+  // Any negative descriptor means the feature is disabled.
+  if (read_fd < 0 || write_fd < 0)
+    config->mode = Jobserver::Config::kModeNone;
+  else
+    config->mode = Jobserver::Config::kModePipe;
+
+  return true;
+}
+
+}  // namespace
+
+// static
+const int16_t Jobserver::Slot::kImplicitValue;
+
+uint8_t Jobserver::Slot::GetExplicitValue() const {
+  assert(IsExplicit());
+  return static_cast<uint8_t>(value_);
+}
+
+bool Jobserver::ParseMakeFlagsValue(const char* makeflags_env,
+                                    Jobserver::Config* config,
+                                    std::string* error) {
+  *config = Config();
+
+  if (!makeflags_env || !makeflags_env[0]) {
+    // Return default Config instance with kModeNone if input is null or empty.
+    return true;
+  }
+
+  // Decompose input into vector of space or tab separated string pieces.
+  std::vector<StringPiece> args;
+  const char* p = makeflags_env;
+  while (*p) {
+    const char* next_space = strpbrk(p, " \t");
+    if (!next_space) {
+      args.emplace_back(p);
+      break;
+    }
+
+    if (next_space > p)
+      args.emplace_back(p, next_space - p);
+
+    p = next_space + 1;
+  }
+
+  // clang-format off
+  //
+  // From:
+  // https://www.gnu.org/software/make/manual/html_node/POSIX-Jobserver.html
+  //
+  // """
+  // Your tool may also examine the first word of the MAKEFLAGS variable and
+  // look for the character n. If this character is present then make was
+  // invoked with the ‘-n’ option and your tool may want to stop without
+  // performing any operations.
+  // """
+  //
+  // Where according to
+  // https://www.gnu.org/software/make/manual/html_node/Options_002fRecursion.html
+  // MAKEFLAGS begins with all "flag letters" passed to make.
+  //
+  // Experimentation shows that GNU Make 4.3, at least, will set MAKEFLAGS with
+  // an initial space if no letter flags are passed to its invocation (except -j),
+  // i.e.:
+  //
+  //    make -ks --> MAKEFLAGS="ks"
+  //    make -j  --> MAKEFLAGS=" -j"
+  //    make -ksj --> MAKEFLAGS="ks -j"
+  //    make -ks -j3  --> MAKEFLAGS="ks -j3 --jobserver-auth=3,4"
+  //    make -j3      --> MAKEFLAGS=" -j3 --jobserver-auth=3,4"
+  //
+  // However, other jobserver implementations will not; for example, the one
+  // at https://github.com/rust-lang/jobserver-rs will set MAKEFLAGS to just
+  // "--jobserver-fds=R,W --jobserver-auth=R,W" instead, without an initial
+  // space.
+  //
+  // Another implementation is from Rust's Cargo itself which will set it to
+  // "-j --jobserver-fds=R,W --jobserver-auth=R,W".
+  //
+  // For the record, --jobserver-fds=R,W is an old undocumented and deprecated
+  // version of --jobserver-auth=R,W that was implemented by GNU Make before 4.2
+  // was released, and some tooling may depend on it. Hence it makes sense to
+  // define both --jobserver-fds and --jobserver-auth at the same time, since
+  // the last recognized one should win in client code.
+  //
+  // The initial space will have been stripped by the loop above, but we can
+  // still support the requirement by ignoring the first arg if it begins with a
+  // dash (-).
+  //
+  // clang-format on
+  if (!args.empty() && args[0][0] != '-' &&
+      memchr(args[0].str_, 'n', args[0].len_) != nullptr) {
+    return true;
+  }
+
+  // Loop over all arguments; the last one wins, except in case of errors.
+  for (const auto& arg : args) {
+    StringPiece value;
+
+    // Handle --jobserver-auth=... here.
+    if (GetPrefixedValue(arg, "--jobserver-auth=", &value)) {
+      if (GetFileDescriptorPair(value, config)) {
+        continue;
+      }
+      StringPiece fifo_path;
+      if (GetPrefixedValue(value, "fifo:", &fifo_path)) {
+        config->mode = Jobserver::Config::kModePosixFifo;
+        config->path = fifo_path.AsString();
+      } else {
+        config->mode = Jobserver::Config::kModeWin32Semaphore;
+        config->path = value.AsString();
+      }
+      continue;
+    }
+
+    // Handle --jobserver-fds which is an old undocumented variant of
+    // --jobserver-auth that only accepts a pair of file descriptors.
+    // This was replaced by --jobserver-auth=R,W in GNU Make 4.2.
+    if (GetPrefixedValue(arg, "--jobserver-fds=", &value)) {
+      if (!GetFileDescriptorPair(value, config)) {
+        *error = "Invalid file descriptor pair [" + value.AsString() + "]";
+        return false;
+      }
+      config->mode = Jobserver::Config::kModePipe;
+      continue;
+    }
+
+    // Ignore this argument. This assumes that MAKEFLAGS does not
+    // use a space to separate an option from its argument, e.g.
+    // `--jobserver-auth <something>`, which has been confirmed with
+    // Make 4.3, even when it receives such a value in its own env.
+  }
+
+  return true;
+}
+
+bool Jobserver::ParseNativeMakeFlagsValue(const char* makeflags_env,
+                                          Jobserver::Config* config,
+                                          std::string* error) {
+  if (!ParseMakeFlagsValue(makeflags_env, config, error))
+    return false;
+
+  if (config->mode == Jobserver::Config::kModePipe) {
+    *error = "Pipe-based protocol is not supported!";
+    return false;
+  }
+#ifdef _WIN32
+  if (config->mode == Jobserver::Config::kModePosixFifo) {
+    *error = "FIFO mode is not supported on Windows!";
+    return false;
+  }
+#else   // !_WIN32
+  if (config->mode == Jobserver::Config::kModeWin32Semaphore) {
+    *error = "Semaphore mode is not supported on Posix!";
+    return false;
+  }
+#endif  // !_WIN32
+  return true;
+}
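(Editorial note, not part of the patch.) A sketch of how a caller could feed MAKEFLAGS into the parser above; the helper name is hypothetical:

#include <cstdlib>
#include <string>

#include "jobserver.h"

// Returns true on success; config->mode stays kModeNone when MAKEFLAGS is
// unset, empty, or carries no jobserver arguments.
bool GetJobserverConfigFromEnv(Jobserver::Config* config, std::string* error) {
  return Jobserver::ParseNativeMakeFlagsValue(::getenv("MAKEFLAGS"), config,
                                              error);
}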
diff --git a/src/jobserver.h b/src/jobserver.h
new file mode 100644
index 0000000..723efdb
--- /dev/null
+++ b/src/jobserver.h
@@ -0,0 +1,218 @@
+// Copyright 2024 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+#include <utility>
+
+/// Jobserver provides types related to managing a pool of "job slots"
+/// using the GNU Make jobserver protocol described at:
+///
+/// https://www.gnu.org/software/make/manual/html_node/Job-Slots.html
+///
+struct Jobserver {
+  /// A Jobserver::Slot models a single job slot that can be acquired from
+  /// or released to a jobserver pool. This class is move-only, and can
+  /// wrap three types of values:
+  ///
+  /// - An "invalid" value (the default), used to indicate errors, e.g.
+  ///   that no slot could be acquired from the pool.
+  ///
+  /// - The "implicit" value, used to model the job slot that is implicitly
+  ///   assigned to a jobserver client by the parent process that spawned
+  ///   it.
+  ///
+  /// - The "explicit" values, which correspond to an actual byte read from
+  ///   the slot pool's pipe (for Posix), or a semaphore decrement operation
+  ///   (for Windows).
+  ///
+  /// Use IsValid(), IsImplicit(), IsExplicit() to test for categories.
+  ///
+  /// TECHNICAL NOTE: This design complies with the requirements laid out
+  /// on https://www.gnu.org/software/make/manual/html_node/POSIX-Jobserver.html
+  /// which require clients to write back the exact token values they
+  /// received from a Posix pipe.
+  ///
+  /// Note that *currently* all pool implementations write the same token
+  /// values to the pipe ('+' for GNU Make, and '|' for the Rust jobserver),
+  /// and do not care about the values written back by clients.
+  ///
+  struct Slot {
+    /// Default constructor creates invalid instance.
+    Slot() = default;
+
+    /// Move operations are allowed.
+    Slot(Slot&& o) noexcept : value_(o.value_) { o.value_ = -1; }
+
+    Slot& operator=(Slot&& o) noexcept {
+      if (this != &o) {
+        this->value_ = o.value_;
+        o.value_ = -1;
+      }
+      return *this;
+    }
+
+    /// Copy operations are disallowed.
+    Slot(const Slot&) = delete;
+    Slot& operator=(const Slot&) = delete;
+
+    /// Return true if this instance is valid, i.e. it is either an
+    /// implicit or an explicit job slot.
+    bool IsValid() const { return value_ >= 0; }
+
+    /// Return true if this instance represents an implicit job slot.
+    bool IsImplicit() const { return value_ == kImplicitValue; }
+
+    /// Return true if this instance represents an explicit job slot.
+    bool IsExplicit() const { return IsValid() && !IsImplicit(); }
+
+    /// Return the value of an explicit slot. It is a runtime error to call
+    /// this on an instance that is not explicit.
+    uint8_t GetExplicitValue() const;
+
+    /// Create instance for explicit byte value.
+    static Slot CreateExplicit(uint8_t value) {
+      return Slot(static_cast<int16_t>(value));
+    }
+
+    /// Create instance for the implicit value.
+    static Slot CreateImplicit() { return Slot(kImplicitValue); }
+
+   private:
+    Slot(int16_t value) : value_(value) {}
+
+    static constexpr int16_t kImplicitValue = 256;
+
+    int16_t value_ = -1;
+  };
+
+  /// A Jobserver::Config describes how to access or implement a GNU Make
+  /// jobserver pool.
+  struct Config {
+    /// Different implementation modes for the slot pool.
+    ///
+    /// kModeNone means there is no pool.
+    ///
+    /// kModePipe means that `--jobserver-auth=R,W` is used to
+    ///    pass a pair of file descriptors to client processes. This also
+    ///    matches `--jobserver-fds=R,W` which is an old undocumented
+    ///    variant of the same scheme. This mode is not supported by
+    ///    Ninja, but recognized by the parser.
+    ///
+    /// kModePosixFifo means that `--jobserver-auth=fifo:PATH` is used to
+    ///    pass the path of a Posix FIFO to client processes. This is not
+    ///    supported on Windows. Implemented by GNU Make 4.4 and above
+    ///    when `--jobserver-style=fifo` is used.
+    ///
+    /// kModeWin32Semaphore means that `--jobserver-auth=SEMAPHORE_NAME` is
+    ///    used to pass the name of a Win32 semaphore to client processes.
+    ///    This is not supported on Posix.
+    ///
+    /// kModeDefault is the default mode to enable on the current platform.
+    ///    This is an alias for kModeWin32Semaphore on Windows, and
+    ///    kModePosixFifo on Posix.
+    enum Mode {
+      kModeNone = 0,
+      kModePipe,
+      kModePosixFifo,
+      kModeWin32Semaphore,
+#ifdef _WIN32
+      kModeDefault = kModeWin32Semaphore,
+#else   // _WIN32
+      kModeDefault = kModePosixFifo,
+#endif  // _WIN32
+    };
+
+    /// Implementation mode for the pool.
+    Mode mode = kModeNone;
+
+    /// For kModePosixFifo, this is the path to the Unix FIFO to use.
+    /// For kModeWin32Semaphore, this is the name of the Win32 semaphore to use.
+    std::string path;
+
+    /// Return true if this instance matches an active implementation mode.
+    /// This does not try to validate configuration parameters though.
+    bool HasMode() { return mode != kModeNone; }
+  };
+
+  /// Parse the value of a MAKEFLAGS environment variable. On success return
+  /// true and set |*config|. On failure, return false and set |*error| to
+  /// explain what's wrong. If |makeflags_env| is nullptr or an empty string,
+  /// this returns success and sets |config->mode| to Config::kModeNone.
+  static bool ParseMakeFlagsValue(const char* makeflags_env, Config* config,
+                                  std::string* error);
+
+  /// A variant of ParseMakeFlagsValue() that will return an error if the parsed
+  /// result is not compatible with the native system. I.e.:
+  ///
+  ///   --jobserver-auth=R,W is not supported on any system (but recognized to
+  ///       provide a relevant error message to the user).
+  ///
+  ///   --jobserver-auth=NAME only works on Windows.
+  ///
+  ///   --jobserver-auth=fifo:PATH only works on Posix.
+  ///
+  static bool ParseNativeMakeFlagsValue(const char* makeflags_env,
+                                        Config* config, std::string* error);
+
+  /// A Jobserver::Client instance models a client of an external GNU jobserver
+  /// pool, which can be implemented as a Unix FIFO, or a Windows named
+  /// semaphore. Usage is as follows:
+  ///
+  ///  - Call Jobserver::Client::Create(), passing a Config value as argument
+  ///    (e.g. one initialized with ParseNativeMakeFlagsValue()), to create
+  ///    a new instance.
+  ///
+  ///  - Call TryAcquire() to try to acquire a job slot from the pool.
+  ///    If the result is not an invalid slot, store it until the
+  ///    corresponding command completes, then call Release() to send it
+  ///    back to the pool.
+  ///
+  ///  - It is important that all acquired slots are released to the pool,
+  ///    even if Ninja terminates early (e.g. due to a build command failing).
+  ///
+  class Client {
+   public:
+    virtual ~Client() {}
+
+    /// Try to acquire a slot from the pool. On failure, i.e. if no slot
+    /// can be acquired, this returns an invalid Slot instance.
+    ///
+    /// Note that this will always return the implicit slot value the first
+    /// time this is called, without reading anything from the pool, as
+    /// specified by the protocol. This implicit value *must* be released
+    /// just like any other one. In general, users of this class should not
+    /// care about this detail, except in unit tests.
+    virtual Slot TryAcquire() { return Slot(); }
+
+    /// Release a slot to the pool. Does nothing if the slot is invalid,
+    /// or if writing to the pool fails (and this is not the implicit slot).
+    /// If the pool is destroyed before Ninja exits, only the implicit slot
+    /// can be acquired in later calls (once it has been released). This simply
+    /// serializes all commands instead of blocking.
+    virtual void Release(Slot slot) {}
+
+    /// Create a new Client instance from a given configuration. On failure,
+    /// this returns null after setting |*error|. Note that it is an error to
+    /// call this function with |config.HasMode() == false|.
+    static std::unique_ptr<Client> Create(const Config&, std::string* error);
+
+   protected:
+    Client() = default;
+  };
+};
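(Editorial note, not part of the patch.) The usage steps described in the Client comment above, as a minimal sketch; the function and variable names are illustrative only:

#include <utility>
#include <vector>

#include "jobserver.h"

// Acquire as many slots as the pool allows, start one command per slot,
// and give every slot back once its command has finished.
void RunWithJobserver(Jobserver::Client* client) {
  std::vector<Jobserver::Slot> in_flight;
  for (;;) {
    Jobserver::Slot slot = client->TryAcquire();
    if (!slot.IsValid())
      break;  // Pool exhausted: wait for a running command instead.
    in_flight.push_back(std::move(slot));
    // ... start one build command here ...
  }
  // When commands complete, return their slots to the pool.
  while (!in_flight.empty()) {
    client->Release(std::move(in_flight.back()));
    in_flight.pop_back();
  }
}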
diff --git a/src/jobserver_test.cc b/src/jobserver_test.cc
new file mode 100644
index 0000000..7941340
--- /dev/null
+++ b/src/jobserver_test.cc
@@ -0,0 +1,401 @@
+// Copyright 2024 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "jobserver.h"
+
+#include "test.h"
+
+#ifndef _WIN32
+#include <fcntl.h>
+#include <unistd.h>
+#endif
+
+namespace {
+
+#ifndef _WIN32
+struct ScopedTestFd {
+  explicit ScopedTestFd(int fd) : fd_(fd) {}
+
+  ~ScopedTestFd() {
+    if (IsValid())
+      ::close(fd_);
+  }
+
+  bool IsValid() const { return fd_ >= 0; }
+
+  int fd_ = -1;
+};
+#endif  // !_WIN32
+
+}  // namespace
+
+TEST(Jobserver, SlotTest) {
+  // Default construction.
+  Jobserver::Slot slot;
+  EXPECT_FALSE(slot.IsValid());
+
+  // Construct implicit slot
+  Jobserver::Slot slot0 = Jobserver::Slot::CreateImplicit();
+  EXPECT_TRUE(slot0.IsValid());
+  EXPECT_TRUE(slot0.IsImplicit());
+  EXPECT_FALSE(slot0.IsExplicit());
+
+  // Construct explicit slots
+  auto slot1 = Jobserver::Slot::CreateExplicit(10u);
+  EXPECT_TRUE(slot1.IsValid());
+  EXPECT_FALSE(slot1.IsImplicit());
+  EXPECT_TRUE(slot1.IsExplicit());
+  EXPECT_EQ(10u, slot1.GetExplicitValue());
+
+  auto slot2 = Jobserver::Slot::CreateExplicit(42u);
+  EXPECT_TRUE(slot2.IsValid());
+  EXPECT_FALSE(slot2.IsImplicit());
+  EXPECT_TRUE(slot2.IsExplicit());
+  EXPECT_EQ(42u, slot2.GetExplicitValue());
+
+  // Move operation.
+  slot2 = std::move(slot1);
+  EXPECT_FALSE(slot1.IsValid());
+  EXPECT_TRUE(slot2.IsValid());
+  EXPECT_TRUE(slot2.IsExplicit());
+  ASSERT_EQ(10u, slot2.GetExplicitValue());
+
+  slot1 = std::move(slot0);
+  EXPECT_FALSE(slot0.IsValid());
+  EXPECT_TRUE(slot1.IsValid());
+  EXPECT_TRUE(slot1.IsImplicit());
+  EXPECT_FALSE(slot1.IsExplicit());
+}
+
+TEST(Jobserver, ParseMakeFlagsValue) {
+  Jobserver::Config config;
+  std::string error;
+
+  // Passing nullptr does not crash.
+  config = {};
+  error.clear();
+  ASSERT_TRUE(Jobserver::ParseMakeFlagsValue(nullptr, &config, &error));
+  EXPECT_EQ(Jobserver::Config::kModeNone, config.mode);
+
+  // Passing an empty string does not crash.
+  config = {};
+  error.clear();
+  ASSERT_TRUE(Jobserver::ParseMakeFlagsValue("", &config, &error));
+  EXPECT_EQ(Jobserver::Config::kModeNone, config.mode);
+
+  // Passing a string that only contains whitespace does not crash.
+  config = {};
+  error.clear();
+  ASSERT_TRUE(Jobserver::ParseMakeFlagsValue("  \t", &config, &error));
+  EXPECT_EQ(Jobserver::Config::kModeNone, config.mode);
+
+  // Passing an `n` in the first word reports no mode.
+  config = {};
+  error.clear();
+  ASSERT_TRUE(Jobserver::ParseMakeFlagsValue("kns --jobserver-auth=fifo:foo",
+                                             &config, &error));
+  EXPECT_EQ(Jobserver::Config::kModeNone, config.mode);
+
+  // Passing "--jobserver-auth=fifo:<path>" works.
+  config = {};
+  error.clear();
+  ASSERT_TRUE(Jobserver::ParseMakeFlagsValue("--jobserver-auth=fifo:foo",
+                                             &config, &error));
+  EXPECT_EQ(Jobserver::Config::kModePosixFifo, config.mode);
+  EXPECT_EQ("foo", config.path);
+
+  // Passing an initial " -j" or " -j<count>" works.
+  config = {};
+  error.clear();
+  ASSERT_TRUE(Jobserver::ParseMakeFlagsValue(" -j --jobserver-auth=fifo:foo",
+                                             &config, &error));
+  EXPECT_EQ(Jobserver::Config::kModePosixFifo, config.mode);
+  EXPECT_EQ("foo", config.path);
+
+  // Passing an initial " -j<count>" works.
+  config = {};
+  error.clear();
+  ASSERT_TRUE(Jobserver::ParseMakeFlagsValue(" -j10 --jobserver-auth=fifo:foo",
+                                             &config, &error));
+  EXPECT_EQ(Jobserver::Config::kModePosixFifo, config.mode);
+  EXPECT_EQ("foo", config.path);
+
+  // Passing an `n` in the first word _after_ a dash works though, i.e.
+  // it is not interpreted as the GNU Make dry-run flag.
+  config = {};
+  error.clear();
+  ASSERT_TRUE(Jobserver::ParseMakeFlagsValue(
+      "-one-flag --jobserver-auth=fifo:foo", &config, &error));
+  EXPECT_EQ(Jobserver::Config::kModePosixFifo, config.mode);
+
+  config = {};
+  error.clear();
+  ASSERT_TRUE(Jobserver::ParseMakeFlagsValue("--jobserver-auth=semaphore_name",
+                                             &config, &error));
+  EXPECT_EQ(Jobserver::Config::kModeWin32Semaphore, config.mode);
+  EXPECT_EQ("semaphore_name", config.path);
+
+  config = {};
+  error.clear();
+  ASSERT_TRUE(Jobserver::ParseMakeFlagsValue("--jobserver-auth=10,42", &config,
+                                             &error));
+  EXPECT_EQ(Jobserver::Config::kModePipe, config.mode);
+
+  config = {};
+  error.clear();
+  ASSERT_TRUE(Jobserver::ParseMakeFlagsValue("--jobserver-auth=-1,42", &config,
+                                             &error));
+  EXPECT_EQ(Jobserver::Config::kModeNone, config.mode);
+
+  config = {};
+  error.clear();
+  ASSERT_TRUE(Jobserver::ParseMakeFlagsValue("--jobserver-auth=10,-42", &config,
+                                             &error));
+  EXPECT_EQ(Jobserver::Config::kModeNone, config.mode);
+
+  config = {};
+  error.clear();
+  ASSERT_TRUE(Jobserver::ParseMakeFlagsValue(
+      "--jobserver-auth=10,42 --jobserver-fds=12,44 "
+      "--jobserver-auth=fifo:/tmp/fifo",
+      &config, &error));
+  EXPECT_EQ(Jobserver::Config::kModePosixFifo, config.mode);
+  EXPECT_EQ("/tmp/fifo", config.path);
+
+  config = {};
+  error.clear();
+  ASSERT_FALSE(
+      Jobserver::ParseMakeFlagsValue("--jobserver-fds=10,", &config, &error));
+  EXPECT_EQ("Invalid file descriptor pair [10,]", error);
+}
+
+TEST(Jobserver, ParseNativeMakeFlagsValue) {
+  Jobserver::Config config;
+  std::string error;
+
+  // --jobserver-auth=R,W is not supported.
+  config = {};
+  error.clear();
+  EXPECT_FALSE(Jobserver::ParseNativeMakeFlagsValue("--jobserver-auth=3,4",
+                                                    &config, &error));
+  EXPECT_EQ(error, "Pipe-based protocol is not supported!");
+
+#ifdef _WIN32
+  // --jobserver-auth=NAME works on Windows.
+  config = {};
+  error.clear();
+  ASSERT_TRUE(Jobserver::ParseNativeMakeFlagsValue(
+      "--jobserver-auth=semaphore_name", &config, &error));
+  EXPECT_EQ(Jobserver::Config::kModeWin32Semaphore, config.mode);
+  EXPECT_EQ("semaphore_name", config.path);
+
+  // --jobserver-auth=fifo:PATH does not work on Windows.
+  config = {};
+  error.clear();
+  ASSERT_FALSE(Jobserver::ParseNativeMakeFlagsValue("--jobserver-auth=fifo:foo",
+                                                    &config, &error));
+  EXPECT_EQ(error, "FIFO mode is not supported on Windows!");
+#else   // !_WIN32
+  // --jobserver-auth=NAME does not work on Posix
+  config = {};
+  error.clear();
+  ASSERT_FALSE(Jobserver::ParseNativeMakeFlagsValue(
+      "--jobserver-auth=semaphore_name", &config, &error));
+  EXPECT_EQ(error, "Semaphore mode is not supported on Posix!");
+
+  // --jobserver-auth=fifo:PATH works on Posix
+  config = {};
+  error.clear();
+  ASSERT_TRUE(Jobserver::ParseNativeMakeFlagsValue("--jobserver-auth=fifo:foo",
+                                                   &config, &error));
+  EXPECT_EQ(Jobserver::Config::kModePosixFifo, config.mode);
+  EXPECT_EQ("foo", config.path);
+#endif  // !_WIN32
+}
+
+TEST(Jobserver, NullJobserver) {
+  Jobserver::Config config;
+  ASSERT_EQ(Jobserver::Config::kModeNone, config.mode);
+
+  std::string error;
+  std::unique_ptr<Jobserver::Client> client =
+      Jobserver::Client::Create(config, &error);
+  EXPECT_FALSE(client.get());
+  EXPECT_EQ("Unsupported jobserver mode", error);
+}
+
+#ifdef _WIN32
+
+#include <windows.h>
+
+// Scoped HANDLE class for the semaphore.
+struct ScopedSemaphoreHandle {
+  ScopedSemaphoreHandle(HANDLE handle) : handle_(handle) {}
+  ~ScopedSemaphoreHandle() {
+    if (handle_)
+      ::CloseHandle(handle_);
+  }
+  HANDLE get() const { return handle_; }
+
+ private:
+  HANDLE handle_ = NULL;
+};
+
+TEST(Jobserver, Win32SemaphoreClient) {
+  // Create semaphore with initial token count.
+  const size_t kExplicitCount = 10;
+  const char kSemaphoreName[] = "ninja_test_jobserver_semaphore";
+  ScopedSemaphoreHandle handle(
+      ::CreateSemaphoreA(NULL, static_cast<DWORD>(kExplicitCount),
+                         static_cast<DWORD>(kExplicitCount), kSemaphoreName));
+  ASSERT_TRUE(handle.get()) << GetLastErrorString();
+
+  // Create new client instance.
+  Jobserver::Config config;
+  config.mode = Jobserver::Config::kModeWin32Semaphore;
+  config.path = kSemaphoreName;
+
+  std::string error;
+  std::unique_ptr<Jobserver::Client> client =
+      Jobserver::Client::Create(config, &error);
+  EXPECT_TRUE(client.get()) << error;
+  EXPECT_TRUE(error.empty()) << error;
+
+  Jobserver::Slot slot;
+  std::vector<Jobserver::Slot> slots;
+
+  // Read the implicit slot.
+  slot = client->TryAcquire();
+  EXPECT_TRUE(slot.IsValid());
+  EXPECT_TRUE(slot.IsImplicit());
+  slots.push_back(std::move(slot));
+
+  // Read the explicit slots.
+  for (size_t n = 0; n < kExplicitCount; ++n) {
+    slot = client->TryAcquire();
+    EXPECT_TRUE(slot.IsValid());
+    EXPECT_TRUE(slot.IsExplicit());
+    slots.push_back(std::move(slot));
+  }
+
+  // Pool should be empty now.
+  slot = client->TryAcquire();
+  EXPECT_FALSE(slot.IsValid());
+
+  // Release the slots again.
+  while (!slots.empty()) {
+    client->Release(std::move(slots.back()));
+    slots.pop_back();
+  }
+
+  slot = client->TryAcquire();
+  EXPECT_TRUE(slot.IsValid());
+  EXPECT_TRUE(slot.IsImplicit());
+  slots.push_back(std::move(slot));
+
+  for (size_t n = 0; n < kExplicitCount; ++n) {
+    slot = client->TryAcquire();
+    EXPECT_TRUE(slot.IsValid());
+    EXPECT_TRUE(slot.IsExplicit()) << n;
+    slots.push_back(std::move(slot));
+  }
+
+  // And the pool should be empty again.
+  slot = client->TryAcquire();
+  EXPECT_FALSE(slot.IsValid());
+}
+#else   // !_WIN32
+TEST(Jobserver, PosixFifoClient) {
+  ScopedTempDir temp_dir;
+  temp_dir.CreateAndEnter("ninja_test_jobserver_fifo");
+
+  // Create the Fifo, then write kSlotCount slots into it.
+  std::string fifo_path = temp_dir.temp_dir_name_ + "fifo";
+  int ret = mknod(fifo_path.c_str(), S_IFIFO | 0666, 0);
+  ASSERT_EQ(0, ret) << "Could not create FIFO at: " << fifo_path;
+
+  const size_t kSlotCount = 5;
+
+  ScopedTestFd write_fd(::open(fifo_path.c_str(), O_RDWR));
+  ASSERT_TRUE(write_fd.IsValid()) << "Cannot open FIFO at: " << strerror(errno);
+  for (size_t n = 0; n < kSlotCount; ++n) {
+    uint8_t slot_byte = static_cast<uint8_t>('0' + n);
+    ::write(write_fd.fd_, &slot_byte, 1);
+  }
+  // Keep the file descriptor open to ensure the FIFO's content
+  // persists in kernel memory.
+
+  // Create new client instance.
+  Jobserver::Config config;
+  config.mode = Jobserver::Config::kModePosixFifo;
+  config.path = fifo_path;
+
+  std::string error;
+  std::unique_ptr<Jobserver::Client> client =
+      Jobserver::Client::Create(config, &error);
+  EXPECT_TRUE(client.get());
+  EXPECT_TRUE(error.empty()) << error;
+
+  // Read slots from the pool, and store them
+  std::vector<Jobserver::Slot> slots;
+
+  // First slot is always implicit.
+  slots.push_back(client->TryAcquire());
+  ASSERT_TRUE(slots.back().IsValid());
+  EXPECT_TRUE(slots.back().IsImplicit());
+
+  // Then read kSlotCount slots from the pipe and verify their value.
+  for (size_t n = 0; n < kSlotCount; ++n) {
+    Jobserver::Slot slot = client->TryAcquire();
+    ASSERT_TRUE(slot.IsValid()) << "Slot #" << n + 1;
+    EXPECT_EQ(static_cast<uint8_t>('0' + n), slot.GetExplicitValue());
+    slots.push_back(std::move(slot));
+  }
+
+  // Pool should be empty now, so next TryAcquire() will fail.
+  Jobserver::Slot slot = client->TryAcquire();
+  EXPECT_FALSE(slot.IsValid());
+}
+
+TEST(Jobserver, PosixFifoClientWithWrongPath) {
+  ScopedTempDir temp_dir;
+  temp_dir.CreateAndEnter("ninja_test_jobserver_fifo");
+
+  // Create a regular file.
+  std::string file_path = temp_dir.temp_dir_name_ + "not_a_fifo";
+  int fd = ::open(file_path.c_str(), O_CREAT | O_RDWR, 0660);
+  ASSERT_GE(fd, 0) << "Could not create file: " << strerror(errno);
+  ::close(fd);
+
+  // Create new client instance, passing the file path for the fifo.
+  Jobserver::Config config;
+  config.mode = Jobserver::Config::kModePosixFifo;
+  config.path = file_path;
+
+  std::string error;
+  std::unique_ptr<Jobserver::Client> client =
+      Jobserver::Client::Create(config, &error);
+  EXPECT_FALSE(client.get());
+  EXPECT_FALSE(error.empty());
+  EXPECT_EQ("Not a fifo path: " + file_path, error);
+
+  // Do the same with an empty file path.
+  error.clear();
+  config.path.clear();
+  client = Jobserver::Client::Create(config, &error);
+  EXPECT_FALSE(client.get());
+  EXPECT_FALSE(error.empty());
+  EXPECT_EQ("Empty fifo path", error);
+}
+#endif  // !_WIN32
diff --git a/src/json.h b/src/json.h
index f39c759..3e5cf78 100644
--- a/src/json.h
+++ b/src/json.h
@@ -17,7 +17,7 @@
 
 #include <string>
 
-// Encode a string in JSON format without encolsing quotes
+// Encode a string in JSON format without enclosing quotes
 std::string EncodeJSONString(const std::string& in);
 
 // Print a string in JSON format to stdout without enclosing quotes
diff --git a/src/lexer.cc b/src/lexer.cc
index e5729f0..8edcf7b 100644
--- a/src/lexer.cc
+++ b/src/lexer.cc
@@ -164,289 +164,289 @@
 	};
 	yych = *p;
 	if (yybm[0+yych] & 32) {
-		goto yy9;
+		goto yy6;
 	}
 	if (yych <= '^') {
 		if (yych <= ',') {
 			if (yych <= '\f') {
-				if (yych <= 0x00) goto yy2;
-				if (yych == '\n') goto yy6;
-				goto yy4;
+				if (yych <= 0x00) goto yy1;
+				if (yych == '\n') goto yy4;
+				goto yy2;
 			} else {
-				if (yych <= '\r') goto yy8;
-				if (yych == '#') goto yy12;
-				goto yy4;
+				if (yych <= '\r') goto yy5;
+				if (yych == '#') goto yy8;
+				goto yy2;
 			}
 		} else {
 			if (yych <= ':') {
-				if (yych == '/') goto yy4;
-				if (yych <= '9') goto yy13;
-				goto yy16;
+				if (yych == '/') goto yy2;
+				if (yych <= '9') goto yy9;
+				goto yy11;
 			} else {
 				if (yych <= '=') {
-					if (yych <= '<') goto yy4;
-					goto yy18;
+					if (yych <= '<') goto yy2;
+					goto yy12;
 				} else {
-					if (yych <= '@') goto yy4;
-					if (yych <= 'Z') goto yy13;
-					goto yy4;
+					if (yych <= '@') goto yy2;
+					if (yych <= 'Z') goto yy9;
+					goto yy2;
 				}
 			}
 		}
 	} else {
 		if (yych <= 'i') {
 			if (yych <= 'b') {
-				if (yych == '`') goto yy4;
-				if (yych <= 'a') goto yy13;
-				goto yy20;
+				if (yych == '`') goto yy2;
+				if (yych <= 'a') goto yy9;
+				goto yy13;
 			} else {
-				if (yych == 'd') goto yy21;
-				if (yych <= 'h') goto yy13;
-				goto yy22;
+				if (yych == 'd') goto yy14;
+				if (yych <= 'h') goto yy9;
+				goto yy15;
 			}
 		} else {
 			if (yych <= 'r') {
-				if (yych == 'p') goto yy23;
-				if (yych <= 'q') goto yy13;
-				goto yy24;
+				if (yych == 'p') goto yy16;
+				if (yych <= 'q') goto yy9;
+				goto yy17;
 			} else {
 				if (yych <= 'z') {
-					if (yych <= 's') goto yy25;
-					goto yy13;
+					if (yych <= 's') goto yy18;
+					goto yy9;
 				} else {
-					if (yych == '|') goto yy26;
-					goto yy4;
+					if (yych == '|') goto yy19;
+					goto yy2;
 				}
 			}
 		}
 	}
-yy2:
+yy1:
 	++p;
 	{ token = TEOF;     break; }
+yy2:
+	++p;
+yy3:
+	{ token = ERROR;    break; }
 yy4:
 	++p;
-yy5:
-	{ token = ERROR;    break; }
-yy6:
-	++p;
 	{ token = NEWLINE;  break; }
-yy8:
+yy5:
 	yych = *++p;
-	if (yych == '\n') goto yy28;
-	goto yy5;
-yy9:
+	if (yych == '\n') goto yy20;
+	goto yy3;
+yy6:
 	yyaccept = 0;
 	yych = *(q = ++p);
 	if (yybm[0+yych] & 32) {
-		goto yy9;
+		goto yy6;
 	}
 	if (yych <= '\f') {
-		if (yych == '\n') goto yy6;
+		if (yych == '\n') goto yy4;
 	} else {
-		if (yych <= '\r') goto yy30;
-		if (yych == '#') goto yy32;
+		if (yych <= '\r') goto yy21;
+		if (yych == '#') goto yy23;
 	}
-yy11:
+yy7:
 	{ token = INDENT;   break; }
-yy12:
+yy8:
 	yyaccept = 1;
 	yych = *(q = ++p);
-	if (yych <= 0x00) goto yy5;
-	goto yy33;
-yy13:
+	if (yych <= 0x00) goto yy3;
+	goto yy24;
+yy9:
 	yych = *++p;
-yy14:
+yy10:
 	if (yybm[0+yych] & 64) {
-		goto yy13;
+		goto yy9;
 	}
 	{ token = IDENT;    break; }
-yy16:
+yy11:
 	++p;
 	{ token = COLON;    break; }
-yy18:
+yy12:
 	++p;
 	{ token = EQUALS;   break; }
-yy20:
+yy13:
 	yych = *++p;
-	if (yych == 'u') goto yy36;
-	goto yy14;
-yy21:
+	if (yych == 'u') goto yy25;
+	goto yy10;
+yy14:
 	yych = *++p;
-	if (yych == 'e') goto yy37;
-	goto yy14;
-yy22:
+	if (yych == 'e') goto yy26;
+	goto yy10;
+yy15:
 	yych = *++p;
-	if (yych == 'n') goto yy38;
-	goto yy14;
-yy23:
+	if (yych == 'n') goto yy27;
+	goto yy10;
+yy16:
 	yych = *++p;
-	if (yych == 'o') goto yy39;
-	goto yy14;
-yy24:
+	if (yych == 'o') goto yy28;
+	goto yy10;
+yy17:
 	yych = *++p;
-	if (yych == 'u') goto yy40;
-	goto yy14;
-yy25:
+	if (yych == 'u') goto yy29;
+	goto yy10;
+yy18:
 	yych = *++p;
-	if (yych == 'u') goto yy41;
-	goto yy14;
-yy26:
+	if (yych == 'u') goto yy30;
+	goto yy10;
+yy19:
 	yych = *++p;
-	if (yych == '@') goto yy42;
-	if (yych == '|') goto yy44;
+	if (yych == '@') goto yy31;
+	if (yych == '|') goto yy32;
 	{ token = PIPE;     break; }
-yy28:
+yy20:
 	++p;
 	{ token = NEWLINE;  break; }
-yy30:
+yy21:
 	yych = *++p;
-	if (yych == '\n') goto yy28;
-yy31:
+	if (yych == '\n') goto yy20;
+yy22:
 	p = q;
 	if (yyaccept == 0) {
-		goto yy11;
+		goto yy7;
 	} else {
-		goto yy5;
+		goto yy3;
 	}
-yy32:
+yy23:
 	yych = *++p;
-yy33:
+yy24:
 	if (yybm[0+yych] & 128) {
-		goto yy32;
+		goto yy23;
 	}
-	if (yych <= 0x00) goto yy31;
+	if (yych <= 0x00) goto yy22;
 	++p;
 	{ continue; }
-yy36:
+yy25:
 	yych = *++p;
-	if (yych == 'i') goto yy46;
-	goto yy14;
-yy37:
+	if (yych == 'i') goto yy33;
+	goto yy10;
+yy26:
 	yych = *++p;
-	if (yych == 'f') goto yy47;
-	goto yy14;
-yy38:
+	if (yych == 'f') goto yy34;
+	goto yy10;
+yy27:
 	yych = *++p;
-	if (yych == 'c') goto yy48;
-	goto yy14;
-yy39:
+	if (yych == 'c') goto yy35;
+	goto yy10;
+yy28:
 	yych = *++p;
-	if (yych == 'o') goto yy49;
-	goto yy14;
-yy40:
+	if (yych == 'o') goto yy36;
+	goto yy10;
+yy29:
 	yych = *++p;
-	if (yych == 'l') goto yy50;
-	goto yy14;
-yy41:
+	if (yych == 'l') goto yy37;
+	goto yy10;
+yy30:
 	yych = *++p;
-	if (yych == 'b') goto yy51;
-	goto yy14;
-yy42:
+	if (yych == 'b') goto yy38;
+	goto yy10;
+yy31:
 	++p;
 	{ token = PIPEAT;   break; }
-yy44:
+yy32:
 	++p;
 	{ token = PIPE2;    break; }
-yy46:
+yy33:
 	yych = *++p;
-	if (yych == 'l') goto yy52;
-	goto yy14;
-yy47:
+	if (yych == 'l') goto yy39;
+	goto yy10;
+yy34:
 	yych = *++p;
-	if (yych == 'a') goto yy53;
-	goto yy14;
-yy48:
+	if (yych == 'a') goto yy40;
+	goto yy10;
+yy35:
 	yych = *++p;
-	if (yych == 'l') goto yy54;
-	goto yy14;
-yy49:
+	if (yych == 'l') goto yy41;
+	goto yy10;
+yy36:
 	yych = *++p;
-	if (yych == 'l') goto yy55;
-	goto yy14;
-yy50:
+	if (yych == 'l') goto yy42;
+	goto yy10;
+yy37:
 	yych = *++p;
-	if (yych == 'e') goto yy57;
-	goto yy14;
-yy51:
+	if (yych == 'e') goto yy43;
+	goto yy10;
+yy38:
 	yych = *++p;
-	if (yych == 'n') goto yy59;
-	goto yy14;
-yy52:
+	if (yych == 'n') goto yy44;
+	goto yy10;
+yy39:
 	yych = *++p;
-	if (yych == 'd') goto yy60;
-	goto yy14;
-yy53:
+	if (yych == 'd') goto yy45;
+	goto yy10;
+yy40:
 	yych = *++p;
-	if (yych == 'u') goto yy62;
-	goto yy14;
-yy54:
+	if (yych == 'u') goto yy46;
+	goto yy10;
+yy41:
 	yych = *++p;
-	if (yych == 'u') goto yy63;
-	goto yy14;
-yy55:
+	if (yych == 'u') goto yy47;
+	goto yy10;
+yy42:
 	yych = *++p;
 	if (yybm[0+yych] & 64) {
-		goto yy13;
+		goto yy9;
 	}
 	{ token = POOL;     break; }
-yy57:
+yy43:
 	yych = *++p;
 	if (yybm[0+yych] & 64) {
-		goto yy13;
+		goto yy9;
 	}
 	{ token = RULE;     break; }
-yy59:
+yy44:
 	yych = *++p;
-	if (yych == 'i') goto yy64;
-	goto yy14;
-yy60:
+	if (yych == 'i') goto yy48;
+	goto yy10;
+yy45:
 	yych = *++p;
 	if (yybm[0+yych] & 64) {
-		goto yy13;
+		goto yy9;
 	}
 	{ token = BUILD;    break; }
-yy62:
+yy46:
 	yych = *++p;
-	if (yych == 'l') goto yy65;
-	goto yy14;
-yy63:
+	if (yych == 'l') goto yy49;
+	goto yy10;
+yy47:
 	yych = *++p;
-	if (yych == 'd') goto yy66;
-	goto yy14;
-yy64:
+	if (yych == 'd') goto yy50;
+	goto yy10;
+yy48:
 	yych = *++p;
-	if (yych == 'n') goto yy67;
-	goto yy14;
-yy65:
+	if (yych == 'n') goto yy51;
+	goto yy10;
+yy49:
 	yych = *++p;
-	if (yych == 't') goto yy68;
-	goto yy14;
-yy66:
+	if (yych == 't') goto yy52;
+	goto yy10;
+yy50:
 	yych = *++p;
-	if (yych == 'e') goto yy70;
-	goto yy14;
-yy67:
+	if (yych == 'e') goto yy53;
+	goto yy10;
+yy51:
 	yych = *++p;
-	if (yych == 'j') goto yy72;
-	goto yy14;
-yy68:
+	if (yych == 'j') goto yy54;
+	goto yy10;
+yy52:
 	yych = *++p;
 	if (yybm[0+yych] & 64) {
-		goto yy13;
+		goto yy9;
 	}
 	{ token = DEFAULT;  break; }
-yy70:
+yy53:
 	yych = *++p;
 	if (yybm[0+yych] & 64) {
-		goto yy13;
+		goto yy9;
 	}
 	{ token = INCLUDE;  break; }
-yy72:
+yy54:
 	yych = *++p;
-	if (yych != 'a') goto yy14;
+	if (yych != 'a') goto yy10;
 	yych = *++p;
 	if (yybm[0+yych] & 64) {
-		goto yy13;
+		goto yy9;
 	}
 	{ token = SUBNINJA; break; }
 }
@@ -512,38 +512,38 @@
 	};
 	yych = *p;
 	if (yybm[0+yych] & 128) {
-		goto yy81;
+		goto yy59;
 	}
-	if (yych <= 0x00) goto yy77;
-	if (yych == '$') goto yy84;
-	goto yy79;
-yy77:
+	if (yych <= 0x00) goto yy56;
+	if (yych == '$') goto yy60;
+	goto yy57;
+yy56:
 	++p;
 	{ break; }
-yy79:
+yy57:
 	++p;
-yy80:
+yy58:
 	{ break; }
-yy81:
+yy59:
 	yych = *++p;
 	if (yybm[0+yych] & 128) {
-		goto yy81;
+		goto yy59;
 	}
 	{ continue; }
-yy84:
+yy60:
 	yych = *(q = ++p);
-	if (yych == '\n') goto yy85;
-	if (yych == '\r') goto yy87;
-	goto yy80;
-yy85:
+	if (yych == '\n') goto yy61;
+	if (yych == '\r') goto yy62;
+	goto yy58;
+yy61:
 	++p;
 	{ continue; }
-yy87:
+yy62:
 	yych = *++p;
-	if (yych == '\n') goto yy89;
+	if (yych == '\n') goto yy63;
 	p = q;
-	goto yy80;
-yy89:
+	goto yy58;
+yy63:
 	++p;
 	{ continue; }
 }
@@ -595,17 +595,17 @@
 	};
 	yych = *p;
 	if (yybm[0+yych] & 128) {
-		goto yy95;
+		goto yy65;
 	}
 	++p;
 	{
       last_token_ = start;
       return false;
     }
-yy95:
+yy65:
 	yych = *++p;
 	if (yybm[0+yych] & 128) {
-		goto yy95;
+		goto yy65;
 	}
 	{
       out->assign(start, p - start);
@@ -665,33 +665,33 @@
 	};
 	yych = *p;
 	if (yybm[0+yych] & 16) {
-		goto yy102;
+		goto yy68;
 	}
 	if (yych <= '\r') {
-		if (yych <= 0x00) goto yy100;
-		if (yych <= '\n') goto yy105;
-		goto yy107;
+		if (yych <= 0x00) goto yy67;
+		if (yych <= '\n') goto yy69;
+		goto yy70;
 	} else {
-		if (yych <= ' ') goto yy105;
-		if (yych <= '$') goto yy109;
-		goto yy105;
+		if (yych <= ' ') goto yy69;
+		if (yych <= '$') goto yy71;
+		goto yy69;
 	}
-yy100:
+yy67:
 	++p;
 	{
       last_token_ = start;
       return Error("unexpected EOF", err);
     }
-yy102:
+yy68:
 	yych = *++p;
 	if (yybm[0+yych] & 16) {
-		goto yy102;
+		goto yy68;
 	}
 	{
       eval->AddText(StringPiece(start, p - start));
       continue;
     }
-yy105:
+yy69:
 	++p;
 	{
       if (path) {
@@ -704,112 +704,112 @@
         continue;
       }
     }
-yy107:
+yy70:
 	yych = *++p;
-	if (yych == '\n') goto yy110;
+	if (yych == '\n') goto yy72;
 	{
       last_token_ = start;
       return Error(DescribeLastError(), err);
     }
-yy109:
+yy71:
 	yych = *++p;
 	if (yybm[0+yych] & 64) {
-		goto yy122;
+		goto yy79;
 	}
 	if (yych <= ' ') {
 		if (yych <= '\f') {
-			if (yych == '\n') goto yy114;
-			goto yy112;
+			if (yych == '\n') goto yy75;
+			goto yy73;
 		} else {
-			if (yych <= '\r') goto yy117;
-			if (yych <= 0x1F) goto yy112;
-			goto yy118;
+			if (yych <= '\r') goto yy76;
+			if (yych <= 0x1F) goto yy73;
+			goto yy77;
 		}
 	} else {
 		if (yych <= '/') {
-			if (yych == '$') goto yy120;
-			goto yy112;
+			if (yych == '$') goto yy78;
+			goto yy73;
 		} else {
-			if (yych <= ':') goto yy125;
-			if (yych <= '`') goto yy112;
-			if (yych <= '{') goto yy127;
-			goto yy112;
+			if (yych <= ':') goto yy80;
+			if (yych <= '`') goto yy73;
+			if (yych <= '{') goto yy81;
+			goto yy73;
 		}
 	}
-yy110:
+yy72:
 	++p;
 	{
       if (path)
         p = start;
       break;
     }
-yy112:
+yy73:
 	++p;
-yy113:
+yy74:
 	{
       last_token_ = start;
       return Error("bad $-escape (literal $ must be written as $$)", err);
     }
-yy114:
+yy75:
 	yych = *++p;
 	if (yybm[0+yych] & 32) {
-		goto yy114;
+		goto yy75;
 	}
 	{
       continue;
     }
-yy117:
+yy76:
 	yych = *++p;
-	if (yych == '\n') goto yy128;
-	goto yy113;
-yy118:
+	if (yych == '\n') goto yy82;
+	goto yy74;
+yy77:
 	++p;
 	{
       eval->AddText(StringPiece(" ", 1));
       continue;
     }
-yy120:
+yy78:
 	++p;
 	{
       eval->AddText(StringPiece("$", 1));
       continue;
     }
-yy122:
+yy79:
 	yych = *++p;
 	if (yybm[0+yych] & 64) {
-		goto yy122;
+		goto yy79;
 	}
 	{
       eval->AddSpecial(StringPiece(start + 1, p - start - 1));
       continue;
     }
-yy125:
+yy80:
 	++p;
 	{
       eval->AddText(StringPiece(":", 1));
       continue;
     }
-yy127:
+yy81:
 	yych = *(q = ++p);
 	if (yybm[0+yych] & 128) {
-		goto yy131;
+		goto yy83;
 	}
-	goto yy113;
-yy128:
+	goto yy74;
+yy82:
 	yych = *++p;
-	if (yych == ' ') goto yy128;
+	if (yych == ' ') goto yy82;
 	{
       continue;
     }
-yy131:
+yy83:
 	yych = *++p;
 	if (yybm[0+yych] & 128) {
-		goto yy131;
+		goto yy83;
 	}
-	if (yych == '}') goto yy134;
+	if (yych == '}') goto yy84;
 	p = q;
-	goto yy113;
-yy134:
+	goto yy74;
+yy84:
 	++p;
 	{
       eval->AddSpecial(StringPiece(start + 2, p - start - 3));
diff --git a/src/line_printer.cc b/src/line_printer.cc
index 12e82b3..4a7b0bb 100644
--- a/src/line_printer.cc
+++ b/src/line_printer.cc
@@ -28,6 +28,7 @@
 #include <sys/time.h>
 #endif
 
+#include "elide_middle.h"
 #include "util.h"
 
 using namespace std;
@@ -81,7 +82,7 @@
     CONSOLE_SCREEN_BUFFER_INFO csbi;
     GetConsoleScreenBufferInfo(console_, &csbi);
 
-    to_print = ElideMiddle(to_print, static_cast<size_t>(csbi.dwSize.X));
+    ElideMiddleInPlace(to_print, static_cast<size_t>(csbi.dwSize.X));
     if (supports_color_) {  // this means ENABLE_VIRTUAL_TERMINAL_PROCESSING
                             // succeeded
       printf("%s\x1B[K", to_print.c_str());  // Clear to end of line.
@@ -108,7 +109,7 @@
     // line-wrapping.
     winsize size;
     if ((ioctl(STDOUT_FILENO, TIOCGWINSZ, &size) == 0) && size.ws_col) {
-      to_print = ElideMiddle(to_print, size.ws_col);
+      ElideMiddleInPlace(to_print, size.ws_col);
     }
     printf("%s", to_print.c_str());
     printf("\x1B[K");  // Clear to end of line.
diff --git a/src/manifest_parser.cc b/src/manifest_parser.cc
index c4b2980..04b3191 100644
--- a/src/manifest_parser.cc
+++ b/src/manifest_parser.cc
@@ -18,6 +18,7 @@
 #include <stdio.h>
 #include <stdlib.h>
 
+#include <memory>
 #include <vector>
 
 #include "graph.h"
@@ -143,7 +144,7 @@
   if (env_->LookupRuleCurrentScope(name) != NULL)
     return lexer_.Error("duplicate rule '" + name + "'", err);
 
-  Rule* rule = new Rule(name);  // XXX scoped_ptr
+  auto rule = std::unique_ptr<Rule>(new Rule(name));
 
   while (lexer_.PeekToken(Lexer::INDENT)) {
     string key;
@@ -169,7 +170,7 @@
   if (rule->bindings_["command"].empty())
     return lexer_.Error("expected 'command =' line", err);
 
-  env_->AddRule(rule);
+  env_->AddRule(std::move(rule));
   return true;
 }
 
@@ -209,14 +210,16 @@
 }
 
 bool ManifestParser::ParseEdge(string* err) {
-  vector<EvalString> ins, outs, validations;
+  ins_.clear();
+  outs_.clear();
+  validations_.clear();
 
   {
     EvalString out;
     if (!lexer_.ReadPath(&out, err))
       return false;
     while (!out.empty()) {
-      outs.push_back(out);
+      outs_.push_back(std::move(out));
 
       out.Clear();
       if (!lexer_.ReadPath(&out, err))
@@ -233,12 +236,12 @@
         return false;
       if (out.empty())
         break;
-      outs.push_back(out);
+      outs_.push_back(std::move(out));
       ++implicit_outs;
     }
   }
 
-  if (outs.empty())
+  if (outs_.empty())
     return lexer_.Error("expected path", err);
 
   if (!ExpectToken(Lexer::COLON, err))
@@ -259,7 +262,7 @@
       return false;
     if (in.empty())
       break;
-    ins.push_back(in);
+    ins_.push_back(std::move(in));
   }
 
   // Add all implicit deps, counting how many as we go.
@@ -271,7 +274,7 @@
         return false;
       if (in.empty())
         break;
-      ins.push_back(in);
+      ins_.push_back(std::move(in));
       ++implicit;
     }
   }
@@ -285,7 +288,7 @@
         return false;
       if (in.empty())
         break;
-      ins.push_back(in);
+      ins_.push_back(std::move(in));
       ++order_only;
     }
   }
@@ -298,7 +301,7 @@
         return false;
       if (validation.empty())
         break;
-      validations.push_back(validation);
+      validations_.push_back(std::move(validation));
     }
   }
 
@@ -329,9 +332,9 @@
     edge->pool_ = pool;
   }
 
-  edge->outputs_.reserve(outs.size());
-  for (size_t i = 0, e = outs.size(); i != e; ++i) {
-    string path = outs[i].Evaluate(env);
+  edge->outputs_.reserve(outs_.size());
+  for (size_t i = 0, e = outs_.size(); i != e; ++i) {
+    string path = outs_[i].Evaluate(env);
     if (path.empty())
       return lexer_.Error("empty path", err);
     uint64_t slash_bits;
@@ -351,8 +354,8 @@
   }
   edge->implicit_outs_ = implicit_outs;
 
-  edge->inputs_.reserve(ins.size());
-  for (vector<EvalString>::iterator i = ins.begin(); i != ins.end(); ++i) {
+  edge->inputs_.reserve(ins_.size());
+  for (vector<EvalString>::iterator i = ins_.begin(); i != ins_.end(); ++i) {
     string path = i->Evaluate(env);
     if (path.empty())
       return lexer_.Error("empty path", err);
@@ -363,9 +366,9 @@
   edge->implicit_deps_ = implicit;
   edge->order_only_deps_ = order_only;
 
-  edge->validations_.reserve(validations.size());
-  for (std::vector<EvalString>::iterator v = validations.begin();
-      v != validations.end(); ++v) {
+  edge->validations_.reserve(validations_.size());
+  for (std::vector<EvalString>::iterator v = validations_.begin();
+      v != validations_.end(); ++v) {
     string path = v->Evaluate(env);
     if (path.empty())
       return lexer_.Error("empty path", err);
@@ -419,14 +422,16 @@
     return false;
   string path = eval.Evaluate(env_);
 
-  ManifestParser subparser(state_, file_reader_, options_);
+  if (subparser_ == nullptr) {
+    subparser_.reset(new ManifestParser(state_, file_reader_, options_));
+  }
   if (new_scope) {
-    subparser.env_ = new BindingEnv(env_);
+    subparser_->env_ = new BindingEnv(env_);
   } else {
-    subparser.env_ = env_;
+    subparser_->env_ = env_;
   }
 
-  if (!subparser.Load(path, err, &lexer_))
+  if (!subparser_->Load(path, err, &lexer_))
     return false;
 
   if (!ExpectToken(Lexer::NEWLINE, err))
diff --git a/src/manifest_parser.h b/src/manifest_parser.h
index db6812d..ce37759 100644
--- a/src/manifest_parser.h
+++ b/src/manifest_parser.h
@@ -17,6 +17,9 @@
 
 #include "parser.h"
 
+#include <memory>
+#include <vector>
+
 struct BindingEnv;
 struct EvalString;
 
@@ -63,6 +66,12 @@
   BindingEnv* env_;
   ManifestParserOptions options_;
   bool quiet_;
+
+  // ins_/outs_/validations_ are reused across invocations of ParseEdge()
+  // to avoid repeated memory reallocations.
+  // subparser_ is reused solely so its own ins_/outs_/validations_ get reused too.
+  std::unique_ptr<ManifestParser> subparser_;
+  std::vector<EvalString> ins_, outs_, validations_;
 };
 
 #endif  // NINJA_MANIFEST_PARSER_H_
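(Editorial note, not part of the patch.) The reuse comment above relies on std::vector::clear() keeping its capacity; a tiny sketch of the pattern, with illustrative names:

#include <string>
#include <vector>

struct EdgeScratch {
  std::vector<std::string> ins, outs;

  void ParseOneEdge() {
    // clear() resets the size to zero but keeps the allocated capacity, so
    // after the first few edges push_back() stops hitting the allocator.
    ins.clear();
    outs.clear();
    ins.push_back("foo.cc");
    outs.push_back("foo.o");
  }
};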
diff --git a/src/manifest_parser_test.cc b/src/manifest_parser_test.cc
index c5a1fe8..03ce0b1 100644
--- a/src/manifest_parser_test.cc
+++ b/src/manifest_parser_test.cc
@@ -51,7 +51,7 @@
 "build result: cat in_1.cc in-2.O\n"));
 
   ASSERT_EQ(3u, state.bindings_.GetRules().size());
-  const Rule* rule = state.bindings_.GetRules().begin()->second;
+  const auto& rule = state.bindings_.GetRules().begin()->second;
   EXPECT_EQ("cat", rule->name());
   EXPECT_EQ("[cat ][$in][ > ][$out]",
             rule->GetBinding("command")->Serialize());
@@ -84,7 +84,7 @@
 "  #comment\n"));
 
   ASSERT_EQ(2u, state.bindings_.GetRules().size());
-  const Rule* rule = state.bindings_.GetRules().begin()->second;
+  const auto& rule = state.bindings_.GetRules().begin()->second;
   EXPECT_EQ("cat", rule->name());
   Edge* edge = state.GetNode("result", 0)->in_edge();
   EXPECT_TRUE(edge->GetBindingBool("restat"));
@@ -117,7 +117,7 @@
 "  rspfile=out.rsp\n"));
 
   ASSERT_EQ(2u, state.bindings_.GetRules().size());
-  const Rule* rule = state.bindings_.GetRules().begin()->second;
+  const auto& rule = state.bindings_.GetRules().begin()->second;
   EXPECT_EQ("cat_rsp", rule->name());
   EXPECT_EQ("[cat ][$rspfile][ > ][$out]",
             rule->GetBinding("command")->Serialize());
@@ -134,7 +134,7 @@
 "  rspfile=out.rsp\n"));
 
   ASSERT_EQ(2u, state.bindings_.GetRules().size());
-  const Rule* rule = state.bindings_.GetRules().begin()->second;
+  const auto& rule = state.bindings_.GetRules().begin()->second;
   EXPECT_EQ("cat_rsp", rule->name());
   EXPECT_EQ("[cat ][$in_newline][ > ][$out]",
             rule->GetBinding("command")->Serialize());
@@ -195,7 +195,7 @@
 " d e f\n"));
 
   ASSERT_EQ(2u, state.bindings_.GetRules().size());
-  const Rule* rule = state.bindings_.GetRules().begin()->second;
+  const auto& rule = state.bindings_.GetRules().begin()->second;
   EXPECT_EQ("link", rule->name());
   EXPECT_EQ("[foo bar baz]", rule->GetBinding("command")->Serialize());
 }
@@ -380,7 +380,7 @@
 
   Node* node = state.LookupNode("a");
   Edge* edge = node->in_edge();
-  ASSERT_EQ(edge->inputs_.size(), 1);
+  ASSERT_EQ(edge->inputs_.size(), size_t(1));
   ASSERT_EQ(edge->inputs_[0], node);
 }
 
@@ -944,7 +944,7 @@
 "build foo: cat bar |@ baz\n"));
 
   Edge* edge = state.LookupNode("foo")->in_edge();
-  ASSERT_EQ(edge->validations_.size(), 1);
+  ASSERT_EQ(edge->validations_.size(), size_t(1));
   EXPECT_EQ(edge->validations_[0]->path(), "baz");
 }
 
@@ -955,7 +955,7 @@
 "build foo | imp: cat bar\n"));
 
   Edge* edge = state.LookupNode("imp")->in_edge();
-  ASSERT_EQ(edge->outputs_.size(), 2);
+  ASSERT_EQ(edge->outputs_.size(), size_t(2));
   EXPECT_TRUE(edge->is_implicit_out(1));
 }
 
@@ -966,7 +966,7 @@
 "build foo | : cat bar\n"));
 
   Edge* edge = state.LookupNode("foo")->in_edge();
-  ASSERT_EQ(edge->outputs_.size(), 1);
+  ASSERT_EQ(edge->outputs_.size(), size_t(1));
   EXPECT_FALSE(edge->is_implicit_out(0));
 }
 
diff --git a/src/metrics.cc b/src/metrics.cc
index 632ae43..e7cb4d1 100644
--- a/src/metrics.cc
+++ b/src/metrics.cc
@@ -37,15 +37,6 @@
       .count();
 }
 
-constexpr int64_t GetFrequency() {
-  // If numerator isn't 1 then we lose precision and that will need to be
-  // assessed.
-  static_assert(std::chrono::steady_clock::period::num == 1,
-                "Numerator must be 1");
-  return std::chrono::steady_clock::period::den /
-         std::chrono::steady_clock::period::num;
-}
-
 int64_t TimerToMicros(int64_t dt) {
   // dt is in ticks.  We want microseconds.
   return chrono::duration_cast<chrono::microseconds>(
diff --git a/src/minidump-win32.cc b/src/minidump-win32.cc
index 9aea767..c0d27a5 100644
--- a/src/minidump-win32.cc
+++ b/src/minidump-win32.cc
@@ -15,7 +15,7 @@
 #ifdef _MSC_VER
 
 #include <windows.h>
-#include <DbgHelp.h>
+#include <dbghelp.h>
 
 #include "util.h"
 
@@ -51,8 +51,8 @@
     return;
   }
 
-  MiniDumpWriteDumpFunc mini_dump_write_dump =
-      (MiniDumpWriteDumpFunc)GetProcAddress(dbghelp, "MiniDumpWriteDump");
+  MiniDumpWriteDumpFunc mini_dump_write_dump = FunctionCast
+      <MiniDumpWriteDumpFunc>(GetProcAddress(dbghelp, "MiniDumpWriteDump"));
   if (mini_dump_write_dump == NULL) {
     Error("failed to create minidump: GetProcAddress('MiniDumpWriteDump'): %s",
           GetLastErrorString().c_str());
diff --git a/src/missing_deps.cc b/src/missing_deps.cc
index de76620..f96a5e7 100644
--- a/src/missing_deps.cc
+++ b/src/missing_deps.cc
@@ -33,9 +33,9 @@
   NodeStoringImplicitDepLoader(
       State* state, DepsLog* deps_log, DiskInterface* disk_interface,
       DepfileParserOptions const* depfile_parser_options,
-      std::vector<Node*>* dep_nodes_output)
+      Explanations* explanations, std::vector<Node*>* dep_nodes_output)
       : ImplicitDepLoader(state, deps_log, disk_interface,
-                          depfile_parser_options),
+                          depfile_parser_options, explanations),
         dep_nodes_output_(dep_nodes_output) {}
 
  protected:
@@ -98,7 +98,8 @@
     DepfileParserOptions parser_opts;
     std::vector<Node*> depfile_deps;
     NodeStoringImplicitDepLoader dep_loader(state_, deps_log_, disk_interface_,
-                                            &parser_opts, &depfile_deps);
+                                            &parser_opts, nullptr,
+                                            &depfile_deps);
     std::string err;
     dep_loader.LoadDeps(edge, &err);
     if (!depfile_deps.empty())
diff --git a/src/msvc_helper_main-win32.cc b/src/msvc_helper_main-win32.cc
index 7d59307..972982c 100644
--- a/src/msvc_helper_main-win32.cc
+++ b/src/msvc_helper_main-win32.cc
@@ -54,23 +54,23 @@
   string depfile_path = string(object_path) + ".d";
   FILE* depfile = fopen(depfile_path.c_str(), "w");
   if (!depfile) {
-    unlink(object_path);
+    platformAwareUnlink(object_path);
     Fatal("opening %s: %s", depfile_path.c_str(),
           GetLastErrorString().c_str());
   }
   if (fprintf(depfile, "%s: ", object_path) < 0) {
-    unlink(object_path);
+    platformAwareUnlink(object_path);
     fclose(depfile);
-    unlink(depfile_path.c_str());
+    platformAwareUnlink(depfile_path.c_str());
     Fatal("writing %s", depfile_path.c_str());
   }
   const set<string>& headers = parse.includes_;
   for (set<string>::const_iterator i = headers.begin();
        i != headers.end(); ++i) {
     if (fprintf(depfile, "%s\n", EscapeForDepfile(*i).c_str()) < 0) {
-      unlink(object_path);
+      platformAwareUnlink(object_path);
       fclose(depfile);
-      unlink(depfile_path.c_str());
+      platformAwareUnlink(depfile_path.c_str());
       Fatal("writing %s", depfile_path.c_str());
     }
   }
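platformAwareUnlink is defined elsewhere in this change; the name suggests a thin wrapper that hides platform differences around deleting files. A hypothetical minimal stand-in, for illustration only:

    #include <cstdio>

    // Hypothetical stand-in for platformAwareUnlink (the real helper may
    // differ, e.g. by converting UTF-8 paths to UTF-16 on Windows):
    // std::remove deletes a file portably on both POSIX and Windows.
    inline int PlatformAwareUnlinkSketch(const char* path) {
      return std::remove(path);
    }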
diff --git a/src/ninja.cc b/src/ninja.cc
index ce1beda..85ae6eb 100644
--- a/src/ninja.cc
+++ b/src/ninja.cc
@@ -20,6 +20,8 @@
 
 #include <algorithm>
 #include <cstdlib>
+#include <cstring>
+#include <string>
 
 #ifdef _WIN32
 #include "getopt.h"
@@ -36,13 +38,15 @@
 #include "browse.h"
 #include "build.h"
 #include "build_log.h"
-#include "deps_log.h"
 #include "clean.h"
+#include "command_collector.h"
 #include "debug_flags.h"
-#include "depfile_parser.h"
+#include "deps_log.h"
 #include "disk_interface.h"
+#include "exit_status.h"
 #include "graph.h"
 #include "graphviz.h"
+#include "jobserver.h"
 #include "json.h"
 #include "manifest_parser.h"
 #include "metrics.h"
@@ -127,9 +131,12 @@
   int ToolTargets(const Options* options, int argc, char* argv[]);
   int ToolCommands(const Options* options, int argc, char* argv[]);
   int ToolInputs(const Options* options, int argc, char* argv[]);
+  int ToolMultiInputs(const Options* options, int argc, char* argv[]);
   int ToolClean(const Options* options, int argc, char* argv[]);
   int ToolCleanDead(const Options* options, int argc, char* argv[]);
   int ToolCompilationDatabase(const Options* options, int argc, char* argv[]);
+  int ToolCompilationDatabaseForTargets(const Options* options, int argc,
+                                        char* argv[]);
   int ToolRecompact(const Options* options, int argc, char* argv[]);
   int ToolRestat(const Options* options, int argc, char* argv[]);
   int ToolUrtle(const Options* options, int argc, char** argv);
@@ -154,12 +161,16 @@
   bool RebuildManifest(const char* input_file, string* err, Status* status);
 
   /// For each edge, lookup in build log how long it took last time,
-  /// and record that in the edge itself. It will be used for ETA predicton.
+  /// and record that in the edge itself. It will be used for ETA prediction.
   void ParsePreviousElapsedTimes();
 
+  /// Create a jobserver client if needed. Return a nullptr value if
+  /// not. Prints info and warnings to \a status.
+  std::unique_ptr<Jobserver::Client> SetupJobserverClient(Status* status);
+
   /// Build the targets listed on the command line.
   /// @return an exit code.
-  int RunBuild(int argc, char** argv, Status* status);
+  ExitStatus RunBuild(int argc, char** argv, Status* status);
 
   /// Dump the output requested by '-d stats'.
   void DumpMetrics();
@@ -275,7 +286,7 @@
   if (builder.AlreadyUpToDate())
     return false;  // Not an error, but we didn't rebuild.
 
-  if (!builder.Build(err))
+  if (builder.Build(err) != ExitSuccess)
     return false;
 
   // The manifest was only rebuilt if it is now dirty (it may have been cleaned
@@ -674,12 +685,12 @@
 
   // Print rules
 
-  typedef map<string, const Rule*> Rules;
+  typedef map<string, std::unique_ptr<const Rule>> Rules;
   const Rules& rules = state_.bindings_.GetRules();
   for (Rules::const_iterator i = rules.begin(); i != rules.end(); ++i) {
     printf("%s", i->first.c_str());
     if (print_description) {
-      const Rule* rule = i->second;
+      const Rule* rule = i->second.get();
       const EvalString* description = rule->GetBinding("description");
       if (description != NULL) {
         printf(": %s", description->Unparse().c_str());
@@ -761,43 +772,50 @@
   return 0;
 }
 
-void CollectInputs(Edge* edge, std::set<Edge*>* seen,
-                   std::vector<std::string>* result) {
-  if (!edge)
-    return;
-  if (!seen->insert(edge).second)
-    return;
-
-  for (vector<Node*>::iterator in = edge->inputs_.begin();
-       in != edge->inputs_.end(); ++in)
-    CollectInputs((*in)->in_edge(), seen, result);
-
-  if (!edge->is_phony()) {
-    edge->CollectInputs(true, result);
-  }
-}
-
 int NinjaMain::ToolInputs(const Options* options, int argc, char* argv[]) {
   // The inputs tool uses getopt, and expects argv[0] to contain the name of
   // the tool, i.e. "inputs".
   argc++;
   argv--;
+
+  bool print0 = false;
+  bool shell_escape = true;
+  bool dependency_order = false;
+
   optind = 1;
   int opt;
   const option kLongOptions[] = { { "help", no_argument, NULL, 'h' },
+                                  { "no-shell-escape", no_argument, NULL, 'E' },
+                                  { "print0", no_argument, NULL, '0' },
+                                  { "dependency-order", no_argument, NULL,
+                                    'd' },
                                   { NULL, 0, NULL, 0 } };
-  while ((opt = getopt_long(argc, argv, "h", kLongOptions, NULL)) != -1) {
+  while ((opt = getopt_long(argc, argv, "h0Ed", kLongOptions, NULL)) != -1) {
     switch (opt) {
+    case 'd':
+      dependency_order = true;
+      break;
+    case 'E':
+      shell_escape = false;
+      break;
+    case '0':
+      print0 = true;
+      break;
     case 'h':
     default:
       // clang-format off
       printf(
 "Usage '-t inputs [options] [targets]\n"
 "\n"
-"List all inputs used for a set of targets. Note that this includes\n"
-"explicit, implicit and order-only inputs, but not validation ones.\n\n"
+"List all inputs used for a set of targets, sorted in dependency order.\n"
+"Note that by default, results are shell escaped, and sorted alphabetically,\n"
+"and never include validation target paths.\n\n"
 "Options:\n"
-"  -h, --help   Print this message.\n");
+"  -h, --help          Print this message.\n"
+"  -0, --print0            Use \\0, instead of \\n as a line terminator.\n"
+"  -E, --no-shell-escape   Do not shell escape the result.\n"
+"  -d, --dependency-order  Sort results by dependency order.\n"
+      );
       // clang-format on
       return 1;
     }
@@ -805,24 +823,98 @@
   argv += optind;
   argc -= optind;
 
-  vector<Node*> nodes;
-  string err;
+  std::vector<Node*> nodes;
+  std::string err;
   if (!CollectTargetsFromArgs(argc, argv, &nodes, &err)) {
     Error("%s", err.c_str());
     return 1;
   }
 
-  std::set<Edge*> seen;
-  std::vector<std::string> result;
-  for (vector<Node*>::iterator in = nodes.begin(); in != nodes.end(); ++in)
-    CollectInputs((*in)->in_edge(), &seen, &result);
+  InputsCollector collector;
+  for (const Node* node : nodes)
+    collector.VisitNode(node);
 
-  // Make output deterministic by sorting then removing duplicates.
-  std::sort(result.begin(), result.end());
-  result.erase(std::unique(result.begin(), result.end()), result.end());
+  std::vector<std::string> inputs = collector.GetInputsAsStrings(shell_escape);
+  if (!dependency_order)
+    std::sort(inputs.begin(), inputs.end());
 
-  for (size_t n = 0; n < result.size(); ++n)
-    puts(result[n].c_str());
+  if (print0) {
+    for (const std::string& input : inputs) {
+      fwrite(input.c_str(), input.size(), 1, stdout);
+      fputc('\0', stdout);
+    }
+    fflush(stdout);
+  } else {
+    for (const std::string& input : inputs)
+      puts(input.c_str());
+  }
+  return 0;
+}
+
+int NinjaMain::ToolMultiInputs(const Options* options, int argc, char* argv[]) {
+  // The multi-inputs tool uses getopt, and expects argv[0] to contain the
+  // name of the tool, i.e. "multi-inputs".
+  argc++;
+  argv--;
+
+  optind = 1;
+  int opt;
+  char terminator = '\n';
+  const char* delimiter = "\t";
+  const option kLongOptions[] = { { "help", no_argument, NULL, 'h' },
+                                  { "delimiter", required_argument, NULL,
+                                    'd' },
+                                  { "print0", no_argument, NULL, '0' },
+                                  { NULL, 0, NULL, 0 } };
+  while ((opt = getopt_long(argc, argv, "d:h0", kLongOptions, NULL)) != -1) {
+    switch (opt) {
+    case 'd':
+      delimiter = optarg;
+      break;
+    case '0':
+      terminator = '\0';
+      break;
+    case 'h':
+    default:
+      // clang-format off
+      printf(
+"Usage '-t multi-inputs [options] [targets]\n"
+"\n"
+"Print one or more sets of inputs required to build targets, sorted in dependency order.\n"
+"The tool works like inputs tool but with addition of the target for each line.\n"
+"The output will be a series of lines with the following elements:\n"
+"<target> <delimiter> <input> <terminator>\n"
+"Note that a given input may appear for several targets if it is used by more than one targets.\n"
+"Options:\n"
+"  -h, --help                   Print this message.\n"
+"  -d  --delimiter=DELIM        Use DELIM instead of TAB for field delimiter.\n"
+"  -0, --print0                 Use \\0, instead of \\n as a line terminator.\n"
+      );
+      // clang-format on
+      return 1;
+    }
+  }
+  argv += optind;
+  argc -= optind;
+
+  std::vector<Node*> nodes;
+  std::string err;
+  if (!CollectTargetsFromArgs(argc, argv, &nodes, &err)) {
+    Error("%s", err.c_str());
+    return 1;
+  }
+
+  for (const Node* node : nodes) {
+    InputsCollector collector;
+
+    collector.VisitNode(node);
+    std::vector<std::string> inputs = collector.GetInputsAsStrings();
+
+    for (const std::string& input : inputs) {
+      printf("%s%s%s", node->path().c_str(), delimiter, input.c_str());
+      fputc(terminator, stdout);
+    }
+  }
 
   return 0;
 }
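The InputsCollector used by the two tools above is declared in graph.h and is not part of this hunk; conceptually it replaces the removed recursive CollectInputs() with a visitor that records inputs in dependency order. A rough, hypothetical sketch of that traversal, reusing the Node/Edge accessors visible in this diff:

    #include <set>
    #include <string>
    #include <vector>

    // Rough sketch of dependency-order input collection (not the actual
    // InputsCollector): visit the producing edge of each node depth-first so
    // that the inputs of a dependency are emitted before its dependents'.
    void CollectInputsDepthFirst(const Node* node, std::set<const Edge*>* seen,
                                 std::vector<std::string>* result) {
      const Edge* edge = node->in_edge();
      if (!edge || !seen->insert(edge).second)
        return;  // source file, or an edge we already visited
      for (const Node* input : edge->inputs_) {
        CollectInputsDepthFirst(input, seen, result);
        if (!edge->is_phony())
          result->push_back(input->path());
      }
    }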
@@ -919,8 +1011,8 @@
   return command;
 }
 
-void printCompdb(const char* const directory, const Edge* const edge,
-                 const EvaluateCommandMode eval_mode) {
+void PrintOneCompdbObject(std::string const& directory, const Edge* const edge,
+                          const EvaluateCommandMode eval_mode) {
   printf("\n  {\n    \"directory\": \"");
   PrintJSONString(directory);
   printf("\",\n    \"command\": \"");
@@ -964,37 +1056,25 @@
   argc -= optind;
 
   bool first = true;
-  vector<char> cwd;
-  char* success = NULL;
 
-  do {
-    cwd.resize(cwd.size() + 1024);
-    errno = 0;
-    success = getcwd(&cwd[0], cwd.size());
-  } while (!success && errno == ERANGE);
-  if (!success) {
-    Error("cannot determine working directory: %s", strerror(errno));
-    return 1;
-  }
-
+  std::string directory = GetWorkingDirectory();
   putchar('[');
-  for (vector<Edge*>::iterator e = state_.edges_.begin();
-       e != state_.edges_.end(); ++e) {
-    if ((*e)->inputs_.empty())
+  for (const Edge* edge : state_.edges_) {
+    if (edge->inputs_.empty())
       continue;
     if (argc == 0) {
       if (!first) {
         putchar(',');
       }
-      printCompdb(&cwd[0], *e, eval_mode);
+      PrintOneCompdbObject(directory, edge, eval_mode);
       first = false;
     } else {
       for (int i = 0; i != argc; ++i) {
-        if ((*e)->rule_->name() == argv[i]) {
+        if (edge->rule_->name() == argv[i]) {
           if (!first) {
             putchar(',');
           }
-          printCompdb(&cwd[0], *e, eval_mode);
+          PrintOneCompdbObject(directory, edge, eval_mode);
           first = false;
         }
       }
@@ -1074,6 +1154,118 @@
   return EXIT_SUCCESS;
 }
 
+struct CompdbTargets {
+  enum class Action { kDisplayHelpAndExit, kEmitCommands };
+
+  Action action;
+  EvaluateCommandMode eval_mode = ECM_NORMAL;
+
+  std::vector<std::string> targets;
+
+  static CompdbTargets CreateFromArgs(int argc, char* argv[]) {
+    //
+    // grammar:
+    //     ninja -t compdb-targets [-hx] target [targets]
+    //
+    CompdbTargets ret;
+
+    // getopt() expects argv[0] to contain the name of
+    // the tool, i.e. "compdb-targets".
+    argc++;
+    argv--;
+
+    // Phase 1: parse options:
+    optind = 1;  // see `man 3 getopt` for documentation on optind
+    int opt;
+    while ((opt = getopt(argc, argv, const_cast<char*>("hx"))) != -1) {
+      switch (opt) {
+      case 'x':
+        ret.eval_mode = ECM_EXPAND_RSPFILE;
+        break;
+      case 'h':
+      default:
+        ret.action = CompdbTargets::Action::kDisplayHelpAndExit;
+        return ret;
+      }
+    }
+
+    // Phase 2: parse operands:
+    int const targets_begin = optind;
+    int const targets_end = argc;
+
+    if (targets_begin == targets_end) {
+      Error("compdb-targets expects the name of at least one target");
+      ret.action = CompdbTargets::Action::kDisplayHelpAndExit;
+    } else {
+      ret.action = CompdbTargets::Action::kEmitCommands;
+      for (int i = targets_begin; i < targets_end; ++i) {
+        ret.targets.push_back(argv[i]);
+      }
+    }
+
+    return ret;
+  }
+};
+
+void PrintCompdb(std::string const& directory, std::vector<Edge*> const& edges,
+                 const EvaluateCommandMode eval_mode) {
+  putchar('[');
+
+  bool first = true;
+  for (const Edge* edge : edges) {
+    if (edge->is_phony() || edge->inputs_.empty())
+      continue;
+    if (!first)
+      putchar(',');
+    PrintOneCompdbObject(directory, edge, eval_mode);
+    first = false;
+  }
+
+  puts("\n]");
+}
+
+int NinjaMain::ToolCompilationDatabaseForTargets(const Options* options,
+                                                 int argc, char* argv[]) {
+  auto compdb = CompdbTargets::CreateFromArgs(argc, argv);
+
+  switch (compdb.action) {
+  case CompdbTargets::Action::kDisplayHelpAndExit: {
+    printf(
+        "usage: ninja -t compdb [-hx] target [targets]\n"
+        "\n"
+        "options:\n"
+        "  -h     display this help message\n"
+        "  -x     expand @rspfile style response file invocations\n");
+    return 1;
+  }
+
+  case CompdbTargets::Action::kEmitCommands: {
+    CommandCollector collector;
+
+    for (const std::string& target_arg : compdb.targets) {
+      std::string err;
+      Node* node = CollectTarget(target_arg.c_str(), &err);
+      if (!node) {
+        Fatal("%s", err.c_str());
+        return 1;
+      }
+      if (!node->in_edge()) {
+        Fatal(
+            "'%s' is not a target "
+            "(i.e. it is not an output of any `build` statement)",
+            node->path().c_str());
+      }
+      collector.CollectFrom(node);
+    }
+
+    std::string directory = GetWorkingDirectory();
+    PrintCompdb(directory, collector.in_edges, compdb.eval_mode);
+  } break;
+  }
+
+  return 0;
+}
+
 int NinjaMain::ToolUrtle(const Options* options, int argc, char** argv) {
   // RLE encoded.
   const char* urtle =
@@ -1116,6 +1308,8 @@
       Tool::RUN_AFTER_LOAD, &NinjaMain::ToolCommands },
     { "inputs", "list all inputs required to rebuild given targets",
       Tool::RUN_AFTER_LOAD, &NinjaMain::ToolInputs},
+    { "multi-inputs", "print one or more sets of inputs required to build targets",
+      Tool::RUN_AFTER_LOAD, &NinjaMain::ToolMultiInputs},
     { "deps", "show dependencies stored in the deps log",
       Tool::RUN_AFTER_LOGS, &NinjaMain::ToolDeps },
     { "missingdeps", "check deps log dependencies on generated files",
@@ -1128,6 +1322,9 @@
       Tool::RUN_AFTER_LOAD, &NinjaMain::ToolTargets },
     { "compdb",  "dump JSON compilation database to stdout",
       Tool::RUN_AFTER_LOAD, &NinjaMain::ToolCompilationDatabase },
+    { "compdb-targets",
+      "dump JSON compilation database for a given list of targets to stdout",
+      Tool::RUN_AFTER_LOAD, &NinjaMain::ToolCompilationDatabaseForTargets },
     { "recompact",  "recompacts ninja-internal data structures",
       Tool::RUN_AFTER_LOAD, &NinjaMain::ToolRecompact },
     { "restat",  "restats all outputs in the build log",
@@ -1230,6 +1427,10 @@
   } else if (name == "phonycycle=warn") {
     options->phony_cycle_should_err = false;
     return true;
+  } else if (name == "dupbuild=err" ||
+             name == "dupbuild=warn") {
+    Warning("deprecated warning 'dupbuild'");
+    return true;
   } else if (name == "depfilemulti=err" ||
              name == "depfilemulti=warn") {
     Warning("deprecated warning 'depfilemulti'");
@@ -1345,23 +1546,76 @@
   return true;
 }
 
-int NinjaMain::RunBuild(int argc, char** argv, Status* status) {
-  string err;
-  vector<Node*> targets;
+std::unique_ptr<Jobserver::Client> NinjaMain::SetupJobserverClient(
+    Status* status) {
+  // Empty result by default.
+  std::unique_ptr<Jobserver::Client> result;
+
+  // With a dry run or an explicit job count, don't even look at MAKEFLAGS.
+  if (config_.disable_jobserver_client)
+    return result;
+
+  const char* makeflags = getenv("MAKEFLAGS");
+  if (!makeflags) {
+    // MAKEFLAGS is not defined.
+    return result;
+  }
+
+  std::string err;
+  Jobserver::Config jobserver_config;
+  if (!Jobserver::ParseNativeMakeFlagsValue(makeflags, &jobserver_config,
+                                            &err)) {
+    // MAKEFLAGS is defined but could not be parsed correctly.
+    if (config_.verbosity > BuildConfig::QUIET)
+      status->Warning("Ignoring jobserver: %s [%s]", err.c_str(), makeflags);
+    return result;
+  }
+
+  if (!jobserver_config.HasMode()) {
+    // MAKEFLAGS is defined, but does not describe a jobserver mode.
+    return result;
+  }
+
+  if (config_.verbosity > BuildConfig::NO_STATUS_UPDATE) {
+    status->Info("Jobserver mode detected: %s", makeflags);
+  }
+
+  result = Jobserver::Client::Create(jobserver_config, &err);
+  if (!result.get()) {
+    // Jobserver client initialization failed.
+    if (config_.verbosity > BuildConfig::QUIET)
+      status->Error("Could not initialize jobserver: %s", err.c_str());
+  }
+  return result;
+}
+
+ExitStatus NinjaMain::RunBuild(int argc, char** argv, Status* status) {
+  std::string err;
+  std::vector<Node*> targets;
   if (!CollectTargetsFromArgs(argc, argv, &targets, &err)) {
     status->Error("%s", err.c_str());
-    return 1;
+    return ExitFailure;
   }
 
   disk_interface_.AllowStatCache(g_experimental_statcache);
 
+  // Detect jobserver context and inject Jobserver::Client into the builder
+  // if needed.
+  std::unique_ptr<Jobserver::Client> jobserver_client =
+      SetupJobserverClient(status);
+
   Builder builder(&state_, config_, &build_log_, &deps_log_, &disk_interface_,
                   status, start_time_millis_);
+
+  if (jobserver_client.get()) {
+    builder.SetJobserverClient(std::move(jobserver_client));
+  }
+
   for (size_t i = 0; i < targets.size(); ++i) {
     if (!builder.AddTarget(targets[i], &err)) {
       if (!err.empty()) {
         status->Error("%s", err.c_str());
-        return 1;
+        return ExitFailure;
       } else {
         // Added a target that is already up-to-date; not really
         // an error.
@@ -1376,18 +1630,18 @@
     if (config_.verbosity != BuildConfig::NO_STATUS_UPDATE) {
       status->Info("no work to do.");
     }
-    return 0;
+    return ExitSuccess;
   }
 
-  if (!builder.Build(&err)) {
+  ExitStatus exit_status = builder.Build(&err);
+  if (exit_status != ExitSuccess) {
     status->Info("build stopped: %s.", err.c_str());
     if (err.find("interrupted by user") != string::npos) {
-      return 2;
+      return ExitInterrupted;
     }
-    return 1;
   }
 
-  return 0;
+  return exit_status;
 }
 
 #ifdef _MSC_VER
@@ -1466,6 +1720,7 @@
         // We want to run N jobs in parallel. For N = 0, INT_MAX
         // is close enough to infinite for most sane builds.
         config->parallelism = value > 0 ? value : INT_MAX;
+        config->disable_jobserver_client = true;
         deferGuessParallelism.needGuess = false;
         break;
       }
@@ -1491,6 +1746,7 @@
       }
       case 'n':
         config->dry_run = true;
+        config->disable_jobserver_client = true;
         break;
       case 't':
         options->tool = ChooseTool(optarg);
@@ -1540,7 +1796,7 @@
   if (exit_code >= 0)
     exit(exit_code);
 
-  Status* status = new StatusPrinter(config);
+  Status* status = Status::factory(config);
 
   if (options.working_dir) {
     // The formatting of this string, complete with funny quotes, is
@@ -1605,7 +1861,7 @@
 
     ninja.ParsePreviousElapsedTimes();
 
-    int result = ninja.RunBuild(argc, argv, status);
+    ExitStatus result = ninja.RunBuild(argc, argv, status);
     if (g_metrics)
       ninja.DumpMetrics();
     exit(result);
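For readers unfamiliar with the jobserver support wired into RunBuild() above: it follows the GNU make job-token protocol, in which MAKEFLAGS advertises a shared token pool (for example a fifo) and each concurrently running job must hold one token. A minimal illustrative sketch of that protocol, not ninja's Jobserver::Client implementation:

    #include <unistd.h>

    // One job slot == one byte read from the jobserver pipe/fifo; the byte
    // must be written back when the job finishes. A fifo-based pool is
    // typically advertised as MAKEFLAGS="... --jobserver-auth=fifo:<path>".
    struct JobToken {
      char byte = 0;
      bool valid = false;
    };

    JobToken AcquireToken(int jobserver_fd) {
      JobToken token;
      token.valid = read(jobserver_fd, &token.byte, 1) == 1;
      return token;
    }

    void ReleaseToken(int jobserver_fd, const JobToken& token) {
      if (token.valid) {
        ssize_t unused = write(jobserver_fd, &token.byte, 1);
        (void)unused;  // the slot is returned to the shared pool
      }
    }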
diff --git a/src/parser.h b/src/parser.h
index 011fad8..75990aa 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -26,6 +26,7 @@
 struct Parser {
   Parser(State* state, FileReader* file_reader)
       : state_(state), file_reader_(file_reader) {}
+  virtual ~Parser() {}
 
   /// Load and parse a file.
   bool Load(const std::string& filename, std::string* err, Lexer* parent = NULL);
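The new virtual destructor in Parser matters presumably because parser subclasses may be destroyed through a Parser*. A minimal illustration of the rule (generic C++, not ninja code):

    // Deleting a derived object through a base pointer is undefined behaviour
    // unless the base class destructor is virtual.
    struct Base {
      virtual ~Base() {}  // without 'virtual', ~Derived() would never run
    };

    struct Derived : Base {
      ~Derived() override { /* release subclass-owned resources */ }
    };

    void Destroy(Base* object) {
      delete object;  // runs ~Derived(), then ~Base()
    }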
diff --git a/src/real_command_runner.cc b/src/real_command_runner.cc
new file mode 100644
index 0000000..4a01276
--- /dev/null
+++ b/src/real_command_runner.cc
@@ -0,0 +1,120 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "build.h"
+#include "jobserver.h"
+#include "limits.h"
+#include "subprocess.h"
+
+struct RealCommandRunner : public CommandRunner {
+  explicit RealCommandRunner(const BuildConfig& config,
+                             Jobserver::Client* jobserver)
+      : config_(config), jobserver_(jobserver) {}
+  size_t CanRunMore() const override;
+  bool StartCommand(Edge* edge) override;
+  bool WaitForCommand(Result* result) override;
+  std::vector<Edge*> GetActiveEdges() override;
+  void Abort() override;
+
+  void ClearJobTokens() {
+    if (jobserver_) {
+      for (Edge* edge : GetActiveEdges()) {
+        jobserver_->Release(std::move(edge->job_slot_));
+      }
+    }
+  }
+
+  const BuildConfig& config_;
+  SubprocessSet subprocs_;
+  Jobserver::Client* jobserver_ = nullptr;
+  std::map<const Subprocess*, Edge*> subproc_to_edge_;
+};
+
+std::vector<Edge*> RealCommandRunner::GetActiveEdges() {
+  std::vector<Edge*> edges;
+  for (std::map<const Subprocess*, Edge*>::iterator e =
+           subproc_to_edge_.begin();
+       e != subproc_to_edge_.end(); ++e)
+    edges.push_back(e->second);
+  return edges;
+}
+
+void RealCommandRunner::Abort() {
+  ClearJobTokens();
+  subprocs_.Clear();
+}
+
+size_t RealCommandRunner::CanRunMore() const {
+  size_t subproc_number =
+      subprocs_.running_.size() + subprocs_.finished_.size();
+
+  int64_t capacity = config_.parallelism - subproc_number;
+
+  if (jobserver_) {
+    // When a jobserver token pool is used, make the
+    // capacity infinite, and let FindWork() limit jobs
+    // through token acquisitions instead.
+    capacity = INT_MAX;
+  }
+
+  if (config_.max_load_average > 0.0f) {
+    int load_capacity = config_.max_load_average - GetLoadAverage();
+    if (load_capacity < capacity)
+      capacity = load_capacity;
+  }
+
+  if (capacity < 0)
+    capacity = 0;
+
+  if (capacity == 0 && subprocs_.running_.empty())
+    // Ensure that we make progress.
+    capacity = 1;
+
+  return capacity;
+}
+
+bool RealCommandRunner::StartCommand(Edge* edge) {
+  std::string command = edge->EvaluateCommand();
+  Subprocess* subproc = subprocs_.Add(command, edge->use_console());
+  if (!subproc)
+    return false;
+  subproc_to_edge_.insert(std::make_pair(subproc, edge));
+
+  return true;
+}
+
+bool RealCommandRunner::WaitForCommand(Result* result) {
+  Subprocess* subproc;
+  while ((subproc = subprocs_.NextFinished()) == NULL) {
+    bool interrupted = subprocs_.DoWork();
+    if (interrupted)
+      return false;
+  }
+
+  result->status = subproc->Finish();
+  result->output = subproc->GetOutput();
+
+  std::map<const Subprocess*, Edge*>::iterator e =
+      subproc_to_edge_.find(subproc);
+  result->edge = e->second;
+  subproc_to_edge_.erase(e);
+
+  delete subproc;
+  return true;
+}
+
+CommandRunner* CommandRunner::factory(const BuildConfig& config,
+                                      Jobserver::Client* jobserver) {
+  return new RealCommandRunner(config, jobserver);
+}
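The capacity logic in RealCommandRunner::CanRunMore() above combines three limits: the -j parallelism value, the jobserver mode (which defers limiting to token acquisition), and the -l load-average cap. A condensed sketch of just that arithmetic, mirroring the code above rather than replacing it:

    #include <climits>
    #include <cstdint>
    #include <cstddef>

    int64_t SpareCapacity(int parallelism, size_t running, size_t finished,
                          bool using_jobserver, double max_load,
                          double load_now) {
      int64_t capacity =
          parallelism - static_cast<int64_t>(running + finished);
      if (using_jobserver)
        capacity = INT_MAX;  // token acquisition limits jobs instead
      if (max_load > 0.0) {
        int64_t load_capacity = static_cast<int64_t>(max_load - load_now);
        if (load_capacity < capacity)
          capacity = load_capacity;
      }
      if (capacity < 0)
        capacity = 0;
      if (capacity == 0 && running == 0)
        capacity = 1;  // always allow at least one job, to make progress
      return capacity;
    }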
diff --git a/src/state.cc b/src/state.cc
index 5fec5e1..a8a1482 100644
--- a/src/state.cc
+++ b/src/state.cc
@@ -63,10 +63,9 @@
 
 Pool State::kDefaultPool("", 0);
 Pool State::kConsolePool("console", 1);
-const Rule State::kPhonyRule("phony");
 
 State::State() {
-  bindings_.AddRule(&kPhonyRule);
+  bindings_.AddRule(Rule::Phony());
   AddPool(&kDefaultPool);
   AddPool(&kConsolePool);
 }
diff --git a/src/state.h b/src/state.h
index 8789cb1..13e0e81 100644
--- a/src/state.h
+++ b/src/state.h
@@ -95,7 +95,6 @@
 struct State {
   static Pool kDefaultPool;
   static Pool kConsolePool;
-  static const Rule kPhonyRule;
 
   State();
 
diff --git a/src/state_test.cc b/src/state_test.cc
index e0e3060..9c1af1c 100644
--- a/src/state_test.cc
+++ b/src/state_test.cc
@@ -31,7 +31,7 @@
 
   Rule* rule = new Rule("cat");
   rule->AddBinding("command", command);
-  state.bindings_.AddRule(rule);
+  state.bindings_.AddRule(std::unique_ptr<Rule>(rule));
 
   Edge* edge = state.AddEdge(rule);
   state.AddIn(edge, "in1", 0);
diff --git a/src/status.h b/src/status.h
index a1a8fdd..a873993 100644
--- a/src/status.h
+++ b/src/status.h
@@ -15,11 +15,12 @@
 #ifndef NINJA_STATUS_H_
 #define NINJA_STATUS_H_
 
-#include <map>
 #include <string>
+#include "exit_status.h"
 
-#include "build.h"
-#include "line_printer.h"
+struct BuildConfig;
+struct Edge;
+struct Explanations;
 
 /// Abstract interface to object that tracks the status of a build:
 /// completion fraction, printing updates.
@@ -29,120 +30,24 @@
   virtual void BuildEdgeStarted(const Edge* edge,
                                 int64_t start_time_millis) = 0;
   virtual void BuildEdgeFinished(Edge* edge, int64_t start_time_millis,
-                                 int64_t end_time_millis, bool success,
+                                 int64_t end_time_millis, ExitStatus exit_code,
                                  const std::string& output) = 0;
-  virtual void BuildLoadDyndeps() = 0;
   virtual void BuildStarted() = 0;
   virtual void BuildFinished() = 0;
 
+  /// Set the Explanations instance used to report explanations. The
+  /// argument can be nullptr if no explanations need to be printed
+  /// (which is the default).
+  virtual void SetExplanations(Explanations*) = 0;
+
   virtual void Info(const char* msg, ...) = 0;
   virtual void Warning(const char* msg, ...) = 0;
   virtual void Error(const char* msg, ...) = 0;
 
   virtual ~Status() { }
-};
 
-/// Implementation of the Status interface that prints the status as
-/// human-readable strings to stdout
-struct StatusPrinter : Status {
-  explicit StatusPrinter(const BuildConfig& config);
-
-  /// Callbacks for the Plan to notify us about adding/removing Edge's.
-  virtual void EdgeAddedToPlan(const Edge* edge);
-  virtual void EdgeRemovedFromPlan(const Edge* edge);
-
-  virtual void BuildEdgeStarted(const Edge* edge, int64_t start_time_millis);
-  virtual void BuildEdgeFinished(Edge* edge, int64_t start_time_millis,
-                                 int64_t end_time_millis, bool success,
-                                 const std::string& output);
-  virtual void BuildLoadDyndeps();
-  virtual void BuildStarted();
-  virtual void BuildFinished();
-
-  virtual void Info(const char* msg, ...);
-  virtual void Warning(const char* msg, ...);
-  virtual void Error(const char* msg, ...);
-
-  virtual ~StatusPrinter() { }
-
-  /// Format the progress status string by replacing the placeholders.
-  /// See the user manual for more information about the available
-  /// placeholders.
-  /// @param progress_status_format The format of the progress status.
-  /// @param status The status of the edge.
-  std::string FormatProgressStatus(const char* progress_status_format,
-                                   int64_t time_millis) const;
-
- private:
-  void PrintStatus(const Edge* edge, int64_t time_millis);
-
-  const BuildConfig& config_;
-
-  int started_edges_, finished_edges_, total_edges_, running_edges_;
-
-  /// How much wall clock elapsed so far?
-  int64_t time_millis_ = 0;
-
-  /// How much cpu clock elapsed so far?
-  int64_t cpu_time_millis_ = 0;
-
-  /// What percentage of predicted total time have elapsed already?
-  double time_predicted_percentage_ = 0.0;
-
-  /// Out of all the edges, for how many do we know previous time?
-  int eta_predictable_edges_total_ = 0;
-  /// And how much time did they all take?
-  int64_t eta_predictable_cpu_time_total_millis_ = 0;
-
-  /// Out of all the non-finished edges, for how many do we know previous time?
-  int eta_predictable_edges_remaining_ = 0;
-  /// And how much time will they all take?
-  int64_t eta_predictable_cpu_time_remaining_millis_ = 0;
-
-  /// For how many edges we don't know the previous run time?
-  int eta_unpredictable_edges_remaining_ = 0;
-
-  void RecalculateProgressPrediction();
-
-  /// Prints progress output.
-  LinePrinter printer_;
-
-  /// The custom progress status format to use.
-  const char* progress_status_format_;
-
-  template<size_t S>
-  void SnprintfRate(double rate, char(&buf)[S], const char* format) const {
-    if (rate == -1)
-      snprintf(buf, S, "?");
-    else
-      snprintf(buf, S, format, rate);
-  }
-
-  struct SlidingRateInfo {
-    SlidingRateInfo(int n) : rate_(-1), N(n), last_update_(-1) {}
-
-    double rate() { return rate_; }
-
-    void UpdateRate(int update_hint, int64_t time_millis) {
-      if (update_hint == last_update_)
-        return;
-      last_update_ = update_hint;
-
-      if (times_.size() == N)
-        times_.pop();
-      times_.push(time_millis);
-      if (times_.back() != times_.front())
-        rate_ = times_.size() / ((times_.back() - times_.front()) / 1e3);
-    }
-
-  private:
-    double rate_;
-    const size_t N;
-    std::queue<double> times_;
-    int last_update_;
-  };
-
-  mutable SlidingRateInfo current_rate_;
+  /// Creates the concrete Status implementation.
+  static Status* factory(const BuildConfig&);
 };
 
 #endif // NINJA_STATUS_H_
diff --git a/src/status.cc b/src/status_printer.cc
similarity index 90%
rename from src/status.cc
rename to src/status_printer.cc
index 06f3c20..e69cd15 100644
--- a/src/status.cc
+++ b/src/status_printer.cc
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "status.h"
+#include "status_printer.h"
 
 #ifdef _WIN32
 #include "win32port.h"
@@ -31,10 +31,16 @@
 #include <io.h>
 #endif
 
+#include "build.h"
 #include "debug_flags.h"
+#include "exit_status.h"
 
 using namespace std;
 
+Status* Status::factory(const BuildConfig& config) {
+  return new StatusPrinter(config);
+}
+
 StatusPrinter::StatusPrinter(const BuildConfig& config)
     : config_(config), started_edges_(0), finished_edges_(0), total_edges_(0),
       running_edges_(0), progress_status_format_(NULL),
@@ -169,7 +175,7 @@
 }
 
 void StatusPrinter::BuildEdgeFinished(Edge* edge, int64_t start_time_millis,
-                                      int64_t end_time_millis, bool success,
+                                      int64_t end_time_millis, ExitStatus exit_code,
                                       const string& output) {
   time_millis_ = end_time_millis;
   ++finished_edges_;
@@ -197,21 +203,28 @@
   --running_edges_;
 
   // Print the command that is spewing before printing its output.
-  if (!success) {
+  if (exit_code != ExitSuccess) {
     string outputs;
     for (vector<Node*>::const_iterator o = edge->outputs_.begin();
          o != edge->outputs_.end(); ++o)
       outputs += (*o)->path() + " ";
 
+    string failed = "FAILED: [code=" + std::to_string(exit_code) + "] ";
     if (printer_.supports_color()) {
-        printer_.PrintOnNewLine("\x1B[31m" "FAILED: " "\x1B[0m" + outputs + "\n");
+        printer_.PrintOnNewLine("\x1B[31m" + failed + "\x1B[0m" + outputs + "\n");
     } else {
-        printer_.PrintOnNewLine("FAILED: " + outputs + "\n");
+        printer_.PrintOnNewLine(failed + outputs + "\n");
     }
     printer_.PrintOnNewLine(edge->EvaluateCommand() + "\n");
   }
 
   if (!output.empty()) {
+#ifdef _WIN32
+    // Fix extra CR being added on Windows, writing out CR CR LF (#773)
+    fflush(stdout);  // Begin Windows extra CR fix
+    _setmode(_fileno(stdout), _O_BINARY);
+#endif
+
     // ninja sets stdout and stderr of subprocesses to a pipe, to be able to
     // check if the output is empty. Some compilers, e.g. clang, check
     // isatty(stderr) to decide if they should print colored output.
@@ -223,39 +236,20 @@
     // (Launching subprocesses in pseudo ttys doesn't work because there are
     // only a few hundred available on some systems, and ninja can launch
     // thousands of parallel compile commands.)
-    string final_output;
-    if (!printer_.supports_color())
-      final_output = StripAnsiEscapeCodes(output);
-    else
-      final_output = output;
+    if (printer_.supports_color() || output.find('\x1b') == std::string::npos) {
+      printer_.PrintOnNewLine(output);
+    } else {
+      std::string final_output = StripAnsiEscapeCodes(output);
+      printer_.PrintOnNewLine(final_output);
+    }
 
 #ifdef _WIN32
-    // Fix extra CR being added on Windows, writing out CR CR LF (#773)
-    _setmode(_fileno(stdout), _O_BINARY);  // Begin Windows extra CR fix
-#endif
-
-    printer_.PrintOnNewLine(final_output);
-
-#ifdef _WIN32
+    fflush(stdout);
     _setmode(_fileno(stdout), _O_TEXT);  // End Windows extra CR fix
 #endif
   }
 }
 
-void StatusPrinter::BuildLoadDyndeps() {
-  // The DependencyScan calls EXPLAIN() to print lines explaining why
-  // it considers a portion of the graph to be out of date.  Normally
-  // this is done before the build starts, but our caller is about to
-  // load a dyndep file during the build.  Doing so may generate more
-  // explanation lines (via fprintf directly to stderr), but in an
-  // interactive console the cursor is currently at the end of a status
-  // line.  Start a new line so that the first explanation does not
-  // append to the status line.  After the explanations are done a
-  // new build status line will appear.
-  if (g_explaining)
-    printer_.PrintOnNewLine("");
-}
-
 void StatusPrinter::BuildStarted() {
   started_edges_ = 0;
   finished_edges_ = 0;
@@ -412,6 +406,22 @@
 }
 
 void StatusPrinter::PrintStatus(const Edge* edge, int64_t time_millis) {
+  if (explanations_) {
+    // Collect all explanations for the current edge's outputs.
+    std::vector<std::string> explanations;
+    for (Node* output : edge->outputs_) {
+      explanations_->LookupAndAppend(output, &explanations);
+    }
+    if (!explanations.empty()) {
+      // Start a new line so that the first explanation does not append to the
+      // status line.
+      printer_.PrintOnNewLine("");
+      for (const auto& exp : explanations) {
+        fprintf(stderr, "ninja explain: %s\n", exp.c_str());
+      }
+    }
+  }
+
   if (config_.verbosity == BuildConfig::QUIET
       || config_.verbosity == BuildConfig::NO_STATUS_UPDATE)
     return;
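The Explanations type consulted in PrintStatus() above is declared in explanations.h and is not part of this hunk; conceptually it maps a graph item to the '-d explain' messages recorded for it. A hypothetical minimal stand-in with the same LookupAndAppend shape, for illustration only:

    #include <map>
    #include <string>
    #include <vector>

    // Hypothetical minimal stand-in for the Explanations class (the real one
    // may differ): record why an item was considered dirty, then retrieve all
    // reasons for that item when its status line is about to be printed.
    template <typename Item>
    struct ExplanationsSketch {
      void Record(const Item* item, std::string reason) {
        map_[item].push_back(std::move(reason));
      }
      void LookupAndAppend(const Item* item, std::vector<std::string>* out) {
        auto it = map_.find(item);
        if (it != map_.end())
          out->insert(out->end(), it->second.begin(), it->second.end());
      }
      std::map<const Item*, std::vector<std::string>> map_;
    };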
diff --git a/src/status_printer.h b/src/status_printer.h
new file mode 100644
index 0000000..213e9ce
--- /dev/null
+++ b/src/status_printer.h
@@ -0,0 +1,130 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#pragma once
+
+#include <cstdint>
+#include <queue>
+
+#include "exit_status.h"
+#include "explanations.h"
+#include "line_printer.h"
+#include "status.h"
+
+/// Implementation of the Status interface that prints the status as
+/// human-readable strings to stdout
+struct StatusPrinter : Status {
+  explicit StatusPrinter(const BuildConfig& config);
+
+  /// Callbacks for the Plan to notify us about adding/removing Edge's.
+  void EdgeAddedToPlan(const Edge* edge) override;
+  void EdgeRemovedFromPlan(const Edge* edge) override;
+
+  void BuildEdgeStarted(const Edge* edge, int64_t start_time_millis) override;
+  void BuildEdgeFinished(Edge* edge, int64_t start_time_millis,
+                                 int64_t end_time_millis, ExitStatus exit_code,
+                                 const std::string& output) override;
+  void BuildStarted() override;
+  void BuildFinished() override;
+
+  void Info(const char* msg, ...) override;
+  void Warning(const char* msg, ...) override;
+  void Error(const char* msg, ...) override;
+
+  /// Format the progress status string by replacing the placeholders.
+  /// See the user manual for more information about the available
+  /// placeholders.
+  /// @param progress_status_format The format of the progress status.
+  /// @param status The status of the edge.
+  std::string FormatProgressStatus(const char* progress_status_format,
+                                   int64_t time_millis) const;
+
+  /// Set the |explanations_| pointer. Used to implement `-d explain`.
+  void SetExplanations(Explanations* explanations) override {
+    explanations_ = explanations;
+  }
+
+ private:
+  void PrintStatus(const Edge* edge, int64_t time_millis);
+
+  const BuildConfig& config_;
+
+  int started_edges_, finished_edges_, total_edges_, running_edges_;
+
+  /// How much wall clock elapsed so far?
+  int64_t time_millis_ = 0;
+
+  /// How much cpu clock elapsed so far?
+  int64_t cpu_time_millis_ = 0;
+
+  /// What percentage of predicted total time have elapsed already?
+  double time_predicted_percentage_ = 0.0;
+
+  /// Out of all the edges, for how many do we know previous time?
+  int eta_predictable_edges_total_ = 0;
+  /// And how much time did they all take?
+  int64_t eta_predictable_cpu_time_total_millis_ = 0;
+
+  /// Out of all the non-finished edges, for how many do we know previous time?
+  int eta_predictable_edges_remaining_ = 0;
+  /// And how much time will they all take?
+  int64_t eta_predictable_cpu_time_remaining_millis_ = 0;
+
+  /// For how many edges we don't know the previous run time?
+  int eta_unpredictable_edges_remaining_ = 0;
+
+  void RecalculateProgressPrediction();
+
+  /// Prints progress output.
+  LinePrinter printer_;
+
+  /// An optional Explanations pointer, used to implement `-d explain`.
+  Explanations* explanations_ = nullptr;
+
+  /// The custom progress status format to use.
+  const char* progress_status_format_;
+
+  template <size_t S>
+  void SnprintfRate(double rate, char (&buf)[S], const char* format) const {
+    if (rate == -1)
+      snprintf(buf, S, "?");
+    else
+      snprintf(buf, S, format, rate);
+  }
+
+  struct SlidingRateInfo {
+    SlidingRateInfo(int n) : rate_(-1), N(n), last_update_(-1) {}
+
+    double rate() { return rate_; }
+
+    void UpdateRate(int update_hint, int64_t time_millis) {
+      if (update_hint == last_update_)
+        return;
+      last_update_ = update_hint;
+
+      if (times_.size() == N)
+        times_.pop();
+      times_.push(time_millis);
+      if (times_.back() != times_.front())
+        rate_ = times_.size() / ((times_.back() - times_.front()) / 1e3);
+    }
+
+   private:
+    double rate_;
+    const size_t N;
+    std::queue<double> times_;
+    int last_update_;
+  };
+
+  mutable SlidingRateInfo current_rate_;
+};
diff --git a/src/string_piece.h b/src/string_piece.h
index 1c0bee6..7e7367c 100644
--- a/src/string_piece.h
+++ b/src/string_piece.h
@@ -63,6 +63,10 @@
     return len_;
   }
 
+  bool empty() const {
+    return len_ == 0;
+  }
+
   const char* str_;
   size_t len_;
 };
diff --git a/src/string_piece_util_test.cc b/src/string_piece_util_test.cc
index 61586dd..cb296fe 100644
--- a/src/string_piece_util_test.cc
+++ b/src/string_piece_util_test.cc
@@ -23,7 +23,7 @@
     string input("a:b:c");
     vector<StringPiece> list = SplitStringPiece(input, ':');
 
-    EXPECT_EQ(list.size(), 3);
+    EXPECT_EQ(list.size(), size_t(3));
 
     EXPECT_EQ(list[0], "a");
     EXPECT_EQ(list[1], "b");
@@ -34,7 +34,7 @@
     string empty;
     vector<StringPiece> list = SplitStringPiece(empty, ':');
 
-    EXPECT_EQ(list.size(), 1);
+    EXPECT_EQ(list.size(), size_t(1));
 
     EXPECT_EQ(list[0], "");
   }
@@ -43,7 +43,7 @@
     string one("a");
     vector<StringPiece> list = SplitStringPiece(one, ':');
 
-    EXPECT_EQ(list.size(), 1);
+    EXPECT_EQ(list.size(), size_t(1));
 
     EXPECT_EQ(list[0], "a");
   }
@@ -52,7 +52,7 @@
     string sep_only(":");
     vector<StringPiece> list = SplitStringPiece(sep_only, ':');
 
-    EXPECT_EQ(list.size(), 2);
+    EXPECT_EQ(list.size(), size_t(2));
 
     EXPECT_EQ(list[0], "");
     EXPECT_EQ(list[1], "");
@@ -62,7 +62,7 @@
     string sep(":a:b:c:");
     vector<StringPiece> list = SplitStringPiece(sep, ':');
 
-    EXPECT_EQ(list.size(), 5);
+    EXPECT_EQ(list.size(), size_t(5));
 
     EXPECT_EQ(list[0], "");
     EXPECT_EQ(list[1], "a");
diff --git a/src/subprocess-posix.cc b/src/subprocess-posix.cc
index 8e78540..0e62b3b 100644
--- a/src/subprocess-posix.cc
+++ b/src/subprocess-posix.cc
@@ -12,6 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+#include "exit_status.h"
 #include "subprocess.h"
 
 #include <sys/select.h>
@@ -36,6 +37,10 @@
 
 using namespace std;
 
+namespace {
+  ExitStatus ParseExitStatus(int status);
+}
+
 Subprocess::Subprocess(bool use_console) : fd_(-1), pid_(-1),
                                            use_console_(use_console) {
 }
@@ -49,26 +54,34 @@
 }
 
 bool Subprocess::Start(SubprocessSet* set, const string& command) {
-  int output_pipe[2];
-  if (pipe(output_pipe) < 0)
-    Fatal("pipe: %s", strerror(errno));
-  fd_ = output_pipe[0];
+  int subproc_stdout_fd = -1;
+  if (use_console_) {
+    fd_ = -1;
+  } else {
+    int output_pipe[2];
+    if (pipe(output_pipe) < 0)
+      Fatal("pipe: %s", strerror(errno));
+    fd_ = output_pipe[0];
+    subproc_stdout_fd = output_pipe[1];
 #if !defined(USE_PPOLL)
-  // If available, we use ppoll in DoWork(); otherwise we use pselect
-  // and so must avoid overly-large FDs.
-  if (fd_ >= static_cast<int>(FD_SETSIZE))
-    Fatal("pipe: %s", strerror(EMFILE));
+    // If available, we use ppoll in DoWork(); otherwise we use pselect
+    // and so must avoid overly-large FDs.
+    if (fd_ >= static_cast<int>(FD_SETSIZE))
+      Fatal("pipe: %s", strerror(EMFILE));
 #endif  // !USE_PPOLL
-  SetCloseOnExec(fd_);
+    SetCloseOnExec(fd_);
+  }
 
   posix_spawn_file_actions_t action;
   int err = posix_spawn_file_actions_init(&action);
   if (err != 0)
     Fatal("posix_spawn_file_actions_init: %s", strerror(err));
 
-  err = posix_spawn_file_actions_addclose(&action, output_pipe[0]);
-  if (err != 0)
-    Fatal("posix_spawn_file_actions_addclose: %s", strerror(err));
+  if (!use_console_) {
+    err = posix_spawn_file_actions_addclose(&action, fd_);
+    if (err != 0)
+      Fatal("posix_spawn_file_actions_addclose: %s", strerror(err));
+  }
 
   posix_spawnattr_t attr;
   err = posix_spawnattr_init(&attr);
@@ -97,18 +110,17 @@
       Fatal("posix_spawn_file_actions_addopen: %s", strerror(err));
     }
 
-    err = posix_spawn_file_actions_adddup2(&action, output_pipe[1], 1);
+    err = posix_spawn_file_actions_adddup2(&action, subproc_stdout_fd, 1);
     if (err != 0)
       Fatal("posix_spawn_file_actions_adddup2: %s", strerror(err));
-    err = posix_spawn_file_actions_adddup2(&action, output_pipe[1], 2);
+    err = posix_spawn_file_actions_adddup2(&action, subproc_stdout_fd, 2);
     if (err != 0)
       Fatal("posix_spawn_file_actions_adddup2: %s", strerror(err));
-    err = posix_spawn_file_actions_addclose(&action, output_pipe[1]);
+    err = posix_spawn_file_actions_addclose(&action, subproc_stdout_fd);
     if (err != 0)
       Fatal("posix_spawn_file_actions_addclose: %s", strerror(err));
-    // In the console case, output_pipe is still inherited by the child and
-    // closed when the subprocess finishes, which then notifies ninja.
   }
+
 #ifdef POSIX_SPAWN_USEVFORK
   flags |= POSIX_SPAWN_USEVFORK;
 #endif
@@ -130,7 +142,8 @@
   if (err != 0)
     Fatal("posix_spawn_file_actions_destroy: %s", strerror(err));
 
-  close(output_pipe[1]);
+  if (!use_console_)
+    close(subproc_stdout_fd);
   return true;
 }
 
@@ -147,13 +160,32 @@
   }
 }
 
-ExitStatus Subprocess::Finish() {
-  assert(pid_ != -1);
-  int status;
-  if (waitpid(pid_, &status, 0) < 0)
-    Fatal("waitpid(%d): %s", pid_, strerror(errno));
-  pid_ = -1;
 
+bool Subprocess::TryFinish(int waitpid_options) {
+  assert(pid_ != -1);
+  int status, ret;
+  while ((ret = waitpid(pid_, &status, waitpid_options)) < 0) {
+    if (errno != EINTR)
+      Fatal("waitpid(%d): %s", pid_, strerror(errno));
+  }
+  if (ret == 0)
+    return false; // Subprocess is alive (WNOHANG-only).
+  pid_ = -1;
+  exit_status_ = ParseExitStatus(status);
+  return true; // Subprocess has terminated.
+}
+
+ExitStatus Subprocess::Finish() {
+  if (pid_ != -1) {
+    TryFinish(0);
+    assert(pid_ == -1);
+  }
+  return exit_status_;
+}
+
+namespace {
+
+ExitStatus ParseExitStatus(int status) {
 #ifdef _AIX
   if (WIFEXITED(status) && WEXITSTATUS(status) & 0x80) {
     // Map the shell's exit code used for signal failure (128 + signal) to the
@@ -165,31 +197,43 @@
 #endif
 
   if (WIFEXITED(status)) {
-    int exit = WEXITSTATUS(status);
-    if (exit == 0)
-      return ExitSuccess;
-  } else if (WIFSIGNALED(status)) {
+    // propagate the status transparently
+    return static_cast<ExitStatus>(WEXITSTATUS(status));
+  }
+  if (WIFSIGNALED(status)) {
     if (WTERMSIG(status) == SIGINT || WTERMSIG(status) == SIGTERM
         || WTERMSIG(status) == SIGHUP)
       return ExitInterrupted;
   }
-  return ExitFailure;
+  // Any other termination is reported as the raw wait status + 128.
+  return static_cast<ExitStatus>(status + 128);
 }
 
+} // anonymous namespace
+
 bool Subprocess::Done() const {
-  return fd_ == -1;
+  // Console subprocesses share console with ninja, and we consider them done
+  // when they exit.
+  // For other processes, we consider them done when we have consumed all their
+  // output and closed their associated pipe.
+  return (use_console_ && pid_ == -1) || (!use_console_ && fd_ == -1);
 }
 
 const string& Subprocess::GetOutput() const {
   return buf_;
 }
 
-int SubprocessSet::interrupted_;
+volatile sig_atomic_t SubprocessSet::interrupted_;
+volatile sig_atomic_t SubprocessSet::s_sigchld_received;
 
 void SubprocessSet::SetInterruptedFlag(int signum) {
   interrupted_ = signum;
 }
 
+void SubprocessSet::SigChldHandler(int signo, siginfo_t* info, void* context) {
+  s_sigchld_received = 1;
+}
+
 void SubprocessSet::HandlePendingInterruption() {
   sigset_t pending;
   sigemptyset(&pending);
@@ -206,11 +250,14 @@
 }
 
 SubprocessSet::SubprocessSet() {
+  // Block all these signals.
+  // Their handlers will only be enabled during ppoll/pselect().
   sigset_t set;
   sigemptyset(&set);
   sigaddset(&set, SIGINT);
   sigaddset(&set, SIGTERM);
   sigaddset(&set, SIGHUP);
+  sigaddset(&set, SIGCHLD);
   if (sigprocmask(SIG_BLOCK, &set, &old_mask_) < 0)
     Fatal("sigprocmask: %s", strerror(errno));
 
@@ -223,6 +270,27 @@
     Fatal("sigaction: %s", strerror(errno));
   if (sigaction(SIGHUP, &act, &old_hup_act_) < 0)
     Fatal("sigaction: %s", strerror(errno));
+
+  memset(&act, 0, sizeof(act));
+  act.sa_flags = SA_SIGINFO | SA_NOCLDSTOP;
+  act.sa_sigaction = SigChldHandler;
+  if (sigaction(SIGCHLD, &act, &old_chld_act_) < 0)
+    Fatal("sigaction: %s", strerror(errno));
+}
+
+// Reaps console processes that have exited and moves them from the running set
+// to the finished set.
+void SubprocessSet::CheckConsoleProcessTerminated() {
+  if (!s_sigchld_received)
+    return;
+  for (auto i = running_.begin(); i != running_.end(); ) {
+    if ((*i)->use_console_ && (*i)->TryFinish(WNOHANG)) {
+      finished_.push(*i);
+      i = running_.erase(i);
+    } else {
+      ++i;
+    }
+  }
 }
 
 SubprocessSet::~SubprocessSet() {
@@ -234,6 +302,8 @@
     Fatal("sigaction: %s", strerror(errno));
   if (sigaction(SIGHUP, &old_hup_act_, 0) < 0)
     Fatal("sigaction: %s", strerror(errno));
+  if (sigaction(SIGCHLD, &old_chld_act_, 0) < 0)
+    Fatal("sigaction: %s", strerror(errno));
   if (sigprocmask(SIG_SETMASK, &old_mask_, 0) < 0)
     Fatal("sigprocmask: %s", strerror(errno));
 }
@@ -262,9 +332,21 @@
     fds.push_back(pfd);
     ++nfds;
   }
+  if (nfds == 0) {
+    // Add a dummy entry to prevent using an empty pollfd vector.
+    // ppoll() allows this: entries with fd < 0 are ignored.
+    pollfd pfd = { -1, 0, 0 };
+    fds.push_back(pfd);
+    ++nfds;
+  }
 
   interrupted_ = 0;
+  s_sigchld_received = 0;
   int ret = ppoll(&fds.front(), nfds, NULL, &old_mask_);
+  // Note: This can remove console processes from the running set, but that is
+  // not a problem for the pollfd set, as console processes are not part of the
+  // pollfd set (they don't have a fd).
+  CheckConsoleProcessTerminated();
   if (ret == -1) {
     if (errno != EINTR) {
       perror("ninja: ppoll");
@@ -273,16 +355,23 @@
     return IsInterrupted();
   }
 
+  // ppoll/pselect prioritizes file descriptor events over signal delivery.
+  // However, if the user is trying to quit ninja, we should react as fast as
+  // possible.
   HandlePendingInterruption();
   if (IsInterrupted())
     return true;
 
+  // Iterate through both the pollfd set and the running set.
+  // All valid fds in the running set are in the pollfd set, in the same order.
   nfds_t cur_nfd = 0;
   for (vector<Subprocess*>::iterator i = running_.begin();
        i != running_.end(); ) {
     int fd = (*i)->fd_;
-    if (fd < 0)
+    if (fd < 0) {
+      ++i;
       continue;
+    }
     assert(fd == fds[cur_nfd].fd);
     if (fds[cur_nfd++].revents) {
       (*i)->OnPipeReady();
@@ -315,7 +404,9 @@
   }
 
   interrupted_ = 0;
-  int ret = pselect(nfds, &set, 0, 0, 0, &old_mask_);
+  s_sigchld_received = 0;
+  int ret = pselect(nfds, (nfds > 0 ? &set : nullptr), 0, 0, 0, &old_mask_);
+  CheckConsoleProcessTerminated();
   if (ret == -1) {
     if (errno != EINTR) {
       perror("ninja: pselect");
@@ -324,6 +415,9 @@
     return IsInterrupted();
   }
 
+  // ppoll/pselect prioritizes file descriptor events over signal delivery.
+  // However, if the user is trying to quit ninja, we should react as fast as
+  // possible.
   HandlePendingInterruption();
   if (IsInterrupted())
     return true;
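The console-subprocess handling above follows a classic POSIX pattern: block SIGCHLD except during ppoll()/pselect(), let a tiny handler set a flag, and reap with waitpid(WNOHANG) from the main loop. A standalone sketch of that pattern (illustration only, not ninja's classes), assuming the handler has already been installed with sigaction():

    #include <signal.h>
    #include <sys/wait.h>

    static volatile sig_atomic_t g_child_exited = 0;

    // Async-signal-safe handler: only sets a flag.
    static void OnSigChld(int) {
      g_child_exited = 1;
    }

    // Called from the main loop after ppoll()/pselect() returns.
    // Returns true and fills *exit_status if the child has been reaped.
    bool ReapIfExited(pid_t pid, int* exit_status) {
      if (!g_child_exited)
        return false;
      g_child_exited = 0;
      int status = 0;
      if (waitpid(pid, &status, WNOHANG) != pid)
        return false;  // still running (or already reaped)
      *exit_status = WIFEXITED(status) ? WEXITSTATUS(status)
                                       : 128 + WTERMSIG(status);
      return true;
    }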
diff --git a/src/subprocess-win32.cc b/src/subprocess-win32.cc
index ff3baac..4cb8472 100644
--- a/src/subprocess-win32.cc
+++ b/src/subprocess-win32.cc
@@ -12,6 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+#include "exit_status.h"
 #include "subprocess.h"
 
 #include <assert.h>
@@ -198,9 +199,8 @@
   CloseHandle(child_);
   child_ = NULL;
 
-  return exit_code == 0              ? ExitSuccess :
-         exit_code == CONTROL_C_EXIT ? ExitInterrupted :
-                                       ExitFailure;
+  return exit_code == CONTROL_C_EXIT ? ExitInterrupted :
+                                       static_cast<ExitStatus>(exit_code);
 }
 
 bool Subprocess::Done() const {
diff --git a/src/subprocess.h b/src/subprocess.h
index 9e3d2ee..07181ac 100644
--- a/src/subprocess.h
+++ b/src/subprocess.h
@@ -68,8 +68,27 @@
   char overlapped_buf_[4 << 10];
   bool is_reading_;
 #else
+  /// The file descriptor that will be used in ppoll/pselect() for this process,
+  /// if any. Otherwise -1.
+  /// In non-console mode, this is the read-side of a pipe that was created
+  /// specifically for this subprocess. The write-side of the pipe is given to
+  /// the subprocess as combined stdout and stderr.
+  /// In console mode no pipe is created: fd_ is -1, and process termination is
+  /// detected using the SIGCHLD signal and waitpid(WNOHANG).
   int fd_;
+  /// PID of the subprocess. Set to -1 when the subprocess is reaped.
   pid_t pid_;
+  /// On POSIX platforms, waitpid(WNOHANG) is needed to learn whether a console
+  /// subprocess has finished. However, this also reaps the subprocess before
+  /// Finish() is called, so the ExitStatus is stored here so that a later
+  /// Finish() invocation can return it.
+  ExitStatus exit_status_;
+
+  /// Call waitpid() on the subprocess with the provided options and update the
+  /// pid_ and exit_status_ fields.
+  /// Return a boolean indicating whether the subprocess has indeed terminated.
+  bool TryFinish(int waitpid_options);
 #endif
   bool use_console_;
 
@@ -96,16 +115,24 @@
   static HANDLE ioport_;
 #else
   static void SetInterruptedFlag(int signum);
-  static void HandlePendingInterruption();
+  static void SigChldHandler(int signo, siginfo_t* info, void* context);
+
   /// Store the signal number that causes the interruption.
   /// 0 if not interruption.
-  static int interrupted_;
-
+  static volatile sig_atomic_t interrupted_;
+  /// Whether ninja should quit. Set on SIGINT, SIGTERM or SIGHUP reception.
   static bool IsInterrupted() { return interrupted_ != 0; }
+  static void HandlePendingInterruption();
+
+  /// Initialized to 0 before ppoll/pselect().
+  /// Set to 1 by the SIGCHLD handler when a child process terminates.
+  static volatile sig_atomic_t s_sigchld_received;
+  void CheckConsoleProcessTerminated();
 
   struct sigaction old_int_act_;
   struct sigaction old_term_act_;
   struct sigaction old_hup_act_;
+  struct sigaction old_chld_act_;
   sigset_t old_mask_;
 #endif
 };
diff --git a/src/subprocess_test.cc b/src/subprocess_test.cc
index 073fe86..a1ece6d 100644
--- a/src/subprocess_test.cc
+++ b/src/subprocess_test.cc
@@ -14,6 +14,7 @@
 
 #include "subprocess.h"
 
+#include "exit_status.h"
 #include "test.h"
 
 #ifndef _WIN32
@@ -50,7 +51,8 @@
     subprocs_.DoWork();
   }
 
-  EXPECT_EQ(ExitFailure, subproc->Finish());
+  ExitStatus exit = subproc->Finish();
+  EXPECT_NE(ExitSuccess, exit);
   EXPECT_NE("", subproc->GetOutput());
 }
 
@@ -64,7 +66,8 @@
     subprocs_.DoWork();
   }
 
-  EXPECT_EQ(ExitFailure, subproc->Finish());
+  ExitStatus exit = subproc->Finish();
+  EXPECT_NE(ExitSuccess, exit);
   EXPECT_NE("", subproc->GetOutput());
 #ifdef _WIN32
   ASSERT_EQ("CreateProcess failed: The system cannot find the file "
diff --git a/src/test.cc b/src/test.cc
index 4d063da..e9aaafa 100644
--- a/src/test.cc
+++ b/src/test.cc
@@ -254,7 +254,7 @@
 
 ScopedFilePath::~ScopedFilePath() {
   if (!released_) {
-    unlink(path_.c_str());
+    platformAwareUnlink(path_.c_str());
   }
 }
 
diff --git a/src/third_party/emhash/README.ninja b/src/third_party/emhash/README.ninja
new file mode 100644
index 0000000..12ead4e
--- /dev/null
+++ b/src/third_party/emhash/README.ninja
@@ -0,0 +1,8 @@
+Description: emhash8::HashMap for C++14/17
+Version: 1.6.5 (commit bdebddbdce1b473bbc189178fd523ef4a876ea01)
+URL: https://github.com/ktprime/emhash
+Copyright: Copyright (c) 2021-2024 Huang Yuanbing & bailuzhou AT 163.com
+SPDX-License-Identifier: MIT
+Local changes:
+ - Added includes for _mm_prefetch on MinGW.
+ - Fixed some spelling errors to appease the linter.
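The vendored emhash8::HashMap presents itself as a faster drop-in for std::unordered_map; a minimal usage sketch under that assumption (header path relative to src/):

    #include <string>
    #include "third_party/emhash/hash_table8.hpp"

    // Assumes emhash8::HashMap mirrors the familiar unordered_map interface
    // (emplace / operator[] / find), which is how the vendored header
    // presents itself.
    int CountPath() {
      emhash8::HashMap<std::string, int> seen;
      seen.emplace("build.ninja", 1);
      seen["src/ninja.cc"] += 1;
      auto it = seen.find("build.ninja");
      return it != seen.end() ? it->second : 0;
    }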
diff --git a/src/third_party/emhash/hash_table8.hpp b/src/third_party/emhash/hash_table8.hpp
new file mode 100644
index 0000000..ec96e2d
--- /dev/null
+++ b/src/third_party/emhash/hash_table8.hpp
@@ -0,0 +1,1834 @@
+// emhash8::HashMap for C++14/17
+// version 1.6.5
+// https://github.com/ktprime/emhash/blob/master/hash_table8.hpp
+//
+// Licensed under the MIT License <http://opensource.org/licenses/MIT>.
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2021-2024 Huang Yuanbing & bailuzhou AT 163.com
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE
+
+#pragma once
+
+#include <cstring>
+#include <string>
+#include <cstdlib>
+#include <type_traits>
+#include <cassert>
+#include <utility>
+#include <cstdint>
+#include <functional>
+#include <iterator>
+#include <algorithm>
+#include <memory>
+
+#undef  EMH_NEW
+#undef  EMH_EMPTY
+
+// likely/unlikely
+#if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__)
+#    define EMH_LIKELY(condition)   __builtin_expect(condition, 1)
+#    define EMH_UNLIKELY(condition) __builtin_expect(condition, 0)
+#else
+#    define EMH_LIKELY(condition)   condition
+#    define EMH_UNLIKELY(condition) condition
+#endif
+
+#define EMH_EMPTY(n) (0 > (int)(_index[n].next))
+#define EMH_EQHASH(n, key_hash) (((size_type)(key_hash) & ~_mask) == (_index[n].slot & ~_mask))
+//#define EMH_EQHASH(n, key_hash) ((size_type)(key_hash - _index[n].slot) & ~_mask) == 0
+#define EMH_NEW(key, val, bucket, key_hash) \
+    new(_pairs + _num_filled) value_type(key, val); \
+    _etail = bucket; \
+    _index[bucket] = {bucket, _num_filled++ | ((size_type)(key_hash) & ~_mask)}
+
+#if _WIN32 && defined(_M_IX86)
+#include <xmmintrin.h>
+#endif
+
+namespace emhash8 {
+
+struct DefaultPolicy {
+    static constexpr float load_factor = 0.80f;
+    static constexpr float min_load_factor = 0.20f;
+    static constexpr size_t cacheline_size = 64U;
+};
+
+template<typename KeyT, typename ValueT,
+         typename HashT = std::hash<KeyT>,
+         typename EqT = std::equal_to<KeyT>,
+         typename Allocator = std::allocator<std::pair<KeyT, ValueT>>, //never used
+         typename Policy = DefaultPolicy> //never used
+class HashMap
+{
+#ifndef EMH_DEFAULT_LOAD_FACTOR
+    constexpr static float EMH_DEFAULT_LOAD_FACTOR = 0.80f;
+#endif
+    constexpr static float EMH_MIN_LOAD_FACTOR     = 0.25f; //< 0.5
+    constexpr static uint32_t EMH_CACHE_LINE_SIZE  = 64; //debug only
+
+public:
+    using htype = HashMap<KeyT, ValueT, HashT, EqT>;
+    using value_type = std::pair<KeyT, ValueT>;
+    using key_type = KeyT;
+    using mapped_type = ValueT;
+    //using dPolicy = Policy;
+
+#ifdef EMH_SMALL_TYPE
+    using size_type = uint16_t;
+#elif EMH_SIZE_TYPE == 0
+    using size_type = uint32_t;
+#else
+    using size_type = size_t;
+#endif
+
+    using hasher = HashT;
+    using key_equal = EqT;
+
+    constexpr static size_type INACTIVE = 0-1u;
+    //constexpr uint32_t END      = 0-0x1u;
+    constexpr static size_type EAD      = 2;
+
+    struct Index
+    {
+        size_type next;
+        size_type slot;
+    };
+
+    class const_iterator;
+    class iterator
+    {
+    public:
+        using iterator_category = std::bidirectional_iterator_tag;
+        using difference_type = std::ptrdiff_t;
+        using value_type      = typename htype::value_type;
+        using pointer         = value_type*;
+        using const_pointer   = const value_type* ;
+        using reference       = value_type&;
+        using const_reference = const value_type&;
+
+        iterator() : kv_(nullptr) {}
+        iterator(const_iterator& cit) {
+            kv_ = cit.kv_;
+        }
+
+        iterator(const htype* hash_map, size_type bucket) {
+            kv_ = hash_map->_pairs + (int)bucket;
+        }
+
+        iterator& operator++()
+        {
+            kv_ ++;
+            return *this;
+        }
+
+        iterator operator++(int)
+        {
+            auto cur = *this; kv_ ++;
+            return cur;
+        }
+
+        iterator& operator--()
+        {
+            kv_ --;
+            return *this;
+        }
+
+        iterator operator--(int)
+        {
+            auto cur = *this; kv_ --;
+            return cur;
+        }
+
+        reference operator*() const { return *kv_; }
+        pointer operator->() const { return kv_; }
+
+        bool operator == (const iterator& rhs) const { return kv_ == rhs.kv_; }
+        bool operator != (const iterator& rhs) const { return kv_ != rhs.kv_; }
+        bool operator == (const const_iterator& rhs) const { return kv_ == rhs.kv_; }
+        bool operator != (const const_iterator& rhs) const { return kv_ != rhs.kv_; }
+
+    public:
+        value_type* kv_;
+    };
+
+    class const_iterator
+    {
+    public:
+        using iterator_category = std::bidirectional_iterator_tag;
+        using value_type        = typename htype::value_type;
+        using difference_type   = std::ptrdiff_t;
+        using pointer           = value_type*;
+        using const_pointer     = const value_type*;
+        using reference         = value_type&;
+        using const_reference   = const value_type&;
+
+        const_iterator(const iterator& it) {
+            kv_ = it.kv_;
+        }
+
+        const_iterator (const htype* hash_map, size_type bucket) {
+            kv_ = hash_map->_pairs + (int)bucket;
+        }
+
+        const_iterator& operator++()
+        {
+            kv_ ++;
+            return *this;
+        }
+
+        const_iterator operator++(int)
+        {
+            auto cur = *this; kv_ ++;
+            return cur;
+        }
+
+        const_iterator& operator--()
+        {
+            kv_ --;
+            return *this;
+        }
+
+        const_iterator operator--(int)
+        {
+            auto cur = *this; kv_ --;
+            return cur;
+        }
+
+        const_reference operator*() const { return *kv_; }
+        const_pointer operator->() const { return kv_; }
+
+        bool operator == (const iterator& rhs) const { return kv_ == rhs.kv_; }
+        bool operator != (const iterator& rhs) const { return kv_ != rhs.kv_; }
+        bool operator == (const const_iterator& rhs) const { return kv_ == rhs.kv_; }
+        bool operator != (const const_iterator& rhs) const { return kv_ != rhs.kv_; }
+    public:
+        const value_type* kv_;
+    };
+
+    void init(size_type bucket, float mlf = EMH_DEFAULT_LOAD_FACTOR)
+    {
+        _pairs = nullptr;
+        _index = nullptr;
+        _mask  = _num_buckets = 0;
+        _num_filled = 0;
+        _mlf = (uint32_t)((1 << 27) / EMH_DEFAULT_LOAD_FACTOR);
+        max_load_factor(mlf);
+        rehash(bucket);
+    }
+
+    HashMap(size_type bucket = 2, float mlf = EMH_DEFAULT_LOAD_FACTOR)
+    {
+        init(bucket, mlf);
+    }
+
+    HashMap(const HashMap& rhs)
+    {
+        if (rhs.load_factor() > EMH_MIN_LOAD_FACTOR) {
+            _pairs = alloc_bucket((size_type)(rhs._num_buckets * rhs.max_load_factor()) + 4);
+            _index = alloc_index(rhs._num_buckets);
+            clone(rhs);
+        } else {
+            init(rhs._num_filled + 2, rhs.max_load_factor());
+            for (auto it = rhs.begin(); it != rhs.end(); ++it)
+                insert_unique(it->first, it->second);
+        }
+    }
+
+    HashMap(HashMap&& rhs) noexcept
+    {
+        init(0);
+        *this = std::move(rhs);
+    }
+
+    HashMap(std::initializer_list<value_type> ilist)
+    {
+        init((size_type)ilist.size());
+        for (auto it = ilist.begin(); it != ilist.end(); ++it)
+            do_insert(*it);
+    }
+
+    template<class InputIt>
+    HashMap(InputIt first, InputIt last, size_type bucket_count=4)
+    {
+        init(std::distance(first, last) + bucket_count);
+        for (; first != last; ++first)
+            emplace(*first);
+    }
+
+    HashMap& operator=(const HashMap& rhs)
+    {
+        if (this == &rhs)
+            return *this;
+
+        if (rhs.load_factor() < EMH_MIN_LOAD_FACTOR) {
+            clear(); free(_pairs); _pairs = nullptr;
+            rehash(rhs._num_filled + 2);
+            for (auto it = rhs.begin(); it != rhs.end(); ++it)
+                insert_unique(it->first, it->second);
+            return *this;
+        }
+
+        clearkv();
+
+        if (_num_buckets != rhs._num_buckets) {
+            free(_pairs); free(_index);
+            _index = alloc_index(rhs._num_buckets);
+            _pairs = alloc_bucket((size_type)(rhs._num_buckets * rhs.max_load_factor()) + 4);
+        }
+
+        clone(rhs);
+        return *this;
+    }
+
+    HashMap& operator=(HashMap&& rhs) noexcept
+    {
+        if (this != &rhs) {
+            swap(rhs);
+            rhs.clear();
+        }
+        return *this;
+    }
+
+    template<typename Con>
+    bool operator == (const Con& rhs) const
+    {
+        if (size() != rhs.size())
+            return false;
+
+        for (auto it = begin(), last = end(); it != last; ++it) {
+            auto oi = rhs.find(it->first);
+            if (oi == rhs.end() || it->second != oi->second)
+                return false;
+        }
+        return true;
+    }
+
+    template<typename Con>
+    bool operator != (const Con& rhs) const { return !(*this == rhs); }
+
+    ~HashMap() noexcept
+    {
+        clearkv();
+        free(_pairs);
+        free(_index);
+        _index = nullptr;
+        _pairs = nullptr;
+    }
+
+    void clone(const HashMap& rhs)
+    {
+        _hasher      = rhs._hasher;
+//        _eq          = rhs._eq;
+        _num_buckets = rhs._num_buckets;
+        _num_filled  = rhs._num_filled;
+        _mlf         = rhs._mlf;
+        _last        = rhs._last;
+        _mask        = rhs._mask;
+#if EMH_HIGH_LOAD
+        _ehead       = rhs._ehead;
+#endif
+        _etail       = rhs._etail;
+
+        auto opairs  = rhs._pairs;
+        memcpy((char*)_index, (char*)rhs._index, (_num_buckets + EAD) * sizeof(Index));
+
+        if (is_copy_trivially()) {
+            memcpy((char*)_pairs, (char*)opairs, _num_filled * sizeof(value_type));
+        } else {
+            for (size_type slot = 0; slot < _num_filled; slot++)
+                new(_pairs + slot) value_type(opairs[slot]);
+        }
+    }
+
+    void swap(HashMap& rhs)
+    {
+        //      std::swap(_eq, rhs._eq);
+        std::swap(_hasher, rhs._hasher);
+        std::swap(_pairs, rhs._pairs);
+        std::swap(_index, rhs._index);
+        std::swap(_num_buckets, rhs._num_buckets);
+        std::swap(_num_filled, rhs._num_filled);
+        std::swap(_mask, rhs._mask);
+        std::swap(_mlf, rhs._mlf);
+        std::swap(_last, rhs._last);
+#if EMH_HIGH_LOAD
+        std::swap(_ehead, rhs._ehead);
+#endif
+        std::swap(_etail, rhs._etail);
+    }
+
+    // -------------------------------------------------------------
+    iterator first() const { return {this, 0}; }
+    iterator last() const { return {this, _num_filled - 1}; }
+
+    value_type& front() { return _pairs[0]; }
+    const value_type& front() const { return _pairs[0]; }
+    value_type& back() { return _pairs[_num_filled - 1]; }
+    const value_type& back() const { return _pairs[_num_filled - 1]; }
+
+    void pop_front() { erase(begin()); } //TODO. only erase first without move last
+    void pop_back() { erase(last()); }
+
+    iterator begin() { return first(); }
+    const_iterator cbegin() const { return first(); }
+    const_iterator begin() const { return first(); }
+
+    iterator end() { return {this, _num_filled}; }
+    const_iterator cend() const { return {this, _num_filled}; }
+    const_iterator end() const { return cend(); }
+
+    const value_type* values() const { return _pairs; }
+    const Index* index() const { return _index; }
+
+    size_type size() const { return _num_filled; }
+    bool empty() const { return _num_filled == 0; }
+    size_type bucket_count() const { return _num_buckets; }
+
+    /// Returns average number of elements per bucket.
+    float load_factor() const { return static_cast<float>(_num_filled) / (_mask + 1); }
+
+    HashT& hash_function() const { return _hasher; }
+    EqT& key_eq() const { return _eq; }
+
+    void max_load_factor(float mlf)
+    {
+        if (mlf < 0.992 && mlf > EMH_MIN_LOAD_FACTOR) {
+            _mlf = (uint32_t)((1 << 27) / mlf);
+            if (_num_buckets > 0) rehash(_num_buckets);
+        }
+    }
+
+    constexpr float max_load_factor() const { return (1 << 27) / (float)_mlf; }
+    constexpr size_type max_size() const { return (1ull << (sizeof(size_type) * 8 - 1)); }
+    constexpr size_type max_bucket_count() const { return max_size(); }
+
+#if EMH_STATIS
+    //Returns the bucket number where the element with key k is located.
+    size_type bucket(const KeyT& key) const
+    {
+        const auto bucket = hash_bucket(key);
+        const auto next_bucket = _index[bucket].next;
+        if ((int)next_bucket < 0)
+            return 0;
+        else if (bucket == next_bucket)
+            return bucket + 1;
+
+        return hash_main(bucket) + 1;
+    }
+
+    //Returns the number of elements in bucket n.
+    size_type bucket_size(const size_type bucket) const
+    {
+        auto next_bucket = _index[bucket].next;
+        if ((int)next_bucket < 0)
+            return 0;
+
+        next_bucket = hash_main(bucket);
+        size_type ibucket_size = 1;
+
+        //iterate over each item in the current main bucket
+        while (true) {
+            const auto nbucket = _index[next_bucket].next;
+            if (nbucket == next_bucket) {
+                break;
+            }
+            ibucket_size ++;
+            next_bucket = nbucket;
+        }
+        return ibucket_size;
+    }
+
+    size_type get_main_bucket(const size_type bucket) const
+    {
+        auto next_bucket = _index[bucket].next;
+        if ((int)next_bucket < 0)
+            return INACTIVE;
+
+        return hash_main(bucket);
+    }
+
+    size_type get_diss(size_type bucket, size_type next_bucket, const size_type slots) const
+    {
+        auto pbucket = reinterpret_cast<uint64_t>(&_pairs[bucket]);
+        auto pnext   = reinterpret_cast<uint64_t>(&_pairs[next_bucket]);
+        if (pbucket / EMH_CACHE_LINE_SIZE == pnext / EMH_CACHE_LINE_SIZE)
+            return 0;
+        size_type diff = pbucket > pnext ? (pbucket - pnext) : (pnext - pbucket);
+        if (diff / EMH_CACHE_LINE_SIZE < slots - 1)
+            return diff / EMH_CACHE_LINE_SIZE + 1;
+        return slots - 1;
+    }
+
+    int get_bucket_info(const size_type bucket, size_type steps[], const size_type slots) const
+    {
+        auto next_bucket = _index[bucket].next;
+        if ((int)next_bucket < 0)
+            return -1;
+
+        const auto main_bucket = hash_main(bucket);
+        if (next_bucket == main_bucket)
+            return 1;
+        else if (main_bucket != bucket)
+            return 0;
+
+        steps[get_diss(bucket, next_bucket, slots)] ++;
+        size_type ibucket_size = 2;
+        //walk the linked buckets of this chain to its tail
+        while (true) {
+            const auto nbucket = _index[next_bucket].next;
+            if (nbucket == next_bucket)
+                break;
+
+            steps[get_diss(nbucket, next_bucket, slots)] ++;
+            ibucket_size ++;
+            next_bucket = nbucket;
+        }
+        return (int)ibucket_size;
+    }
+
+    void dump_statics() const
+    {
+        const size_type slots = 128;
+        size_type buckets[slots + 1] = {0};
+        size_type steps[slots + 1]   = {0};
+        for (size_type bucket = 0; bucket < _num_buckets; ++bucket) {
+            auto bsize = get_bucket_info(bucket, steps, slots);
+            if (bsize > 0)
+                buckets[bsize] ++;
+        }
+
+        size_type sumb = 0, collision = 0, sumc = 0, finds = 0, sumn = 0;
+        puts("============== buckets size ration =========");
+        for (size_type i = 0; i < sizeof(buckets) / sizeof(buckets[0]); i++) {
+            const auto bucketsi = buckets[i];
+            if (bucketsi == 0)
+                continue;
+            sumb += bucketsi;
+            sumn += bucketsi * i;
+            collision += bucketsi * (i - 1);
+            finds += bucketsi * i * (i + 1) / 2;
+            printf("  %2u  %8u  %2.2lf|  %.2lf\n", i, bucketsi, bucketsi * 100.0 * i / _num_filled, sumn * 100.0 / _num_filled);
+        }
+
+        puts("========== collision miss ration ===========");
+        for (size_type i = 0; i < sizeof(steps) / sizeof(steps[0]); i++) {
+            sumc += steps[i];
+            if (steps[i] <= 2)
+                continue;
+            printf("  %2u  %8u  %.2lf  %.2lf\n", i, steps[i], steps[i] * 100.0 / collision, sumc * 100.0 / collision);
+        }
+
+        if (sumb == 0)  return;
+        printf("    _num_filled/bucket_size/packed collision/cache_miss/hit_find = %u/%.2lf/%zd/ %.2lf%%/%.2lf%%/%.2lf\n",
+                _num_filled, _num_filled * 1.0 / sumb, sizeof(value_type), (collision * 100.0 / _num_filled), (collision - steps[0]) * 100.0 / _num_filled, finds * 1.0 / _num_filled);
+        assert(sumn == _num_filled);
+        assert(sumc == collision);
+        puts("============== buckets size end =============");
+    }
+#endif
+
+    void pack_zero(ValueT zero)
+    {
+        _pairs[_num_filled] = {KeyT(), zero};
+    }
+
+    // ------------------------------------------------------------
+    template<typename K=KeyT>
+    iterator find(const K& key) noexcept
+    {
+        return {this, find_filled_slot(key)};
+    }
+
+    template<typename K=KeyT>
+    const_iterator find(const K& key) const noexcept
+    {
+        return {this, find_filled_slot(key)};
+    }
+
+    template<typename K=KeyT>
+    ValueT& at(const K& key)
+    {
+        const auto slot = find_filled_slot(key);
+        //throw
+        return _pairs[slot].second;
+    }
+
+    template<typename K=KeyT>
+    const ValueT& at(const K& key) const
+    {
+        const auto slot = find_filled_slot(key);
+        //throw
+        return _pairs[slot].second;
+    }
+
+    const ValueT& index(const uint32_t index) const
+    {
+        return _pairs[index].second;
+    }
+
+    ValueT& index(const uint32_t index)
+    {
+        return _pairs[index].second;
+    }
+
+    template<typename K=KeyT>
+    bool contains(const K& key) const noexcept
+    {
+        return find_filled_slot(key) != _num_filled;
+    }
+
+    template<typename K=KeyT>
+    size_type count(const K& key) const noexcept
+    {
+        return find_filled_slot(key) == _num_filled ? 0 : 1;
+        //return find_sorted_bucket(key) == END ? 0 : 1;
+        //return find_hash_bucket(key) == END ? 0 : 1;
+    }
+
+    template<typename K=KeyT>
+    std::pair<iterator, iterator> equal_range(const K& key)
+    {
+        const auto found = find(key);
+        if (found.second == _num_filled)
+            return { found, found };
+        else
+            return { found, std::next(found) };
+    }
+
+    void merge(HashMap& rhs)
+    {
+        if (empty()) {
+            *this = std::move(rhs);
+            return;
+        }
+
+        for (auto rit = rhs.begin(); rit != rhs.end(); ) {
+            auto fit = find(rit->first);
+            if (fit == end()) {
+                insert_unique(rit->first, std::move(rit->second));
+                rit = rhs.erase(rit);
+            } else {
+                ++rit;
+            }
+        }
+    }
+
+    /// Returns true and copies the matching value into val, or false if key isn't found.
+    bool try_get(const KeyT& key, ValueT& val) const noexcept
+    {
+        const auto slot = find_filled_slot(key);
+        const auto found = slot != _num_filled;
+        if (found) {
+            val = _pairs[slot].second;
+        }
+        return found;
+    }
+
+    /// Returns the matching ValueT or nullptr if k isn't found.
+    ValueT* try_get(const KeyT& key) noexcept
+    {
+        const auto slot = find_filled_slot(key);
+        return slot != _num_filled ? &_pairs[slot].second : nullptr;
+    }
+
+    /// Const version of the above
+    ValueT* try_get(const KeyT& key) const noexcept
+    {
+        const auto slot = find_filled_slot(key);
+        return slot != _num_filled ? &_pairs[slot].second : nullptr;
+    }
+
+    /// set value if key exists
+    bool try_set(const KeyT& key, const ValueT& val) noexcept
+    {
+        const auto slot = find_filled_slot(key);
+        if (slot == _num_filled)
+            return false;
+
+        _pairs[slot].second = val;
+        return true;
+    }
+
+    /// set value if key exists
+    bool try_set(const KeyT& key, ValueT&& val) noexcept
+    {
+        const auto slot = find_filled_slot(key);
+        if (slot == _num_filled)
+            return false;
+
+        _pairs[slot].second = std::move(val);
+        return true;
+    }
+
+    /// Convenience function.
+    ValueT get_or_return_default(const KeyT& key) const noexcept
+    {
+        const auto slot = find_filled_slot(key);
+        return slot == _num_filled ? ValueT() : _pairs[slot].second;
+    }
+
+    // -----------------------------------------------------
+    std::pair<iterator, bool> do_insert(const value_type& value) noexcept
+    {
+        const auto key_hash = hash_key(value.first);
+        const auto bucket = find_or_allocate(value.first, key_hash);
+        const auto bempty = EMH_EMPTY(bucket);
+        if (bempty) {
+            EMH_NEW(value.first, value.second, bucket, key_hash);
+        }
+
+        const auto slot = _index[bucket].slot & _mask;
+        return { {this, slot}, bempty };
+    }
+
+    std::pair<iterator, bool> do_insert(value_type&& value) noexcept
+    {
+        const auto key_hash = hash_key(value.first);
+        const auto bucket = find_or_allocate(value.first, key_hash);
+        const auto bempty = EMH_EMPTY(bucket);
+        if (bempty) {
+            EMH_NEW(std::move(value.first), std::move(value.second), bucket, key_hash);
+        }
+
+        const auto slot = _index[bucket].slot & _mask;
+        return { {this, slot}, bempty };
+    }
+
+    template<typename K, typename V>
+    std::pair<iterator, bool> do_insert(K&& key, V&& val) noexcept
+    {
+        const auto key_hash = hash_key(key);
+        const auto bucket = find_or_allocate(key, key_hash);
+        const auto bempty = EMH_EMPTY(bucket);
+        if (bempty) {
+            EMH_NEW(std::forward<K>(key), std::forward<V>(val), bucket, key_hash);
+        }
+
+        const auto slot = _index[bucket].slot & _mask;
+        return { {this, slot}, bempty };
+    }
+
+    template<typename K, typename V>
+    std::pair<iterator, bool> do_assign(K&& key, V&& val) noexcept
+    {
+        check_expand_need();
+        const auto key_hash = hash_key(key);
+        const auto bucket = find_or_allocate(key, key_hash);
+        const auto bempty = EMH_EMPTY(bucket);
+        if (bempty) {
+            EMH_NEW(std::forward<K>(key), std::forward<V>(val), bucket, key_hash);
+        } else {
+            _pairs[_index[bucket].slot & _mask].second = std::move(val);
+        }
+
+        const auto slot = _index[bucket].slot & _mask;
+        return { {this, slot}, bempty };
+    }
+
+    std::pair<iterator, bool> insert(const value_type& p)
+    {
+        check_expand_need();
+        return do_insert(p);
+    }
+
+    std::pair<iterator, bool> insert(value_type && p)
+    {
+        check_expand_need();
+        return do_insert(std::move(p));
+    }
+
+    void insert(std::initializer_list<value_type> ilist)
+    {
+        reserve(ilist.size() + _num_filled, false);
+        for (auto it = ilist.begin(); it != ilist.end(); ++it)
+            do_insert(*it);
+    }
+
+    template <typename Iter>
+    void insert(Iter first, Iter last)
+    {
+        reserve(std::distance(first, last) + _num_filled, false);
+        for (; first != last; ++first)
+            do_insert(first->first, first->second);
+    }
+
+#if 0
+    template <typename Iter>
+    void insert_unique(Iter begin, Iter end)
+    {
+        reserve(std::distance(begin, end) + _num_filled, false);
+        for (; begin != end; ++begin) {
+            insert_unique(*begin);
+        }
+    }
+#endif
+
+    template<typename K, typename V>
+    size_type insert_unique(K&& key, V&& val)
+    {
+        check_expand_need();
+        const auto key_hash = hash_key(key);
+        auto bucket = find_unique_bucket(key_hash);
+        EMH_NEW(std::forward<K>(key), std::forward<V>(val), bucket, key_hash);
+        return bucket;
+    }
+
+    size_type insert_unique(value_type&& value)
+    {
+        return insert_unique(std::move(value.first), std::move(value.second));
+    }
+
+    size_type insert_unique(const value_type& value)
+    {
+        return insert_unique(value.first, value.second);
+    }
+
+    template <class... Args>
+    std::pair<iterator, bool> emplace(Args&&... args) noexcept
+    {
+        check_expand_need();
+        return do_insert(std::forward<Args>(args)...);
+    }
+
+    //the hint position is not used for any optimization
+    template <class... Args>
+    iterator emplace_hint(const_iterator hint, Args&&... args)
+    {
+        (void)hint;
+        check_expand_need();
+        return do_insert(std::forward<Args>(args)...).first;
+    }
+
+    template<class... Args>
+    std::pair<iterator, bool> try_emplace(const KeyT& k, Args&&... args)
+    {
+        check_expand_need();
+        return do_insert(k, std::forward<Args>(args)...);
+    }
+
+    template<class... Args>
+    std::pair<iterator, bool> try_emplace(KeyT&& k, Args&&... args)
+    {
+        check_expand_need();
+        return do_insert(std::move(k), std::forward<Args>(args)...);
+    }
+
+    template <class... Args>
+    size_type emplace_unique(Args&&... args)
+    {
+        return insert_unique(std::forward<Args>(args)...);
+    }
+
+    std::pair<iterator, bool> insert_or_assign(const KeyT& key, ValueT&& val) { return do_assign(key, std::forward<ValueT>(val)); }
+    std::pair<iterator, bool> insert_or_assign(KeyT&& key, ValueT&& val) { return do_assign(std::move(key), std::forward<ValueT>(val)); }
+
+    /// Return the old value or ValueT() if it didn't exist.
+    ValueT set_get(const KeyT& key, const ValueT& val)
+    {
+        check_expand_need();
+        const auto key_hash = hash_key(key);
+        const auto bucket = find_or_allocate(key, key_hash);
+        if (EMH_EMPTY(bucket)) {
+            EMH_NEW(key, val, bucket, key_hash);
+            return ValueT();
+        } else {
+            const auto slot = _index[bucket].slot & _mask;
+            ValueT old_value(val);
+            std::swap(_pairs[slot].second, old_value);
+            return old_value;
+        }
+    }
+
+    /// Like std::map<KeyT, ValueT>::operator[].
+    ValueT& operator[](const KeyT& key) noexcept
+    {
+        check_expand_need();
+        const auto key_hash = hash_key(key);
+        const auto bucket = find_or_allocate(key, key_hash);
+        if (EMH_EMPTY(bucket)) {
+            /* Check if inserting a value rather than overwriting an old entry */
+            EMH_NEW(key, std::move(ValueT()), bucket, key_hash);
+        }
+
+        const auto slot = _index[bucket].slot & _mask;
+        return _pairs[slot].second;
+    }
+
+    ValueT& operator[](KeyT&& key) noexcept
+    {
+        check_expand_need();
+        const auto key_hash = hash_key(key);
+        const auto bucket = find_or_allocate(key, key_hash);
+        if (EMH_EMPTY(bucket)) {
+            EMH_NEW(std::move(key), std::move(ValueT()), bucket, key_hash);
+        }
+
+        const auto slot = _index[bucket].slot & _mask;
+        return _pairs[slot].second;
+    }
+
+    /// Erase an element from the hash table.
+    /// return 0 if element was not found
+    size_type erase(const KeyT& key) noexcept
+    {
+        const auto key_hash = hash_key(key);
+        const auto sbucket = find_filled_bucket(key, key_hash);
+        if (sbucket == INACTIVE)
+            return 0;
+
+        const auto main_bucket = key_hash & _mask;
+        erase_slot(sbucket, (size_type)main_bucket);
+        return 1;
+    }
+
+    //iterator erase(const_iterator begin_it, const_iterator end_it)
+    iterator erase(const const_iterator& cit) noexcept
+    {
+        const auto slot = (size_type)(cit.kv_ - _pairs);
+        size_type main_bucket;
+        const auto sbucket = find_slot_bucket(slot, main_bucket); //TODO
+        erase_slot(sbucket, main_bucket);
+        return {this, slot};
+    }
+
+    //only last >= first
+    iterator erase(const_iterator first, const_iterator last) noexcept
+    {
+        auto esize = long(last.kv_ - first.kv_);
+        auto tsize = long((_pairs + _num_filled) - last.kv_); //last to tail size
+        auto next = first;
+        while (tsize -- > 0) {
+            if (esize-- <= 0)
+                break;
+            next = ++erase(next);
+        }
+
+        //fast erase from last
+        next = this->last();
+        while (esize -- > 0)
+            next = --erase(next);
+
+        return {this, size_type(next.kv_ - _pairs)};
+    }
+
+    template<typename Pred>
+    size_type erase_if(Pred pred)
+    {
+        auto old_size = size();
+        for (auto it = begin(); it != end();) {
+            if (pred(*it))
+                it = erase(it);
+            else
+                ++it;
+        }
+        return old_size - size();
+    }
+
+    static constexpr bool is_triviall_destructable()
+    {
+#if __cplusplus >= 201402L || _MSC_VER > 1600
+        return !(std::is_trivially_destructible<KeyT>::value && std::is_trivially_destructible<ValueT>::value);
+#else
+        return !(std::is_pod<KeyT>::value && std::is_pod<ValueT>::value);
+#endif
+    }
+
+    static constexpr bool is_copy_trivially()
+    {
+#if __cplusplus >= 201103L || _MSC_VER > 1600
+        return (std::is_trivially_copyable<KeyT>::value && std::is_trivially_copyable<ValueT>::value);
+#else
+        return (std::is_pod<KeyT>::value && std::is_pod<ValueT>::value);
+#endif
+    }
+
+    void clearkv()
+    {
+        if (is_triviall_destructable()) {
+            while (_num_filled --)
+                _pairs[_num_filled].~value_type();
+        }
+    }
+
+    /// Remove all elements, keeping full capacity.
+    void clear() noexcept
+    {
+        clearkv();
+
+        if (_num_filled > 0)
+            memset((char*)_index, INACTIVE, sizeof(_index[0]) * _num_buckets);
+
+        _last = _num_filled = 0;
+        _etail = INACTIVE;
+
+#if EMH_HIGH_LOAD
+        _ehead = 0;
+#endif
+    }
+
+    void shrink_to_fit(const float min_factor = EMH_DEFAULT_LOAD_FACTOR / 4)
+    {
+        if (load_factor() < min_factor && bucket_count() > 10) //safe guard
+            rehash(_num_filled + 1);
+    }
+
+#if EMH_HIGH_LOAD
+    #define EMH_PREVET(i, n) i[n].slot
+    void set_empty()
+    {
+        auto prev = 0;
+        for (int32_t bucket = 1; bucket < _num_buckets; ++bucket) {
+            if (EMH_EMPTY(bucket)) {
+                if (prev != 0) {
+                    EMH_PREVET(_index, bucket) = prev;
+                    _index[_prev].next = -bucket;
+                }
+                else
+                    _ehead = bucket;
+                prev = bucket;
+            }
+        }
+
+        EMH_PREVET(_index, _ehead) = prev;
+        _index[_prev].next = 0-_ehead;
+        _ehead = 0-_index[_ehead].next;
+    }
+
+    void clear_empty()
+    {
+        auto prev = EMH_PREVET(_index, _ehead);
+        while (prev != _ehead) {
+            _index[_prev].next = INACTIVE;
+            prev = EMH_PREVET(_index, prev);
+        }
+        _index[_ehead].next = INACTIVE;
+        _ehead = 0;
+    }
+
+    //prev-ehead->next
+    size_type pop_empty(const size_type bucket)
+    {
+        const auto prev_bucket = EMH_PREVET(_index, bucket);
+        const int next_bucket = 0-_index[bucket].next;
+
+        EMH_PREVET(_index, next_bucket) = prev_bucket;
+        _index[prev_bucket].next = -next_bucket;
+
+        _ehead = next_bucket;
+        return bucket;
+    }
+
+    //ehead->bucket->next
+    void push_empty(const int32_t bucket)
+    {
+        const int next_bucket = 0-_index[_ehead].next;
+        assert(next_bucket > 0);
+
+        EMH_PREVET(_index, bucket) = _ehead;
+        _index[bucket].next = -next_bucket;
+
+        EMH_PREVET(_index, next_bucket) = bucket;
+        _index[_ehead].next = -bucket;
+        //        _ehead = bucket;
+    }
+#endif
+
+    /// Make room for this many elements
+    bool reserve(uint64_t num_elems, bool force)
+    {
+        (void)force;
+#if EMH_HIGH_LOAD == 0
+        const auto required_buckets = num_elems * _mlf >> 27;
+        if (EMH_LIKELY(required_buckets < _mask)) // && !force
+            return false;
+
+#elif EMH_HIGH_LOAD
+        const auto required_buckets = num_elems + num_elems * 1 / 9;
+        if (EMH_LIKELY(required_buckets < _mask))
+            return false;
+
+        else if (_num_buckets < 16 && _num_filled < _num_buckets)
+            return false;
+
+        else if (_num_buckets > EMH_HIGH_LOAD) {
+            if (_ehead == 0) {
+                set_empty();
+                return false;
+            } else if (/*_num_filled + 100 < _num_buckets && */_index[_ehead].next != 0-_ehead) {
+                return false;
+            }
+        }
+#endif
+#if EMH_STATIS
+        if (_num_filled > EMH_STATIS) dump_statics();
+#endif
+
+        //assert(required_buckets < max_size());
+        rehash(required_buckets + 2);
+        return true;
+    }
+
+    static value_type* alloc_bucket(size_type num_buckets)
+    {
+#ifdef EMH_ALLOC
+        auto new_pairs = aligned_alloc(32, (uint64_t)num_buckets * sizeof(value_type));
+#else
+        auto new_pairs = malloc((uint64_t)num_buckets * sizeof(value_type));
+#endif
+        return (value_type *)(new_pairs);
+    }
+
+    static Index* alloc_index(size_type num_buckets)
+    {
+        auto new_index = (char*)malloc((uint64_t)(EAD + num_buckets) * sizeof(Index));
+        return (Index *)(new_index);
+    }
+
+    bool reserve(size_type required_buckets) noexcept
+    {
+        if (_num_filled != required_buckets)
+            return reserve(required_buckets, true);
+
+        _last = 0;
+#if EMH_HIGH_LOAD
+        _ehead = 0;
+#endif
+
+#if EMH_SORT
+        std::sort(_pairs, _pairs + _num_filled, [this](const value_type & l, const value_type & r) {
+            const auto hashl = (size_type)hash_key(l.first) & _mask, hashr = (size_type)hash_key(r.first) & _mask;
+            return hashl < hashr;
+            //return l.first < r.first;
+        });
+#endif
+
+        memset((char*)_index, INACTIVE, sizeof(_index[0]) * _num_buckets);
+        for (size_type slot = 0; slot < _num_filled; slot++) {
+            const auto& key = _pairs[slot].first;
+            const auto key_hash = hash_key(key);
+            const auto bucket = size_type(key_hash & _mask);
+            auto& next_bucket = _index[bucket].next;
+            if ((int)next_bucket < 0)
+                _index[bucket] = {1, slot | ((size_type)(key_hash) & ~_mask)};
+            else {
+                _index[bucket].slot |= (size_type)(key_hash) & ~_mask;
+                next_bucket ++;
+            }
+        }
+        return true;
+    }
+
+    void rebuild(size_type num_buckets) noexcept
+    {
+        free(_index);
+        auto new_pairs = (value_type*)alloc_bucket((size_type)(num_buckets * max_load_factor()) + 4);
+        if (is_copy_trivially()) {
+            if (_pairs)
+                memcpy((char*)new_pairs, (char*)_pairs, _num_filled * sizeof(value_type));
+        } else {
+            for (size_type slot = 0; slot < _num_filled; slot++) {
+                new(new_pairs + slot) value_type(std::move(_pairs[slot]));
+                if (is_triviall_destructable())
+                    _pairs[slot].~value_type();
+            }
+        }
+        free(_pairs);
+        _pairs = new_pairs;
+        _index = (Index*)alloc_index (num_buckets);
+
+        memset((char*)_index, INACTIVE, sizeof(_index[0]) * num_buckets);
+        memset((char*)(_index + num_buckets), 0, sizeof(_index[0]) * EAD);
+    }
+
+    void rehash(uint64_t required_buckets)
+    {
+        if (required_buckets < _num_filled)
+            return;
+
+        assert(required_buckets < max_size());
+        auto num_buckets = _num_filled > (1u << 16) ? (1u << 16) : 4u;
+        while (num_buckets < required_buckets) { num_buckets *= 2; }
+#if EMH_SAVE_MEM
+        if (sizeof(KeyT) < sizeof(size_type) && num_buckets >= (1ul << (2 * 8)))
+            num_buckets = 2ul << (sizeof(KeyT) * 8);
+#endif
+
+#if EMH_REHASH_LOG
+        auto last = _last;
+        size_type collision = 0;
+#endif
+
+#if EMH_HIGH_LOAD
+        _ehead = 0;
+#endif
+        _last = 0;
+
+        _mask        = num_buckets - 1;
+#if EMH_PACK_TAIL > 1
+        _last = _mask;
+        num_buckets += num_buckets * EMH_PACK_TAIL / 100; //add more 5-10%
+#endif
+        _num_buckets = num_buckets;
+
+        rebuild(num_buckets);
+
+#ifdef EMH_SORT
+        std::sort(_pairs, _pairs + _num_filled, [this](const value_type & l, const value_type & r) {
+            const auto hashl = hash_key(l.first), hashr = hash_key(r.first);
+            auto diff = int64_t((hashl & _mask) - (hashr & _mask));
+            if (diff != 0)
+                return diff < 0;
+            return hashl < hashr;
+//          return l.first < r.first;
+        });
+#endif
+
+        _etail = INACTIVE;
+        for (size_type slot = 0; slot < _num_filled; ++slot) {
+            const auto& key = _pairs[slot].first;
+            const auto key_hash = hash_key(key);
+            const auto bucket = find_unique_bucket(key_hash);
+            _index[bucket] = { bucket, slot | ((size_type)(key_hash) & ~_mask) };
+
+#if EMH_REHASH_LOG
+            if (bucket != hash_main(bucket))
+                collision ++;
+#endif
+        }
+
+#if EMH_REHASH_LOG
+        if (_num_filled > EMH_REHASH_LOG) {
+            auto mbucket = _num_filled - collision;
+            char buff[255] = {0};
+            sprintf(buff, "    _num_filled/aver_size/K.V/pack/collision|last = %u/%.2lf/%s.%s/%zd|%.2lf%%,%.2lf%%",
+                    _num_filled, double (_num_filled) / mbucket, typeid(KeyT).name(), typeid(ValueT).name(), sizeof(_pairs[0]), collision * 100.0 / _num_filled, last * 100.0 / _num_buckets);
+#ifdef EMH_LOG
+            static uint32_t ihashs = 0; EMH_LOG() << "hash_nums = " << ihashs ++ << "|" <<__FUNCTION__ << "|" << buff << endl;
+#else
+            puts(buff);
+#endif
+        }
+#endif
+    }
+
+private:
+    // Can we fit another element?
+    bool check_expand_need()
+    {
+        return reserve(_num_filled, false);
+    }
+
+    static void prefetch_heap_block(char* ctrl)
+    {
+        // Prefetch the heap-allocated memory region to resolve potential TLB
+        // misses.  This is intended to overlap with execution of calculating the hash for a key.
+#if __linux__
+        __builtin_prefetch(static_cast<const void*>(ctrl));
+#elif _WIN32 && defined(_M_IX86)
+        _mm_prefetch((const char*)ctrl, _MM_HINT_T0);
+#endif
+    }
+
+    size_type slot_to_bucket(const size_type slot) const noexcept
+    {
+        size_type main_bucket;
+        return find_slot_bucket(slot, main_bucket); //TODO
+    }
+
+    //very slow
+    void erase_slot(const size_type sbucket, const size_type main_bucket) noexcept
+    {
+        const auto slot = _index[sbucket].slot & _mask;
+        const auto ebucket = erase_bucket(sbucket, main_bucket);
+        const auto last_slot = --_num_filled;
+        if (EMH_LIKELY(slot != last_slot)) {
+            const auto last_bucket = (_etail == INACTIVE || ebucket == _etail)
+                ? slot_to_bucket(last_slot) : _etail;
+
+            _pairs[slot] = std::move(_pairs[last_slot]);
+            _index[last_bucket].slot = slot | (_index[last_bucket].slot & ~_mask);
+        }
+
+        if (is_triviall_destructable())
+            _pairs[last_slot].~value_type();
+
+        _etail = INACTIVE;
+        _index[ebucket] = {INACTIVE, 0};
+#if EMH_HIGH_LOAD
+        if (_ehead) {
+            if (10 * _num_filled < 8 * _num_buckets)
+                clear_empty();
+            else if (ebucket)
+                push_empty(ebucket);
+        }
+#endif
+    }
+
+    size_type erase_bucket(const size_type bucket, const size_type main_bucket) noexcept
+    {
+        const auto next_bucket = _index[bucket].next;
+        if (bucket == main_bucket) {
+            if (main_bucket != next_bucket) {
+                const auto nbucket = _index[next_bucket].next;
+                _index[main_bucket] = {
+                    (nbucket == next_bucket) ? main_bucket : nbucket,
+                    _index[next_bucket].slot
+                };
+            }
+            return next_bucket;
+        }
+
+        const auto prev_bucket = find_prev_bucket(main_bucket, bucket);
+        _index[prev_bucket].next = (bucket == next_bucket) ? prev_bucket : next_bucket;
+        return bucket;
+    }
+
+    // Find the bucket that references this slot; main_bucket receives the slot's home bucket
+    size_type find_slot_bucket(const size_type slot, size_type& main_bucket) const
+    {
+        const auto key_hash = hash_key(_pairs[slot].first);
+        const auto bucket = main_bucket = size_type(key_hash & _mask);
+        if (slot == (_index[bucket].slot & _mask))
+            return bucket;
+
+        auto next_bucket = _index[bucket].next;
+        while (true) {
+            if (EMH_LIKELY(slot == (_index[next_bucket].slot & _mask)))
+                return next_bucket;
+            next_bucket = _index[next_bucket].next;
+        }
+
+        return INACTIVE;
+    }
+
+    // Find the bucket holding this key, or return INACTIVE if the key is absent
+    size_type find_filled_bucket(const KeyT& key, uint64_t key_hash) const noexcept
+    {
+        const auto bucket = size_type(key_hash & _mask);
+        auto next_bucket  = _index[bucket].next;
+        if (EMH_UNLIKELY((int)next_bucket < 0))
+            return INACTIVE;
+
+        const auto slot = _index[bucket].slot & _mask;
+        //prefetch_heap_block((char*)&_pairs[slot]);
+        if (EMH_EQHASH(bucket, key_hash)) {
+            if (EMH_LIKELY(_eq(key, _pairs[slot].first)))
+                return bucket;
+        }
+        if (next_bucket == bucket)
+            return INACTIVE;
+
+        while (true) {
+            if (EMH_EQHASH(next_bucket, key_hash)) {
+                const auto next_slot = _index[next_bucket].slot & _mask;
+                if (EMH_LIKELY(_eq(key, _pairs[next_slot].first)))
+                    return next_bucket;
+            }
+
+            const auto nbucket = _index[next_bucket].next;
+            if (nbucket == next_bucket)
+                return INACTIVE;
+            next_bucket = nbucket;
+        }
+
+        return INACTIVE;
+    }
+
+    // Find the slot holding this key, or return _num_filled (i.e. size()) if the key is absent
+    template<typename K=KeyT>
+    size_type find_filled_slot(const K& key) const noexcept
+    {
+        const auto key_hash = hash_key(key);
+        const auto bucket = size_type(key_hash & _mask);
+        auto next_bucket = _index[bucket].next;
+        if ((int)next_bucket < 0)
+            return _num_filled;
+
+        const auto slot = _index[bucket].slot & _mask;
+        //prefetch_heap_block((char*)&_pairs[slot]);
+        if (EMH_EQHASH(bucket, key_hash)) {
+            if (EMH_LIKELY(_eq(key, _pairs[slot].first)))
+                return slot;
+        }
+        if (next_bucket == bucket)
+            return _num_filled;
+
+        while (true) {
+            if (EMH_EQHASH(next_bucket, key_hash)) {
+                const auto next_slot = _index[next_bucket].slot & _mask;
+                if (EMH_LIKELY(_eq(key, _pairs[next_slot].first)))
+                    return next_slot;
+            }
+
+            const auto nbucket = _index[next_bucket].next;
+            if (nbucket == next_bucket)
+                return _num_filled;
+            next_bucket = nbucket;
+        }
+
+        return _num_filled;
+    }
+
+#if EMH_SORT
+    size_type find_hash_bucket(const KeyT& key) const noexcept
+    {
+        const auto key_hash = hash_key(key);
+        const auto bucket = size_type(key_hash & _mask);
+        const auto next_bucket = _index[bucket].next;
+        if ((int)next_bucket < 0)
+            return END;
+
+        auto slot = _index[bucket].slot & _mask;
+        if (_eq(key, _pairs[slot++].first))
+            return slot;
+        else if (next_bucket == bucket)
+            return END;
+
+        while (true) {
+            const auto& okey = _pairs[slot++].first;
+            if (_eq(key, okey))
+                return slot;
+
+            const auto hasho = hash_key(okey);
+            if ((hasho & _mask) != bucket)
+                break;
+            else if (hasho > key_hash)
+                break;
+            else if (EMH_UNLIKELY(slot >= _num_filled))
+                break;
+        }
+
+        return END;
+    }
+
+    //only used for lookups; cannot be used for insertion
+    size_type find_sorted_bucket(const KeyT& key) const noexcept
+    {
+        const auto key_hash = hash_key(key);
+        const auto bucket = size_type(key_hash & _mask);
+        const auto slots = (int)(_index[bucket].next); //TODO
+        if (slots < 0 /**|| key < _pairs[slot].first*/)
+            return END;
+
+        const auto slot = _index[bucket].slot & _mask;
+        auto ormask = _index[bucket].slot & ~_mask;
+        auto hmask  = (size_type)(key_hash) & ~_mask;
+        if ((hmask | ormask) != ormask)
+            return END;
+
+        if (_eq(key, _pairs[slot].first))
+            return slot;
+        else if (slots == 1 || key < _pairs[slot].first)
+            return END;
+
+#if EMH_SORT
+        if (key < _pairs[slot].first || key > _pairs[slots + slot - 1].first)
+            return END;
+#endif
+
+        for (size_type i = 1; i < slots; ++i) {
+            const auto& okey = _pairs[slot + i].first;
+            if (_eq(key, okey))
+                return slot + i;
+            //            else if (okey > key)
+            //                return END;
+        }
+
+        return END;
+    }
+#endif
+
+    //kick out the occupying bucket and find an empty one to occupy.
+    //it breaks the original link and relinks it again.
+    //before: main_bucket-->prev_bucket --> bucket   --> next_bucket
+    //after : main_bucket-->prev_bucket --> (removed)--> new_bucket--> next_bucket
+    size_type kickout_bucket(const size_type kmain, const size_type bucket) noexcept
+    {
+        const auto next_bucket = _index[bucket].next;
+        const auto new_bucket  = find_empty_bucket(next_bucket, 2);
+        const auto prev_bucket = find_prev_bucket(kmain, bucket);
+
+        const auto last = next_bucket == bucket ? new_bucket : next_bucket;
+        _index[new_bucket] = {last, _index[bucket].slot};
+
+        _index[prev_bucket].next = new_bucket;
+        _index[bucket].next = INACTIVE;
+
+        return bucket;
+    }
+
+    /*
+     ** inserts a new key into a hash table; first, check whether key's main
+     ** bucket/position is free. If not, check whether colliding node/bucket is in its main
+     ** position or not: if it is not, move colliding bucket to an empty place and
+     ** put new key in its main position; otherwise (colliding bucket is in its main
+     ** position), new key goes to an empty position.
+     */
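+    // A concrete illustration (hypothetical keys): when inserting K whose main bucket B
+    // is already occupied, two cases arise. If the resident entry's own main bucket is
+    // some B' != B, the resident is kicked out to a fresh empty bucket and relinked from
+    // B', and K takes B; otherwise K is appended to B's chain in a newly found empty bucket.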
+    template<typename K=KeyT>
+    size_type find_or_allocate(const K& key, uint64_t key_hash) noexcept
+    {
+        const auto bucket = size_type(key_hash & _mask);
+        auto next_bucket = _index[bucket].next;
+        prefetch_heap_block((char*)&_pairs[bucket]);
+        if ((int)next_bucket < 0) {
+#if EMH_HIGH_LOAD
+            if (next_bucket != INACTIVE)
+                pop_empty(bucket);
+#endif
+            return bucket;
+        }
+
+        const auto slot = _index[bucket].slot & _mask;
+        if (EMH_EQHASH(bucket, key_hash))
+            if (EMH_LIKELY(_eq(key, _pairs[slot].first)))
+                return bucket;
+
+        //check current bucket_key is in main bucket or not
+        const auto kmain = hash_bucket(_pairs[slot].first);
+        if (kmain != bucket)
+            return kickout_bucket(kmain, bucket);
+        else if (next_bucket == bucket)
+            return _index[next_bucket].next = find_empty_bucket(next_bucket, 1);
+
+        uint32_t csize = 1;
+        //find next linked bucket and check key
+        while (true) {
+            const auto eslot = _index[next_bucket].slot & _mask;
+            if (EMH_EQHASH(next_bucket, key_hash)) {
+                if (EMH_LIKELY(_eq(key, _pairs[eslot].first)))
+                    return next_bucket;
+            }
+
+            csize += 1;
+            const auto nbucket = _index[next_bucket].next;
+            if (nbucket == next_bucket)
+                break;
+            next_bucket = nbucket;
+        }
+
+        //find an empty bucket and link it to the tail
+        const auto new_bucket = find_empty_bucket(next_bucket, csize);
+        prefetch_heap_block((char*)&_pairs[new_bucket]);
+        return _index[next_bucket].next = new_bucket;
+    }
+
+    size_type find_unique_bucket(uint64_t key_hash) noexcept
+    {
+        const auto bucket = size_type(key_hash & _mask);
+        auto next_bucket = _index[bucket].next;
+        if ((int)next_bucket < 0) {
+#if EMH_HIGH_LOAD
+            if (next_bucket != INACTIVE)
+                pop_empty(bucket);
+#endif
+            return bucket;
+        }
+
+        //check current bucket_key is in main bucket or not
+        const auto kmain = hash_main(bucket);
+        if (EMH_UNLIKELY(kmain != bucket))
+            return kickout_bucket(kmain, bucket);
+        else if (EMH_UNLIKELY(next_bucket != bucket))
+            next_bucket = find_last_bucket(next_bucket);
+
+        return _index[next_bucket].next = find_empty_bucket(next_bucket, 2);
+    }
+
+    /***
+      Different probing techniques usually provide a trade-off between memory locality and avoidance of clustering.
+      Since Robin Hood hashing is relatively resilient to clustering (both primary and secondary), linear probing, the most cache-friendly alternative, is typically used.
+
+      This is the core algorithm of this hash map and has been heavily optimized and benchmarked.
+      Plain linear probing is inefficient at high load factors, so a 3-way linear probing
+      strategy is used to search for an empty slot; in benchmarks it is 2-3 times faster than
+      a one-way search strategy even at load factors above 0.9.
+
+      1. linearly or quadratically probe a few cache lines around the input slot "bucket_from" for fewer cache misses.
+      2. then search from the member variable "_last", initialized to 0.
+      3. finally search from the calculated position "(_num_filled + _last) & _mask", which acts like a random probe.
+      */
+    // The key is not in this map. Find a place to put it.
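+    // Illustrative probe sequence (assuming a table large enough that masking is a no-op):
+    // from bucket_from = 10 the default path checks buckets 11 and 12, then offsets 4, 7
+    // and 11 with a growing step (buckets 14/15, 17/18, 21/22), and only afterwards falls
+    // back to the scan anchored at _last plus a second probe at
+    // (_num_buckets / 2 + _last) & _mask (the default, non-EMH_PACK_TAIL path).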
+    size_type find_empty_bucket(const size_type bucket_from, uint32_t csize) noexcept
+    {
+        (void)csize;
+#if EMH_HIGH_LOAD
+        if (_ehead)
+            return pop_empty(_ehead);
+#endif
+
+        auto bucket = bucket_from;
+        if (EMH_EMPTY(++bucket) || EMH_EMPTY(++bucket))
+            return bucket;
+
+#ifdef EMH_QUADRATIC
+        constexpr size_type linear_probe_length = 2 * EMH_CACHE_LINE_SIZE / sizeof(Index);//16
+        for (size_type offset = csize + 2, step = 4; offset <= linear_probe_length; ) {
+            bucket = (bucket_from + offset) & _mask;
+            if (EMH_EMPTY(bucket) || EMH_EMPTY(++bucket))
+                return bucket;
+            offset += step; //7/8. 12. 16
+        }
+#else
+        constexpr size_type quadratic_probe_length = 6u;
+        for (size_type offset = 4u, step = 3u; step < quadratic_probe_length; ) {
+            bucket = (bucket_from + offset) & _mask;
+            if (EMH_EMPTY(bucket) || EMH_EMPTY(++bucket))
+                return bucket;
+            offset += step++;
+        }
+#endif
+
+#if EMH_PREFETCH
+        __builtin_prefetch(static_cast<const void*>(_index + _last + 1), 0, EMH_PREFETCH);
+#endif
+
+        for (;;) {
+#if EMH_PACK_TAIL
+            //find empty bucket and skip next
+            if (EMH_EMPTY(_last++))// || EMH_EMPTY(_last++))
+                return _last++ - 1;
+
+            if (EMH_UNLIKELY(_last >= _num_buckets))
+                _last = 0;
+
+            auto medium = (_mask / 4 + _last++) & _mask;
+            if (EMH_EMPTY(medium))
+                return medium;
+#else
+            _last &= _mask;
+            if (EMH_EMPTY(++_last))// || EMH_EMPTY(++_last))
+                return _last;
+
+            auto medium = (_num_buckets / 2 + _last) & _mask;
+            if (EMH_EMPTY(medium))// || EMH_EMPTY(++medium))
+                return medium;
+#endif
+        }
+
+        return 0;
+    }
+
+    size_type find_last_bucket(size_type main_bucket) const
+    {
+        auto next_bucket = _index[main_bucket].next;
+        if (next_bucket == main_bucket)
+            return main_bucket;
+
+        while (true) {
+            const auto nbucket = _index[next_bucket].next;
+            if (nbucket == next_bucket)
+                return next_bucket;
+            next_bucket = nbucket;
+        }
+    }
+
+    size_type find_prev_bucket(const size_type main_bucket, const size_type bucket) const
+    {
+        auto next_bucket = _index[main_bucket].next;
+        if (next_bucket == bucket)
+            return main_bucket;
+
+        while (true) {
+            const auto nbucket = _index[next_bucket].next;
+            if (nbucket == bucket)
+                return next_bucket;
+            next_bucket = nbucket;
+        }
+    }
+
+    size_type hash_bucket(const KeyT& key) const noexcept
+    {
+        return (size_type)hash_key(key) & _mask;
+    }
+
+    size_type hash_main(const size_type bucket) const noexcept
+    {
+        const auto slot = _index[bucket].slot & _mask;
+        return (size_type)hash_key(_pairs[slot].first) & _mask;
+    }
+
+#if EMH_INT_HASH
+    static constexpr uint64_t KC = UINT64_C(11400714819323198485);
+    static uint64_t hash64(uint64_t key)
+    {
+#if __SIZEOF_INT128__ && EMH_INT_HASH == 1
+        __uint128_t r = key; r *= KC;
+        return (uint64_t)(r >> 64) + (uint64_t)r;
+#elif EMH_INT_HASH == 2
+        //MurmurHash3Mixer
+        uint64_t h = key;
+        h ^= h >> 33;
+        h *= 0xff51afd7ed558ccd;
+        h ^= h >> 33;
+        h *= 0xc4ceb9fe1a85ec53;
+        h ^= h >> 33;
+        return h;
+#elif _WIN64 && EMH_INT_HASH == 1
+        uint64_t high;
+        return _umul128(key, KC, &high) + high;
+#elif EMH_INT_HASH == 3
+        auto ror  = (key >> 32) | (key << 32);
+        auto low  = key * 0xA24BAED4963EE407ull;
+        auto high = ror * 0x9FB21C651E98DF25ull;
+        auto mix  = low + high;
+        return mix;
+#elif EMH_INT_HASH == 1
+        uint64_t r = key * UINT64_C(0xca4bcaa75ec3f625);
+        return (r >> 32) + r;
+#elif EMH_WYHASH64
+        return wyhash64(key, KC);
+#else
+        uint64_t x = key;
+        x = (x ^ (x >> 30)) * UINT64_C(0xbf58476d1ce4e5b9);
+        x = (x ^ (x >> 27)) * UINT64_C(0x94d049bb133111eb);
+        x = x ^ (x >> 31);
+        return x;
+#endif
+    }
+#endif
+
+#if EMH_WYHASH_HASH
+    //#define WYHASH_CONDOM 1
+    static uint64_t wymix(uint64_t A, uint64_t B)
+    {
+#if defined(__SIZEOF_INT128__)
+        __uint128_t r = A; r *= B;
+#if WYHASH_CONDOM2
+        A ^= (uint64_t)r; B ^= (uint64_t)(r >> 64);
+#else
+        A = (uint64_t)r; B = (uint64_t)(r >> 64);
+#endif
+
+#elif defined(_MSC_VER) && defined(_M_X64)
+#if WYHASH_CONDOM2
+        uint64_t a, b;
+        a = _umul128(A, B, &b);
+        A ^= a; B ^= b;
+#else
+        A = _umul128(A, B, &B);
+#endif
+#else
+        uint64_t ha = A >> 32, hb = B >> 32, la = (uint32_t)A, lb = (uint32_t)B, hi, lo;
+        uint64_t rh = ha * hb, rm0 = ha * lb, rm1 = hb * la, rl = la * lb, t = rl + (rm0 << 32), c = t < rl;
+        lo = t + (rm1 << 32); c += lo < t; hi = rh + (rm0 >> 32) + (rm1 >> 32) + c;
+#if WYHASH_CONDOM2
+        A ^= lo; B ^= hi;
+#else
+        A = lo; B = hi;
+#endif
+#endif
+        return A ^ B;
+    }
+
+    //multiply and xor mix function, aka MUM
+    static inline uint64_t wyr8(const uint8_t *p) { uint64_t v; memcpy(&v, p, 8); return v; }
+    static inline uint64_t wyr4(const uint8_t *p) { uint32_t v; memcpy(&v, p, 4); return v; }
+    static inline uint64_t wyr3(const uint8_t *p, size_t k) {
+        return (((uint64_t)p[0]) << 16) | (((uint64_t)p[k >> 1]) << 8) | p[k - 1];
+    }
+
+    inline static const uint64_t secret[4] = {
+        0x2d358dccaa6c78a5ull, 0x8bb84b93962eacc9ull,
+        0x4b33a62ed433d4a3ull, 0x4d5a2da51de1aa47ull};
+public:
+    //wyhash main function https://github.com/wangyi-fudan/wyhash
+    static uint64_t wyhashstr(const char *key, const size_t len)
+    {
+        uint64_t a = 0, b = 0, seed = secret[0];
+        const uint8_t *p = (const uint8_t*)key;
+        if (EMH_LIKELY(len <= 16)) {
+            if (EMH_LIKELY(len >= 4)) {
+                const auto half = (len >> 3) << 2;
+                a = (wyr4(p) << 32U) | wyr4(p + half); p += len - 4;
+                b = (wyr4(p) << 32U) | wyr4(p - half);
+            } else if (len) {
+                a = wyr3(p, len);
+            }
+        } else {
+            size_t i = len;
+            if (EMH_UNLIKELY(i > 48)) {
+                uint64_t see1 = seed, see2 = seed;
+                do {
+                    seed = wymix(wyr8(p +  0) ^ secret[1], wyr8(p +  8) ^ seed);
+                    see1 = wymix(wyr8(p + 16) ^ secret[2], wyr8(p + 24) ^ see1);
+                    see2 = wymix(wyr8(p + 32) ^ secret[3], wyr8(p + 40) ^ see2);
+                    p += 48; i -= 48;
+                } while (EMH_LIKELY(i > 48));
+                seed ^= see1 ^ see2;
+            }
+            while (i > 16) {
+                seed = wymix(wyr8(p) ^ secret[1], wyr8(p + 8) ^ seed);
+                i -= 16; p += 16;
+            }
+            a = wyr8(p + i - 16);
+            b = wyr8(p + i - 8);
+        }
+
+        return wymix(secret[1] ^ len, wymix(a ^ secret[1], b ^ seed));
+    }
+#endif
+
+private:
+    template<typename UType, typename std::enable_if<std::is_integral<UType>::value, uint32_t>::type = 0>
+        inline uint64_t hash_key(const UType key) const
+        {
+#if EMH_INT_HASH
+            return hash64(key);
+#elif EMH_IDENTITY_HASH
+            return key + (key >> 24);
+#else
+            return _hasher(key);
+#endif
+        }
+
+    template<typename UType, typename std::enable_if<std::is_same<UType, std::string>::value, uint32_t>::type = 0>
+        inline uint64_t hash_key(const UType& key) const
+        {
+#if EMH_WYHASH_HASH
+            return wyhashstr(key.data(), key.size());
+#else
+            return _hasher(key);
+#endif
+        }
+
+    template<typename UType, typename std::enable_if<!std::is_integral<UType>::value && !std::is_same<UType, std::string>::value, uint32_t>::type = 0>
+        inline uint64_t hash_key(const UType& key) const
+        {
+            return _hasher(key);
+        }
+
+private:
+    Index*    _index;
+    value_type*_pairs;
+
+    HashT     _hasher;
+    EqT       _eq;
+    uint32_t  _mlf;
+    size_type _mask;
+    size_type _num_buckets;
+    size_type _num_filled;
+    size_type _last;
+#if EMH_HIGH_LOAD
+    size_type _ehead;
+#endif
+    size_type _etail;
+};
+} // namespace emhash
+
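Note on the emhash header above: all of its fast integer and string paths reduce to the same primitive, a full 64x64 -> 128-bit multiply whose two halves are folded back into 64 bits. A minimal standalone sketch of that primitive (not part of the patch), assuming a GCC/Clang compiler with __uint128_t support; mum_mix() is a hypothetical name used only here:

    // mum_mix() mirrors wymix() above: multiply, then xor-fold the halves.
    // hash64() with EMH_INT_HASH == 1 is the same multiply but folds with '+'.
    #include <cstdint>
    #include <cstdio>

    static uint64_t mum_mix(uint64_t a, uint64_t b) {
      __uint128_t r = static_cast<__uint128_t>(a) * b;  // 64x64 -> 128-bit product
      return static_cast<uint64_t>(r) ^ static_cast<uint64_t>(r >> 64);
    }

    int main() {
      const uint64_t kc = UINT64_C(11400714819323198485);  // the KC constant above
      std::printf("%016llx\n",
                  static_cast<unsigned long long>(mum_mix(42, kc)));
      return 0;
    }
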
diff --git a/src/third_party/rapidhash/README.ninja b/src/third_party/rapidhash/README.ninja
new file mode 100644
index 0000000..1d74b67
--- /dev/null
+++ b/src/third_party/rapidhash/README.ninja
@@ -0,0 +1,7 @@
+Description: Very fast, high quality, platform-independent hashing algorithm.
+Version: commit 4a6b2570e868536be84800353efd92c699f37d2c
+URL: https://github.com/Nicoshev/rapidhash
+Copyright: Copyright (C) 2024 Nicolas De Carli, Based on 'wyhash', by Wang Yi <godspeed_china@yeah.net>
+SPDX-License-Identifier: BSD-2-Clause
+Local changes:
+ - Changed to UNIX line endings
diff --git a/src/third_party/rapidhash/rapidhash.h b/src/third_party/rapidhash/rapidhash.h
new file mode 100755
index 0000000..463f733
--- /dev/null
+++ b/src/third_party/rapidhash/rapidhash.h
@@ -0,0 +1,323 @@
+/*
+ * rapidhash - Very fast, high quality, platform-independent hashing algorithm.
+ * Copyright (C) 2024 Nicolas De Carli
+ *
+ * Based on 'wyhash', by Wang Yi <godspeed_china@yeah.net>
+ *
+ * BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *    * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *    * Redistributions in binary form must reproduce the above
+ *      copyright notice, this list of conditions and the following disclaimer
+ *      in the documentation and/or other materials provided with the
+ *      distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You can contact the author at:
+ *   - rapidhash source repository: https://github.com/Nicoshev/rapidhash
+ */
+
+/*
+ *  Includes.
+ */
+#include <stdint.h>
+#include <string.h>
+#if defined(_MSC_VER)
+  #include <intrin.h>
+  #if defined(_M_X64) && !defined(_M_ARM64EC)
+    #pragma intrinsic(_umul128)
+  #endif
+#endif
+
+/*
+ *  C++ macros.
+ *
+ *  RAPIDHASH_INLINE can be overridden to be stronger than a hint, e.g. by adding __attribute__((always_inline)).
+ */
+#ifdef __cplusplus
+  #define RAPIDHASH_NOEXCEPT noexcept
+  #define RAPIDHASH_CONSTEXPR constexpr
+  #ifndef RAPIDHASH_INLINE
+    #define RAPIDHASH_INLINE inline
+  #endif
+#else
+  #define RAPIDHASH_NOEXCEPT
+  #define RAPIDHASH_CONSTEXPR static const
+  #ifndef RAPIDHASH_INLINE
+    #define RAPIDHASH_INLINE static inline
+  #endif
+#endif
+
+/*
+ *  Protection macro, alters behaviour of rapid_mum multiplication function.
+ *
+ *  RAPIDHASH_FAST: Normal behavior, max speed.
+ *  RAPIDHASH_PROTECTED: Extra protection against entropy loss.
+ */
+#ifndef RAPIDHASH_PROTECTED
+  #define RAPIDHASH_FAST
+#elif defined(RAPIDHASH_FAST)
+  #error "cannot define RAPIDHASH_PROTECTED and RAPIDHASH_FAST simultaneously."
+#endif
+
+/*
+ *  Unrolling macros, changes code definition for main hash function.
+ *
+ *  RAPIDHASH_COMPACT: Legacy variant, each loop processes 48 bytes.
+ *  RAPIDHASH_UNROLLED: Unrolled variant, each loop processes 96 bytes.
+ *
+ *  Most modern CPUs should benefit from having RAPIDHASH_UNROLLED.
+ *
+ *  These macros do not alter the output hash.
+ */
+#ifndef RAPIDHASH_COMPACT
+  #define RAPIDHASH_UNROLLED
+#elif defined(RAPIDHASH_UNROLLED)
+  #error "cannot define RAPIDHASH_COMPACT and RAPIDHASH_UNROLLED simultaneously."
+#endif
+
+/*
+ *  Likely and unlikely macros.
+ */
+#if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__)
+  #define _likely_(x)  __builtin_expect(x,1)
+  #define _unlikely_(x)  __builtin_expect(x,0)
+#else
+  #define _likely_(x) (x)
+  #define _unlikely_(x) (x)
+#endif
+
+/*
+ *  Endianness macros.
+ */
+#ifndef RAPIDHASH_LITTLE_ENDIAN
+  #if defined(_WIN32) || defined(__LITTLE_ENDIAN__) || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+    #define RAPIDHASH_LITTLE_ENDIAN
+  #elif defined(__BIG_ENDIAN__) || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
+    #define RAPIDHASH_BIG_ENDIAN
+  #else
+    #warning "could not determine endianness! Falling back to little endian."
+    #define RAPIDHASH_LITTLE_ENDIAN
+  #endif
+#endif
+
+/*
+ *  Default seed.
+ */
+#define RAPID_SEED (0xbdd89aa982704029ull)
+
+/*
+ *  Default secret parameters.
+ */
+RAPIDHASH_CONSTEXPR uint64_t rapid_secret[3] = {0x2d358dccaa6c78a5ull, 0x8bb84b93962eacc9ull, 0x4b33a62ed433d4a3ull};
+
+/*
+ *  64*64 -> 128bit multiply function.
+ *
+ *  @param A  Address of 64-bit number.
+ *  @param B  Address of 64-bit number.
+ *
+ *  Calculates 128-bit C = *A * *B.
+ *
+ *  When RAPIDHASH_FAST is defined:
+ *  Overwrites A contents with C's low 64 bits.
+ *  Overwrites B contents with C's high 64 bits.
+ *
+ *  When RAPIDHASH_PROTECTED is defined:
+ *  Xors and overwrites A contents with C's low 64 bits.
+ *  Xors and overwrites B contents with C's high 64 bits.
+ */
+RAPIDHASH_INLINE void rapid_mum(uint64_t *A, uint64_t *B) RAPIDHASH_NOEXCEPT {
+#if defined(__SIZEOF_INT128__)
+  __uint128_t r=*A; r*=*B;
+  #ifdef RAPIDHASH_PROTECTED
+  *A^=(uint64_t)r; *B^=(uint64_t)(r>>64);
+  #else
+  *A=(uint64_t)r; *B=(uint64_t)(r>>64);
+  #endif
+#elif defined(_MSC_VER) && (defined(_WIN64) || defined(_M_HYBRID_CHPE_ARM64))
+  #if defined(_M_X64)
+    #ifdef RAPIDHASH_PROTECTED
+    uint64_t a, b;
+    a=_umul128(*A,*B,&b);
+    *A^=a;  *B^=b;
+    #else
+    *A=_umul128(*A,*B,B);
+    #endif
+  #else
+    #ifdef RAPIDHASH_PROTECTED
+    uint64_t a, b;
+    b = __umulh(*A, *B);
+    a = *A * *B;
+    *A^=a;  *B^=b;
+    #else
+    uint64_t c = __umulh(*A, *B);
+    *A = *A * *B;
+    *B = c;
+    #endif
+  #endif
+#else
+  uint64_t ha=*A>>32, hb=*B>>32, la=(uint32_t)*A, lb=(uint32_t)*B, hi, lo;
+  uint64_t rh=ha*hb, rm0=ha*lb, rm1=hb*la, rl=la*lb, t=rl+(rm0<<32), c=t<rl;
+  lo=t+(rm1<<32); c+=lo<t; hi=rh+(rm0>>32)+(rm1>>32)+c;
+  #ifdef RAPIDHASH_PROTECTED
+  *A^=lo;  *B^=hi;
+  #else
+  *A=lo;  *B=hi;
+  #endif
+#endif
+}
+
+/*
+ *  Multiply and xor mix function.
+ *
+ *  @param A  64-bit number.
+ *  @param B  64-bit number.
+ *
+ *  Calculates 128-bit C = A * B.
+ *  Returns 64-bit xor between high and low 64 bits of C.
+ */
+RAPIDHASH_INLINE uint64_t rapid_mix(uint64_t A, uint64_t B) RAPIDHASH_NOEXCEPT { rapid_mum(&A,&B); return A^B; }
+
+/*
+ *  Read functions.
+ */
+#ifdef RAPIDHASH_LITTLE_ENDIAN
+RAPIDHASH_INLINE uint64_t rapid_read64(const uint8_t *p) RAPIDHASH_NOEXCEPT { uint64_t v; memcpy(&v, p, sizeof(uint64_t)); return v;}
+RAPIDHASH_INLINE uint64_t rapid_read32(const uint8_t *p) RAPIDHASH_NOEXCEPT { uint32_t v; memcpy(&v, p, sizeof(uint32_t)); return v;}
+#elif defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__)
+RAPIDHASH_INLINE uint64_t rapid_read64(const uint8_t *p) RAPIDHASH_NOEXCEPT { uint64_t v; memcpy(&v, p, sizeof(uint64_t)); return __builtin_bswap64(v);}
+RAPIDHASH_INLINE uint64_t rapid_read32(const uint8_t *p) RAPIDHASH_NOEXCEPT { uint32_t v; memcpy(&v, p, sizeof(uint32_t)); return __builtin_bswap32(v);}
+#elif defined(_MSC_VER)
+RAPIDHASH_INLINE uint64_t rapid_read64(const uint8_t *p) RAPIDHASH_NOEXCEPT { uint64_t v; memcpy(&v, p, sizeof(uint64_t)); return _byteswap_uint64(v);}
+RAPIDHASH_INLINE uint64_t rapid_read32(const uint8_t *p) RAPIDHASH_NOEXCEPT { uint32_t v; memcpy(&v, p, sizeof(uint32_t)); return _byteswap_ulong(v);}
+#else
+RAPIDHASH_INLINE uint64_t rapid_read64(const uint8_t *p) RAPIDHASH_NOEXCEPT {
+  uint64_t v; memcpy(&v, p, 8);
+  return (((v >> 56) & 0xff)| ((v >> 40) & 0xff00)| ((v >> 24) & 0xff0000)| ((v >>  8) & 0xff000000)| ((v <<  8) & 0xff00000000)| ((v << 24) & 0xff0000000000)| ((v << 40) & 0xff000000000000)| ((v << 56) & 0xff00000000000000));
+}
+RAPIDHASH_INLINE uint64_t rapid_read32(const uint8_t *p) RAPIDHASH_NOEXCEPT {
+  uint32_t v; memcpy(&v, p, 4);
+  return (((v >> 24) & 0xff)| ((v >>  8) & 0xff00)| ((v <<  8) & 0xff0000)| ((v << 24) & 0xff000000));
+}
+#endif
+
+/*
+ *  Reads and combines 3 bytes of input.
+ *
+ *  @param p  Buffer to read from.
+ *  @param k  Length of @p, in bytes.
+ *
+ *  Always reads and combines 3 bytes from memory.
+ *  Guarantees to read each buffer position at least once.
+ *
+ *  Returns a 64-bit value containing all three bytes read.
+ */
+RAPIDHASH_INLINE uint64_t rapid_readSmall(const uint8_t *p, size_t k) RAPIDHASH_NOEXCEPT { return (((uint64_t)p[0])<<56)|(((uint64_t)p[k>>1])<<32)|p[k-1];}
+
+/*
+ *  rapidhash main function.
+ *
+ *  @param key     Buffer to be hashed.
+ *  @param len     @key length, in bytes.
+ *  @param seed    64-bit seed used to alter the hash result predictably.
+ *  @param secret  Triplet of 64-bit secrets used to alter hash result predictably.
+ *
+ *  Returns a 64-bit hash.
+ */
+RAPIDHASH_INLINE uint64_t rapidhash_internal(const void *key, size_t len, uint64_t seed, const uint64_t* secret) RAPIDHASH_NOEXCEPT {
+  const uint8_t *p=(const uint8_t *)key; seed^=rapid_mix(seed^secret[0],secret[1])^len;  uint64_t  a,  b;
+  if(_likely_(len<=16)){
+    if(_likely_(len>=4)){
+      const uint8_t * plast = p + len - 4;
+      a = (rapid_read32(p) << 32) | rapid_read32(plast);
+      const uint64_t delta = ((len&24)>>(len>>3));
+      b = ((rapid_read32(p + delta) << 32) | rapid_read32(plast - delta)); }
+    else if(_likely_(len>0)){ a=rapid_readSmall(p,len); b=0;}
+    else a=b=0;
+  }
+  else{
+    size_t i=len;
+    if(_unlikely_(i>48)){
+      uint64_t see1=seed, see2=seed;
+#ifdef RAPIDHASH_UNROLLED
+      while(_likely_(i>=96)){
+        seed=rapid_mix(rapid_read64(p)^secret[0],rapid_read64(p+8)^seed);
+        see1=rapid_mix(rapid_read64(p+16)^secret[1],rapid_read64(p+24)^see1);
+        see2=rapid_mix(rapid_read64(p+32)^secret[2],rapid_read64(p+40)^see2);
+        seed=rapid_mix(rapid_read64(p+48)^secret[0],rapid_read64(p+56)^seed);
+        see1=rapid_mix(rapid_read64(p+64)^secret[1],rapid_read64(p+72)^see1);
+        see2=rapid_mix(rapid_read64(p+80)^secret[2],rapid_read64(p+88)^see2);
+        p+=96; i-=96;
+      }
+      if(_unlikely_(i>=48)){
+        seed=rapid_mix(rapid_read64(p)^secret[0],rapid_read64(p+8)^seed);
+        see1=rapid_mix(rapid_read64(p+16)^secret[1],rapid_read64(p+24)^see1);
+        see2=rapid_mix(rapid_read64(p+32)^secret[2],rapid_read64(p+40)^see2);
+        p+=48; i-=48;
+      }
+#else
+      do {
+        seed=rapid_mix(rapid_read64(p)^secret[0],rapid_read64(p+8)^seed);
+        see1=rapid_mix(rapid_read64(p+16)^secret[1],rapid_read64(p+24)^see1);
+        see2=rapid_mix(rapid_read64(p+32)^secret[2],rapid_read64(p+40)^see2);
+        p+=48; i-=48;
+      } while (_likely_(i>=48));
+#endif
+      seed^=see1^see2;
+    }
+    if(i>16){
+      seed=rapid_mix(rapid_read64(p)^secret[2],rapid_read64(p+8)^seed^secret[1]);
+      if(i>32)
+        seed=rapid_mix(rapid_read64(p+16)^secret[2],rapid_read64(p+24)^seed);
+    }
+    a=rapid_read64(p+i-16);  b=rapid_read64(p+i-8);
+  }
+  a^=secret[1]; b^=seed;  rapid_mum(&a,&b);
+  return  rapid_mix(a^secret[0]^len,b^secret[1]);
+}
+
+/*
+ *  rapidhash default seeded hash function.
+ *
+ *  @param key     Buffer to be hashed.
+ *  @param len     @key length, in bytes.
+ *  @param seed    64-bit seed used to alter the hash result predictably.
+ *
+ *  Calls rapidhash_internal using provided parameters and default secrets.
+ *
+ *  Returns a 64-bit hash.
+ */
+RAPIDHASH_INLINE uint64_t rapidhash_withSeed(const void *key, size_t len, uint64_t seed) RAPIDHASH_NOEXCEPT {
+  return rapidhash_internal(key, len, seed, rapid_secret);
+}
+
+/*
+ *  rapidhash default hash function.
+ *
+ *  @param key     Buffer to be hashed.
+ *  @param len     @key length, in bytes.
+ *
+ *  Calls rapidhash_withSeed using provided parameters and the default seed.
+ *
+ *  Returns a 64-bit hash.
+ */
+RAPIDHASH_INLINE uint64_t rapidhash(const void *key, size_t len) RAPIDHASH_NOEXCEPT {
+  return rapidhash_withSeed(key, len, RAPID_SEED);
+}
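For reference, the public surface of the vendored header is the three functions at its end: rapidhash(), rapidhash_withSeed() and rapidhash_internal(). A minimal usage sketch (not part of the patch); the include path is the one vendored by this change and may need adjusting to the local include directories:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include "third_party/rapidhash/rapidhash.h"

    int main() {
      const char* key = "build.ninja";
      // Default-seeded hash (uses RAPID_SEED and rapid_secret internally).
      uint64_t h = rapidhash(key, strlen(key));
      // The same input hashed with an explicit seed.
      uint64_t hs = rapidhash_withSeed(key, strlen(key), 12345);
      std::printf("%016llx %016llx\n",
                  static_cast<unsigned long long>(h),
                  static_cast<unsigned long long>(hs));
      return 0;
    }
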
diff --git a/src/util.cc b/src/util.cc
index 7668e33..d88a6ba 100644
--- a/src/util.cc
+++ b/src/util.cc
@@ -21,6 +21,7 @@
 #include <windows.h>
 #include <io.h>
 #include <share.h>
+#include <direct.h>
 #endif
 
 #include <assert.h>
@@ -38,6 +39,7 @@
 #include <sys/time.h>
 #endif
 
+#include <algorithm>
 #include <vector>
 
 #if defined(__APPLE__) || defined(__FreeBSD__)
@@ -47,7 +49,7 @@
 #include <sys/loadavg.h>
 #elif defined(_AIX) && !defined(__PASE__)
 #include <libperfstat.h>
-#elif defined(linux) || defined(__GLIBC__)
+#elif defined(__linux__) || defined(__GLIBC__)
 #include <sys/sysinfo.h>
 #include <fstream>
 #include <map>
@@ -543,7 +545,7 @@
 
   if (msg_buf == nullptr) {
     char fallback_msg[128] = {0};
-    snprintf(fallback_msg, sizeof(fallback_msg), "GetLastError() = %d", err);
+    snprintf(fallback_msg, sizeof(fallback_msg), "GetLastError() = %lu", err);
     return fallback_msg;
   }
 
@@ -589,7 +591,7 @@
   return stripped;
 }
 
-#if defined(linux) || defined(__GLIBC__)
+#if defined(__linux__) || defined(__GLIBC__)
 std::pair<int64_t, bool> readCount(const std::string& path) {
   std::ifstream file(path.c_str());
   if (!file.is_open())
@@ -688,16 +690,33 @@
     MountPoint mp;
     if (!mp.parse(line))
       continue;
-    if (mp.fsType != "cgroup")
-      continue;
-    for (size_t i = 0; i < mp.superOptions.size(); i++) {
-      string opt = mp.superOptions[i].AsString();
-      map<string, CGroupSubSys>::iterator subsys = subsystems.find(opt);
-      if (subsys == subsystems.end())
+    if (mp.fsType == "cgroup") {
+      for (size_t i = 0; i < mp.superOptions.size(); i++) {
+        std::string opt = mp.superOptions[i].AsString();
+        auto subsys = subsystems.find(opt);
+        if (subsys == subsystems.end()) {
+          continue;
+        }
+        std::string newPath = mp.translate(subsys->second.name);
+        if (!newPath.empty()) {
+          cgroups.emplace(opt, newPath);
+        }
+      }
+    } else if (mp.fsType == "cgroup2") {
+      // Find cgroup2 entry in format "0::/path/to/cgroup"
+      auto subsys = std::find_if(subsystems.begin(), subsystems.end(),
+                                 [](const auto& sys) {
+                                   return sys.first == "" && sys.second.id == 0;
+                                 });
+      if (subsys == subsystems.end()) {
         continue;
-      string newPath = mp.translate(subsys->second.name);
-      if (!newPath.empty())
-        cgroups.insert(make_pair(opt, newPath));
+      }
+      std::string path = mp.mountPoint.AsString();
+      if (subsys->second.name != "/") {
+        // Append the relative path for the cgroup to the mount point
+        path.append(subsys->second.name);
+      }
+      cgroups.emplace("cgroup2", path);
     }
   }
   return cgroups;
@@ -721,23 +740,73 @@
   return cgroups;
 }
 
-int ParseCPUFromCGroup() {
-  map<string, CGroupSubSys> subsystems = ParseSelfCGroup();
-  map<string, string> cgroups = ParseMountInfo(subsystems);
-  map<string, string>::iterator cpu = cgroups.find("cpu");
-  if (cpu == cgroups.end())
-    return -1;
-  std::pair<int64_t, bool> quota = readCount(cpu->second + "/cpu.cfs_quota_us");
+int ParseCgroupV1(std::string& path) {
+  std::pair<int64_t, bool> quota = readCount(path + "/cpu.cfs_quota_us");
   if (!quota.second || quota.first == -1)
     return -1;
-  std::pair<int64_t, bool> period =
-      readCount(cpu->second + "/cpu.cfs_period_us");
+  std::pair<int64_t, bool> period = readCount(path + "/cpu.cfs_period_us");
   if (!period.second)
     return -1;
   if (period.first == 0)
     return -1;
   return quota.first / period.first;
 }
+
+int ParseCgroupV2(std::string& path) {
+  // Read CPU quota from cgroup v2
+  std::ifstream cpu_max(path + "/cpu.max");
+  if (!cpu_max.is_open()) {
+    return -1;
+  }
+  std::string max_line;
+  if (!std::getline(cpu_max, max_line) || max_line.empty()) {
+    return -1;
+  }
+  // Format is "quota period" or "max period"
+  size_t space_pos = max_line.find(' ');
+  if (space_pos == string::npos) {
+    return -1;
+  }
+  std::string quota_str = max_line.substr(0, space_pos);
+  std::string period_str = max_line.substr(space_pos + 1);
+  if (quota_str == "max") {
+    return -1;  // No CPU limit set
+  }
+  // Convert quota string to integer
+  char* quota_end = nullptr;
+  errno = 0;
+  int64_t quota = strtoll(quota_str.c_str(), &quota_end, 10);
+  // Check for conversion errors
+  if (errno == ERANGE || quota_end == quota_str.c_str() || *quota_end != '\0' ||
+      quota <= 0) {
+    return -1;
+  }
+  // Convert period string to integer
+  char* period_end = nullptr;
+  errno = 0;
+  int64_t period = strtoll(period_str.c_str(), &period_end, 10);
+  // Check for conversion errors
+  if (errno == ERANGE || period_end == period_str.c_str() ||
+      *period_end != '\0' || period <= 0) {
+    return -1;
+  }
+  return quota / period;
+}
+
+int ParseCPUFromCGroup() {
+  auto subsystems = ParseSelfCGroup();
+  auto cgroups = ParseMountInfo(subsystems);
+
+  // Prefer cgroup v2 when both v1 and v2 are present
+  if (const auto cgroup2 = cgroups.find("cgroup2"); cgroup2 != cgroups.end()) {
+    return ParseCgroupV2(cgroup2->second);
+  }
+
+  if (const auto cpu = cgroups.find("cpu"); cpu != cgroups.end()) {
+    return ParseCgroupV1(cpu->second);
+  }
+  return -1;
+}
 #endif
 
 int GetProcessorCount() {
@@ -789,7 +858,7 @@
 #else
   int cgroupCount = -1;
   int schedCount = -1;
-#if defined(linux) || defined(__GLIBC__)
+#if defined(__linux__) || defined(__GLIBC__)
   cgroupCount = ParseCPUFromCGroup();
 #endif
   // The number of exposed processors might not represent the actual number of
@@ -917,22 +986,19 @@
 }
 #endif // _WIN32
 
-string ElideMiddle(const string& str, size_t width) {
-  switch (width) {
-      case 0: return "";
-      case 1: return ".";
-      case 2: return "..";
-      case 3: return "...";
+std::string GetWorkingDirectory() {
+  std::string ret;
+  char* success = NULL;
+  do {
+    ret.resize(ret.size() + 1024);
+    errno = 0;
+    success = getcwd(&ret[0], ret.size());
+  } while (!success && errno == ERANGE);
+  if (!success) {
+    Fatal("cannot determine working directory: %s", strerror(errno));
   }
-  const int kMargin = 3;  // Space for "...".
-  string result = str;
-  if (result.size() > width) {
-    size_t elide_size = (width - kMargin) / 2;
-    result = result.substr(0, elide_size)
-      + "..."
-      + result.substr(result.size() - elide_size, elide_size);
-  }
-  return result;
+  ret.resize(strlen(&ret[0]));
+  return ret;
 }
 
 bool Truncate(const string& path, size_t size, string* err) {
@@ -952,3 +1018,11 @@
   }
   return true;
 }
+
+int platformAwareUnlink(const char* filename) {
+#ifdef _WIN32
+  return _unlink(filename);
+#else
+  return unlink(filename);
+#endif
+}
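The new ParseCgroupV2() above derives the CPU limit from the cgroup v2 cpu.max file, which holds "<quota> <period>" (or "max <period>" when no limit is set); the effective CPU count is quota divided by period. A standalone sketch of just that arithmetic (not part of the patch; CpusFromCpuMaxLine is a hypothetical helper used only for illustration):

    #include <cstdio>
    #include <cstdlib>
    #include <string>

    // Returns the CPU count encoded in one cpu.max line, or -1 if unlimited
    // or malformed, mirroring the return convention of ParseCgroupV2().
    static int CpusFromCpuMaxLine(const std::string& line) {
      size_t space = line.find(' ');
      if (space == std::string::npos)
        return -1;
      std::string quota = line.substr(0, space);
      if (quota == "max")
        return -1;  // no limit configured
      long long q = strtoll(quota.c_str(), nullptr, 10);
      long long p = strtoll(line.c_str() + space + 1, nullptr, 10);
      if (q <= 0 || p <= 0)
        return -1;
      return static_cast<int>(q / p);
    }

    int main() {
      std::printf("%d\n", CpusFromCpuMaxLine("200000 100000"));  // prints 2
      std::printf("%d\n", CpusFromCpuMaxLine("max 100000"));     // prints -1
      return 0;
    }
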
diff --git a/src/util.h b/src/util.h
index 4a7fea2..02c2418 100644
--- a/src/util.h
+++ b/src/util.h
@@ -26,27 +26,26 @@
 #include <string>
 #include <vector>
 
-#ifdef _MSC_VER
-#define NORETURN __declspec(noreturn)
+#if !defined(__has_cpp_attribute)
+#  define __has_cpp_attribute(x)  0
+#endif
+
+#if __has_cpp_attribute(noreturn)
+#  define NORETURN [[noreturn]]
 #else
-#define NORETURN __attribute__((noreturn))
+#  define NORETURN  // nothing for old compilers
 #endif
 
 /// Log a fatal message and exit.
 NORETURN void Fatal(const char* msg, ...);
 
 // Have a generic fall-through for different versions of C/C++.
-#if defined(__cplusplus) && __cplusplus >= 201703L
-#define NINJA_FALLTHROUGH [[fallthrough]]
-#elif defined(__cplusplus) && __cplusplus >= 201103L && defined(__clang__)
-#define NINJA_FALLTHROUGH [[clang::fallthrough]]
-#elif defined(__cplusplus) && __cplusplus >= 201103L && defined(__GNUC__) && \
-    __GNUC__ >= 7
-#define NINJA_FALLTHROUGH [[gnu::fallthrough]]
-#elif defined(__GNUC__) && __GNUC__ >= 7 // gcc 7
-#define NINJA_FALLTHROUGH __attribute__ ((fallthrough))
-#else // C++11 on gcc 6, and all other cases
-#define NINJA_FALLTHROUGH
+#if __has_cpp_attribute(fallthrough)
+#  define NINJA_FALLTHROUGH [[fallthrough]]
+#elif defined(__clang__)
+#  define NINJA_FALLTHROUGH [[clang::fallthrough]]
+#else
+#  define NINJA_FALLTHROUGH // nothing
 #endif
 
 /// Log a warning message.
@@ -103,9 +102,8 @@
 /// on error.
 double GetLoadAverage();
 
-/// Elide the given string @a str with '...' in the middle if the length
-/// exceeds @a width.
-std::string ElideMiddle(const std::string& str, size_t width);
+/// A wrapper for getcwd().
+std::string GetWorkingDirectory();
 
 /// Truncates a file to the given size.
 bool Truncate(const std::string& path, size_t size, std::string* err);
@@ -113,7 +111,6 @@
 #ifdef _MSC_VER
 #define snprintf _snprintf
 #define fileno _fileno
-#define unlink _unlink
 #define chdir _chdir
 #define strtoull _strtoui64
 #define getcwd _getcwd
@@ -126,6 +123,18 @@
 
 /// Calls Fatal() with a function name and GetLastErrorString.
 NORETURN void Win32Fatal(const char* function, const char* hint = NULL);
+
+/// Naive implementation of C++20 std::bit_cast(), used to fix Clang and GCC
+/// [-Wcast-function-type] warning on casting result of GetProcAddress().
+template <class To, class From>
+inline To FunctionCast(From from) {
+  static_assert(sizeof(To) == sizeof(From), "");
+  To result;
+  memcpy(&result, &from, sizeof(To));
+  return result;
+}
 #endif
 
+int platformAwareUnlink(const char* filename);
+
 #endif  // NINJA_UTIL_H_
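The FunctionCast() helper added to util.h exists to silence -Wcast-function-type when converting the FARPROC returned by GetProcAddress() into a typed function pointer. An illustrative, Windows-only sketch (not part of the patch): GetTickCount64 from kernel32 is used purely as an arbitrary example target, and the template is repeated here only so the snippet is self-contained:

    #include <windows.h>
    #include <cstdio>
    #include <cstring>

    // Same shape as the FunctionCast() helper in util.h.
    template <class To, class From>
    inline To FunctionCast(From from) {
      static_assert(sizeof(To) == sizeof(From), "");
      To result;
      memcpy(&result, &from, sizeof(To));
      return result;
    }

    int main() {
      using GetTickCount64Fn = ULONGLONG (WINAPI*)();
      HMODULE kernel32 = GetModuleHandleA("kernel32.dll");
      // memcpy-based cast avoids the function-pointer cast warning.
      auto fn = FunctionCast<GetTickCount64Fn>(
          GetProcAddress(kernel32, "GetTickCount64"));
      if (fn)
        std::printf("%llu\n", static_cast<unsigned long long>(fn()));
      return 0;
    }
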
diff --git a/src/util_test.cc b/src/util_test.cc
index d76954c..38e65e9 100644
--- a/src/util_test.cc
+++ b/src/util_test.cc
@@ -350,7 +350,7 @@
       "a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\"
       "a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\x\\y.h";
   CanonicalizePath(&path, &slash_bits);
-  EXPECT_EQ(slash_bits, 0x1ffffffff);
+  EXPECT_EQ(slash_bits, uint64_t(0x1ffffffff));
 
 
   // 59 after canonicalization is OK.
@@ -368,7 +368,7 @@
       "a\\a\\a\\a\\a\\a\\a\\a\\a\\x\\y.h";
   EXPECT_EQ(58, std::count(path.begin(), path.end(), '\\'));
   CanonicalizePath(&path, &slash_bits);
-  EXPECT_EQ(slash_bits, 0x3ffffffffffffff);
+  EXPECT_EQ(slash_bits, uint64_t(0x3ffffffffffffff));
 
   // More than 60 components is now completely ok too.
   path =
@@ -502,20 +502,3 @@
   EXPECT_EQ("affixmgr.cxx:286:15: warning: using the result... [-Wparentheses]",
             stripped);
 }
-
-TEST(ElideMiddle, NothingToElide) {
-  string input = "Nothing to elide in this short string.";
-  EXPECT_EQ(input, ElideMiddle(input, 80));
-  EXPECT_EQ(input, ElideMiddle(input, 38));
-  EXPECT_EQ("", ElideMiddle(input, 0));
-  EXPECT_EQ(".", ElideMiddle(input, 1));
-  EXPECT_EQ("..", ElideMiddle(input, 2));
-  EXPECT_EQ("...", ElideMiddle(input, 3));
-}
-
-TEST(ElideMiddle, ElideInTheMiddle) {
-  string input = "01234567890123456789";
-  string elided = ElideMiddle(input, 10);
-  EXPECT_EQ("012...789", elided);
-  EXPECT_EQ("01234567...23456789", ElideMiddle(input, 19));
-}
diff --git a/src/version.cc b/src/version.cc
index b19249f..0ee3061 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -20,7 +20,7 @@
 
 using namespace std;
 
-const char* kNinjaVersion = "1.12.1";
+const char* kNinjaVersion = "1.13.0";
 
 void ParseVersion(const string& version, int* major, int* minor) {
   size_t end = version.find('.');