Snap for 7570015 from 8360577fa728351012bb9e36631fa78b118c44c3 to mainline-sdkext-release

Change-Id: I6ec7cd92e85a62a1dbf79b19b01e88a7dfa8bc71
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
new file mode 100644
index 0000000..deb261d
--- /dev/null
+++ b/.github/workflows/build.yml
@@ -0,0 +1,47 @@
+name: build
+
+on:
+  push:
+    branches: [ master ]
+  pull_request:
+    branches: [ master ]
+
+jobs:
+
+  build:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        go: [ '1.14', '1.13' ]
+    name: Build and test on go ${{ matrix.go }}
+    steps:
+
+    - name: Set up Go ${{ matrix.go }}
+      uses: actions/setup-go@v2
+      with:
+        go-version: ${{ matrix.go }}
+      id: go
+
+    - name: Check out code
+      uses: actions/checkout@v2
+
+    - name: Install ninja
+      run: |
+        mkdir -p ${GITHUB_WORKSPACE}/ninja-bin; cd ${GITHUB_WORKSPACE}/ninja-bin
+        wget https://github.com/ninja-build/ninja/releases/download/v1.7.2/ninja-linux.zip
+        unzip ninja-linux.zip
+        rm ninja-linux.zip
+        echo "${GITHUB_WORKSPACE}/ninja-bin" >> $GITHUB_PATH
+
+    - name: Run gofmt
+      run: ./.gofmt.sh
+
+    - name: Test
+      run: go test ./...
+
+    - name: Test with race detector
+      run: go test -race -short ./...
+
+    - run: ./tests/test.sh
+    - run: ./tests/test_tree_tests.sh
+    - run: ./tests/test_tree_tests.sh -t
diff --git a/.travis.gofmt.sh b/.gofmt.sh
similarity index 100%
rename from .travis.gofmt.sh
rename to .gofmt.sh
diff --git a/.travis.fix-fork.sh b/.travis.fix-fork.sh
deleted file mode 100755
index af26716..0000000
--- a/.travis.fix-fork.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-
-if echo $TRAVIS_BUILD_DIR | grep -vq "github.com/google/blueprint$" ; then
-  cd ../..
-  mkdir -p google
-  mv $TRAVIS_BUILD_DIR google/blueprint
-  cd google/blueprint
-  export TRAVIS_BUILD_DIR=$PWD
-fi
diff --git a/.travis.install-ninja.sh b/.travis.install-ninja.sh
deleted file mode 100755
index 5309945..0000000
--- a/.travis.install-ninja.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash
-
-# Version of ninja to build -- can be any git revision
-VERSION="v1.7.1"
-
-set -ev
-
-SCRIPT_HASH=$(sha1sum ${BASH_SOURCE[0]} | awk '{print $1}')
-
-cd ~
-if [[ -d ninjabin && "$SCRIPT_HASH" == "$(cat ninjabin/script_hash)" ]]; then
-  exit 0
-fi
-
-git clone https://github.com/martine/ninja
-cd ninja
-./configure.py --bootstrap
-
-mkdir -p ../ninjabin
-rm -f ../ninjabin/ninja
-echo -n $SCRIPT_HASH >../ninjabin/script_hash
-mv ninja ../ninjabin/
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 706e469..0000000
--- a/.travis.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-language: go
-
-go:
-    - "1.10"
-    - "1.11"
-    - "1.12"
-    - "1.13"
-
-cache:
-    directories:
-        - $HOME/ninjabin
-
-install:
-    - ./.travis.install-ninja.sh
-    - export PATH=$PATH:~/ninjabin
-
-before_script:
-    - source .travis.fix-fork.sh
-
-script:
-    - export GOROOT=$(go env GOROOT)
-    - ./.travis.gofmt.sh
-    - go test ./...
-    - go test -race -short ./...
-    - ./tests/test.sh
-    - ./tests/test_tree_tests.sh
-    - ./tests/test_tree_tests.sh -t
diff --git a/Blueprints b/Blueprints
index ecc0792..ab9fd3c 100644
--- a/Blueprints
+++ b/Blueprints
@@ -17,6 +17,7 @@
         "ninja_strings.go",
         "ninja_writer.go",
         "package_ctx.go",
+        "provider.go",
         "scope.go",
         "singleton_ctx.go",
     ],
@@ -26,6 +27,7 @@
         "module_ctx_test.go",
         "ninja_strings_test.go",
         "ninja_writer_test.go",
+        "provider_test.go",
         "splice_modules_test.go",
         "visit_test.go",
     ],
@@ -45,6 +47,7 @@
         "parser/modify_test.go",
         "parser/parser_test.go",
         "parser/printer_test.go",
+	"parser/sort_test.go",
     ],
 }
 
@@ -68,6 +71,7 @@
     testSrcs: [
         "pathtools/fs_test.go",
         "pathtools/glob_test.go",
+        "pathtools/lists_test.go",
     ],
 }
 
diff --git a/CODEOWNERS b/CODEOWNERS
new file mode 100644
index 0000000..8cf6944
--- /dev/null
+++ b/CODEOWNERS
@@ -0,0 +1 @@
+* @google/blueprint
diff --git a/OWNERS b/OWNERS
index 5dca797..1ee860c 100644
--- a/OWNERS
+++ b/OWNERS
@@ -1,4 +1,2 @@
-asmundak@google.com
-ccross@android.com
-dwillemsen@google.com
-jungjw@google.com
+include platform/build/soong:/OWNERS
+
diff --git a/PREUPLOAD.cfg b/PREUPLOAD.cfg
new file mode 100644
index 0000000..317f5c4
--- /dev/null
+++ b/PREUPLOAD.cfg
@@ -0,0 +1,6 @@
+[Builtin Hooks]
+gofmt = true
+bpfmt = true
+
+[Hook Scripts]
+do_not_use_DO_NOT_MERGE = ${REPO_ROOT}/build/soong/scripts/check_do_not_merge.sh ${PREUPLOAD_COMMIT}
diff --git a/README.md b/README.md
index 5370c05..961bc64 100644
--- a/README.md
+++ b/README.md
@@ -1,14 +1,25 @@
 Blueprint Build System
 ======================
-[![Build Status](https://travis-ci.org/google/blueprint.svg?branch=master)](https://travis-ci.org/google/blueprint) 
 
-Blueprint is a meta-build system that reads in Blueprints files that describe
-modules that need to be built, and produces a
-[Ninja](https://ninja-build.org/) manifest describing the commands that
-need to be run and their dependencies.  Where most build systems use built-in
-rules or a domain-specific language to describe the logic for converting module
-descriptions to build rules, Blueprint delegates this to per-project build
-logic written in Go.  For large, heterogenous projects this allows the inherent
-complexity of the build logic to be maintained in a high-level language, while
-still allowing simple changes to individual modules by modifying easy to
-understand Blueprints files.
+Blueprint is being archived on 2021 May 3.
+
+On 2021 May 3, we will be archiving the Blueprint project. This means it will
+not be possible to file new issues or open new pull requests for this GitHub
+project. As the project is being archived, patches -- including security
+patches -- will not be applied after May 3. The source tree will remain
+available, but changes to Blueprint in AOSP will not be merged here and
+Blueprint's source tree in AOSP will eventually stop being usable outside of
+Android.
+
+Whereas there are no meta-build systems one can use as a drop-in replacement for
+Blueprint, there are a number of build systems that can be used:
+
+* [Bazel](https://bazel.build), Google's multi-language build tool to build and
+  test software of any size, quickly and reliably
+* [Soong](https://source.android.com/setup/build), for building the Android
+  operating system itself
+* [CMake](https://cmake.org), an open-source, cross-platform family of tools
+  designed to build, test and package software
+* [Buck](https://buck.build), a fast build system that encourages the creation
+  of small, reusable modules over a variety of platforms and languages
+* The venerable [GNU Make](https://www.gnu.org/software/make/)
diff --git a/bootstrap.bash b/bootstrap.bash
index 08b85b5..b08bf1e 100755
--- a/bootstrap.bash
+++ b/bootstrap.bash
@@ -67,12 +67,14 @@
     echo "  -h: print a help message and exit"
     echo "  -b <builddir>: set the build directory"
     echo "  -t: run tests"
+    echo "  -n: use validations to depend on tests"
 }
 
 # Parse the command line flags.
-while getopts ":b:ht" opt; do
+while getopts ":b:hnt" opt; do
     case $opt in
         b) BUILDDIR="$OPTARG";;
+        n) USE_VALIDATIONS=true;;
         t) RUN_TESTS=true;;
         h)
             usage
@@ -93,6 +95,9 @@
 # If RUN_TESTS is set, behave like -t was passed in as an option.
 [ ! -z "$RUN_TESTS" ] && EXTRA_ARGS="${EXTRA_ARGS} -t"
 
+# If $USE_VALIDATIONS is set, pass --use-validations.
+[ ! -z "$USE_VALIDATIONS" ] && EXTRA_ARGS="${EXTRA_ARGS} --use-validations"
+
 # If EMPTY_NINJA_FILE is set, have the primary build write out a 0-byte ninja
 # file instead of a full length one. Useful if you don't plan on executing the
 # build, but want to verify the primary builder execution.
diff --git a/bootstrap/bootstrap.go b/bootstrap/bootstrap.go
index 79e5c8e..d5befd9 100644
--- a/bootstrap/bootstrap.go
+++ b/bootstrap/bootstrap.go
@@ -17,8 +17,6 @@
 import (
 	"fmt"
 	"go/build"
-	"io/ioutil"
-	"os"
 	"path/filepath"
 	"runtime"
 	"strings"
@@ -59,7 +57,7 @@
 	compile = pctx.StaticRule("compile",
 		blueprint.RuleParams{
 			Command: "GOROOT='$goRoot' $compileCmd $parallelCompile -o $out.tmp " +
-				"-p $pkgPath -complete $incFlags -pack $in && " +
+				"$debugFlags -p $pkgPath -complete $incFlags -pack $in && " +
 				"if cmp --quiet $out.tmp $out; then rm $out.tmp; else mv -f $out.tmp $out; fi",
 			CommandDeps: []string{"$compileCmd"},
 			Description: "compile $out",
@@ -125,14 +123,27 @@
 
 	generateBuildNinja = pctx.StaticRule("build.ninja",
 		blueprint.RuleParams{
-			Command:     "$builder $extra -b $buildDir -n $ninjaBuildDir -d $out.d -globFile $globFile -o $out $in",
+			// TODO: it's kinda ugly that some parameters are computed from
+			// environment variables and some from Ninja parameters, but it's probably
+			// better not to touch that while Blueprint and Soong are separate
+			// NOTE: The spaces at EOL are important because otherwise Ninja would
+			// omit all spaces between the different options.
+			Command: `cd "$$(dirname "$builder")" && ` +
+				`BUILDER="$$PWD/$$(basename "$builder")" && ` +
+				`cd / && ` +
+				`env -i "$$BUILDER" ` +
+				`    --top "$$TOP" ` +
+				`    --out "$buildDir" ` +
+				`    -n "$ninjaBuildDir" ` +
+				`    -d "$out.d" ` +
+				`    $extra`,
 			CommandDeps: []string{"$builder"},
 			Description: "$builder $out",
 			Deps:        blueprint.DepsGCC,
 			Depfile:     "$out.d",
 			Restat:      true,
 		},
-		"builder", "extra", "generator", "globFile")
+		"builder", "extra")
 
 	// Work around a Ninja issue.  See https://github.com/martine/ninja/pull/634
 	phony = pctx.StaticRule("phony",
@@ -144,7 +155,7 @@
 		"depfile")
 
 	_ = pctx.VariableFunc("BinDir", func(config interface{}) (string, error) {
-		return bootstrapBinDir(), nil
+		return bootstrapBinDir(config), nil
 	})
 
 	_ = pctx.VariableFunc("ToolDir", func(config interface{}) (string, error) {
@@ -167,21 +178,23 @@
 	isGoBinary()
 }
 
-func bootstrapBinDir() string {
-	return filepath.Join(BuildDir, bootstrapSubDir, "bin")
+func bootstrapBinDir(config interface{}) string {
+	return filepath.Join(config.(BootstrapConfig).BuildDir(), bootstrapSubDir, "bin")
 }
 
 func toolDir(config interface{}) string {
 	if c, ok := config.(ConfigBlueprintToolLocation); ok {
 		return filepath.Join(c.BlueprintToolLocation())
 	}
-	return filepath.Join(BuildDir, "bin")
+	return filepath.Join(config.(BootstrapConfig).BuildDir(), "bin")
 }
 
 func pluginDeps(ctx blueprint.BottomUpMutatorContext) {
 	if pkg, ok := ctx.Module().(*goPackage); ok {
-		for _, plugin := range pkg.properties.PluginFor {
-			ctx.AddReverseDependency(ctx.Module(), nil, plugin)
+		if ctx.PrimaryModule() == ctx.Module() {
+			for _, plugin := range pkg.properties.PluginFor {
+				ctx.AddReverseDependency(ctx.Module(), nil, plugin)
+			}
 		}
 	}
 }
@@ -211,7 +224,7 @@
 	}
 }
 
-func isBootstrapModule(module blueprint.Module) bool {
+func IsBootstrapModule(module blueprint.Module) bool {
 	_, isPackage := module.(*goPackage)
 	_, isBinary := module.(*goBinary)
 	return isPackage || isBinary
@@ -268,6 +281,9 @@
 }
 
 func (g *goPackage) DynamicDependencies(ctx blueprint.DynamicDependerModuleContext) []string {
+	if ctx.Module() != ctx.PrimaryModule() {
+		return nil
+	}
 	return g.properties.Deps
 }
 
@@ -297,6 +313,16 @@
 }
 
 func (g *goPackage) GenerateBuildActions(ctx blueprint.ModuleContext) {
+	// Allow the primary builder to create multiple variants.  Any variants after the first
+	// will copy outputs from the first.
+	if ctx.Module() != ctx.PrimaryModule() {
+		primary := ctx.PrimaryModule().(*goPackage)
+		g.pkgRoot = primary.pkgRoot
+		g.archiveFile = primary.archiveFile
+		g.testResultFile = primary.testResultFile
+		return
+	}
+
 	var (
 		name       = ctx.ModuleName()
 		hasPlugins = false
@@ -333,11 +359,13 @@
 		testSrcs = append(g.properties.TestSrcs, g.properties.Linux.TestSrcs...)
 	}
 
-	testArchiveFile := filepath.Join(testRoot(ctx, g.config),
-		filepath.FromSlash(g.properties.PkgPath)+".a")
-	g.testResultFile = buildGoTest(ctx, testRoot(ctx, g.config), testArchiveFile,
-		g.properties.PkgPath, srcs, genSrcs,
-		testSrcs)
+	if g.config.runGoTests {
+		testArchiveFile := filepath.Join(testRoot(ctx, g.config),
+			filepath.FromSlash(g.properties.PkgPath)+".a")
+		g.testResultFile = buildGoTest(ctx, testRoot(ctx, g.config), testArchiveFile,
+			g.properties.PkgPath, srcs, genSrcs,
+			testSrcs, g.config.useValidations)
+	}
 
 	buildGoPackage(ctx, g.pkgRoot, g.properties.PkgPath, g.archiveFile,
 		srcs, genSrcs)
@@ -384,6 +412,9 @@
 }
 
 func (g *goBinary) DynamicDependencies(ctx blueprint.DynamicDependerModuleContext) []string {
+	if ctx.Module() != ctx.PrimaryModule() {
+		return nil
+	}
 	return g.properties.Deps
 }
 
@@ -393,6 +424,14 @@
 }
 
 func (g *goBinary) GenerateBuildActions(ctx blueprint.ModuleContext) {
+	// Allow the primary builder to create multiple variants.  Any variants after the first
+	// will copy outputs from the first.
+	if ctx.Module() != ctx.PrimaryModule() {
+		primary := ctx.PrimaryModule().(*goBinary)
+		g.installPath = primary.installPath
+		return
+	}
+
 	var (
 		name            = ctx.ModuleName()
 		objDir          = moduleObjDir(ctx, g.config)
@@ -406,10 +445,8 @@
 
 	if g.properties.Tool_dir {
 		g.installPath = filepath.Join(toolDir(ctx.Config()), name)
-	} else if g.config.stage == StageMain {
-		g.installPath = filepath.Join(mainDir, "bin", name)
 	} else {
-		g.installPath = filepath.Join(bootstrapDir, "bin", name)
+		g.installPath = filepath.Join(stageDir(g.config), "bin", name)
 	}
 
 	ctx.VisitDepsDepthFirstIf(isGoPluginFor(name),
@@ -419,7 +456,7 @@
 		genSrcs = append(genSrcs, pluginSrc)
 	}
 
-	var deps []string
+	var testDeps []string
 
 	if hasPlugins && !buildGoPluginLoader(ctx, "main", pluginSrc) {
 		return
@@ -434,10 +471,9 @@
 		testSrcs = append(g.properties.TestSrcs, g.properties.Linux.TestSrcs...)
 	}
 
-	testDeps := buildGoTest(ctx, testRoot(ctx, g.config), testArchiveFile,
-		name, srcs, genSrcs, testSrcs)
 	if g.config.runGoTests {
-		deps = append(deps, testDeps...)
+		testDeps = buildGoTest(ctx, testRoot(ctx, g.config), testArchiveFile,
+			name, srcs, genSrcs, testSrcs, g.config.useValidations)
 	}
 
 	buildGoPackage(ctx, objDir, "main", archiveFile, srcs, genSrcs)
@@ -450,9 +486,7 @@
 			linkDeps = append(linkDeps, dep.GoPackageTarget())
 			libDir := dep.GoPkgRoot()
 			libDirFlags = append(libDirFlags, "-L "+libDir)
-			if g.config.runGoTests {
-				deps = append(deps, dep.GoTestTargets()...)
-			}
+			testDeps = append(testDeps, dep.GoTestTargets()...)
 		})
 
 	linkArgs := map[string]string{}
@@ -469,12 +503,20 @@
 		Optional:  true,
 	})
 
+	var orderOnlyDeps, validationDeps []string
+	if g.config.useValidations {
+		validationDeps = testDeps
+	} else {
+		orderOnlyDeps = testDeps
+	}
+
 	ctx.Build(pctx, blueprint.BuildParams{
-		Rule:      cp,
-		Outputs:   []string{g.installPath},
-		Inputs:    []string{aoutFile},
-		OrderOnly: deps,
-		Optional:  !g.properties.Default,
+		Rule:        cp,
+		Outputs:     []string{g.installPath},
+		Inputs:      []string{aoutFile},
+		OrderOnly:   orderOnlyDeps,
+		Validations: validationDeps,
+		Optional:    !g.properties.Default,
 	})
 }
 
@@ -539,7 +581,7 @@
 }
 
 func buildGoTest(ctx blueprint.ModuleContext, testRoot, testPkgArchive,
-	pkgPath string, srcs, genSrcs, testSrcs []string) []string {
+	pkgPath string, srcs, genSrcs, testSrcs []string, useValidations bool) []string {
 
 	if len(testSrcs) == 0 {
 		return nil
@@ -601,11 +643,19 @@
 		Optional: true,
 	})
 
+	var orderOnlyDeps, validationDeps []string
+	if useValidations {
+		validationDeps = testDeps
+	} else {
+		orderOnlyDeps = testDeps
+	}
+
 	ctx.Build(pctx, blueprint.BuildParams{
-		Rule:      test,
-		Outputs:   []string{testPassed},
-		Inputs:    []string{testFile},
-		OrderOnly: testDeps,
+		Rule:        test,
+		Outputs:     []string{testPassed},
+		Inputs:      []string{testFile},
+		OrderOnly:   orderOnlyDeps,
+		Validations: validationDeps,
 		Args: map[string]string{
 			"pkg":       pkgPath,
 			"pkgSrcDir": filepath.Dir(testFiles[0]),
@@ -636,89 +686,62 @@
 	var primaryBuilders []*goBinary
 	// blueprintTools contains blueprint go binaries that will be built in StageMain
 	var blueprintTools []string
-	// blueprintTests contains the result files from the tests
-	var blueprintTests []string
-	ctx.VisitAllModules(func(module blueprint.Module) {
-		if binaryModule, ok := module.(*goBinary); ok {
-			if binaryModule.properties.Tool_dir {
-				blueprintTools = append(blueprintTools, binaryModule.InstallPath())
+	ctx.VisitAllModulesIf(isBootstrapBinaryModule,
+		func(module blueprint.Module) {
+			if ctx.PrimaryModule(module) == module {
+				binaryModule := module.(*goBinary)
+
+				if binaryModule.properties.Tool_dir {
+					blueprintTools = append(blueprintTools, binaryModule.InstallPath())
+				}
+				if binaryModule.properties.PrimaryBuilder {
+					primaryBuilders = append(primaryBuilders, binaryModule)
+				}
 			}
-			if binaryModule.properties.PrimaryBuilder {
-				primaryBuilders = append(primaryBuilders, binaryModule)
-			}
-		}
+		})
 
-		if packageModule, ok := module.(goPackageProducer); ok {
-			blueprintTests = append(blueprintTests, packageModule.GoTestTargets()...)
-		}
-	})
+	var primaryBuilderCmdlinePrefix []string
+	var primaryBuilderName string
 
-	var extraSharedFlagArray []string
-	if s.config.runGoTests {
-		extraSharedFlagArray = append(extraSharedFlagArray, "-t")
-	}
-	if s.config.moduleListFile != "" {
-		extraSharedFlagArray = append(extraSharedFlagArray, "-l", s.config.moduleListFile)
-	}
-	if s.config.emptyNinjaFile {
-		extraSharedFlagArray = append(extraSharedFlagArray, "--empty-ninja-file")
-	}
-	extraSharedFlagString := strings.Join(extraSharedFlagArray, " ")
-
-	var primaryBuilderName, primaryBuilderExtraFlags string
-	switch len(primaryBuilders) {
-	case 0:
+	if len(primaryBuilders) == 0 {
 		// If there's no primary builder module then that means we'll use minibp
 		// as the primary builder.  We can trigger its primary builder mode with
 		// the -p flag.
 		primaryBuilderName = "minibp"
-		primaryBuilderExtraFlags = "-p " + extraSharedFlagString
-
-	case 1:
-		primaryBuilderName = ctx.ModuleName(primaryBuilders[0])
-		primaryBuilderExtraFlags = extraSharedFlagString
-
-	default:
+		primaryBuilderCmdlinePrefix = append(primaryBuilderCmdlinePrefix, "-p")
+	} else if len(primaryBuilders) > 1 {
 		ctx.Errorf("multiple primary builder modules present:")
 		for _, primaryBuilder := range primaryBuilders {
 			ctx.ModuleErrorf(primaryBuilder, "<-- module %s",
 				ctx.ModuleName(primaryBuilder))
 		}
 		return
+	} else {
+		primaryBuilderName = ctx.ModuleName(primaryBuilders[0])
 	}
 
 	primaryBuilderFile := filepath.Join("$BinDir", primaryBuilderName)
-
-	// Get the filename of the top-level Blueprints file to pass to minibp.
-	topLevelBlueprints := filepath.Join("$srcDir",
-		filepath.Base(s.config.topLevelBlueprintsFile))
-
 	ctx.SetNinjaBuildDir(pctx, "${ninjaBuildDir}")
 
 	if s.config.stage == StagePrimary {
-		mainNinjaFile := filepath.Join("$buildDir", "build.ninja")
-		primaryBuilderNinjaGlobFile := absolutePath(filepath.Join(BuildDir, bootstrapSubDir, "build-globs.ninja"))
+		ctx.AddSubninja(s.config.globFile)
 
-		if _, err := os.Stat(primaryBuilderNinjaGlobFile); os.IsNotExist(err) {
-			err = ioutil.WriteFile(primaryBuilderNinjaGlobFile, nil, 0666)
-			if err != nil {
-				ctx.Errorf("Failed to create empty ninja file: %s", err)
-			}
+		for _, i := range s.config.primaryBuilderInvocations {
+			flags := make([]string, 0)
+			flags = append(flags, primaryBuilderCmdlinePrefix...)
+			flags = append(flags, i.Args...)
+
+			// Build the main build.ninja
+			ctx.Build(pctx, blueprint.BuildParams{
+				Rule:    generateBuildNinja,
+				Outputs: i.Outputs,
+				Inputs:  i.Inputs,
+				Args: map[string]string{
+					"builder": primaryBuilderFile,
+					"extra":   strings.Join(flags, " "),
+				},
+			})
 		}
-
-		ctx.AddSubninja(primaryBuilderNinjaGlobFile)
-
-		// Build the main build.ninja
-		ctx.Build(pctx, blueprint.BuildParams{
-			Rule:    generateBuildNinja,
-			Outputs: []string{mainNinjaFile},
-			Inputs:  []string{topLevelBlueprints},
-			Args: map[string]string{
-				"builder":  primaryBuilderFile,
-				"extra":    primaryBuilderExtraFlags,
-				"globFile": primaryBuilderNinjaGlobFile,
-			},
-		})
 	}
 
 	if s.config.stage == StageMain {
@@ -741,8 +764,8 @@
 		docsFile := filepath.Join(docsDir, primaryBuilderName+".html")
 		bigbpDocs := ctx.Rule(pctx, "bigbpDocs",
 			blueprint.RuleParams{
-				Command: fmt.Sprintf("%s %s -b $buildDir --docs $out %s", primaryBuilderFile,
-					primaryBuilderExtraFlags, topLevelBlueprints),
+				Command: fmt.Sprintf("%s -b $buildDir --docs $out %s", primaryBuilderFile,
+					s.config.topLevelBlueprintsFile),
 				CommandDeps: []string{primaryBuilderFile},
 				Description: fmt.Sprintf("%s docs $out", primaryBuilderName),
 			})
@@ -765,14 +788,6 @@
 			Outputs: []string{"blueprint_tools"},
 			Inputs:  blueprintTools,
 		})
-
-		// Add a phony target for running all of the tests
-		ctx.Build(pctx, blueprint.BuildParams{
-			Rule:    blueprint.Phony,
-			Outputs: []string{"blueprint_tests"},
-			Inputs:  blueprintTests,
-		})
-
 	}
 }
 
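The goBinary/goPackage hunks above move Go test results from order-only inputs onto Ninja validations when --use-validations is passed: validations are built alongside the install step (and fail the build if they fail) but no longer delay it. Below is a self-contained sketch of that routing and of the build-statement forms it maps to; buildParams, routeTestDeps and the example paths are illustrative stand-ins, not Blueprint's real API, and the "||"/"|@" rendering assumes Ninja's order-only and validation syntax (Ninja 1.11+).

package main

import (
	"fmt"
	"strings"
)

// buildParams loosely mirrors the blueprint.BuildParams fields used above;
// it is not the real Blueprint type.
type buildParams struct {
	Outputs     []string
	Inputs      []string
	OrderOnly   []string // must be built before the output, gating the install step
	Validations []string // built alongside the output; failures still fail the build
}

// routeTestDeps reproduces the decision added in the diff: with
// --use-validations the test results stop ordering the install step.
func routeTestDeps(useValidations bool, testDeps []string) buildParams {
	p := buildParams{
		Outputs: []string{"out/bin/minibp"},
		Inputs:  []string{"out/obj/minibp/a.out"},
	}
	if useValidations {
		p.Validations = testDeps
	} else {
		p.OrderOnly = testDeps
	}
	return p
}

// ninjaLine renders the resulting build statement (assumed Ninja syntax).
func ninjaLine(p buildParams) string {
	s := "build " + strings.Join(p.Outputs, " ") + ": cp " + strings.Join(p.Inputs, " ")
	if len(p.OrderOnly) > 0 {
		s += " || " + strings.Join(p.OrderOnly, " ")
	}
	if len(p.Validations) > 0 {
		s += " |@ " + strings.Join(p.Validations, " ")
	}
	return s
}

func main() {
	deps := []string{"out/test/minibp.passed"}
	fmt.Println(ninjaLine(routeTestDeps(false, deps)))
	fmt.Println(ninjaLine(routeTestDeps(true, deps)))
}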
diff --git a/bootstrap/bpdoc/bpdoc.go b/bootstrap/bpdoc/bpdoc.go
index 4abf2e7..8ed02c2 100644
--- a/bootstrap/bpdoc/bpdoc.go
+++ b/bootstrap/bpdoc/bpdoc.go
@@ -5,6 +5,7 @@
 	"html/template"
 	"reflect"
 	"sort"
+	"strings"
 
 	"github.com/google/blueprint/proptools"
 )
@@ -58,6 +59,7 @@
 	OtherTexts []template.HTML
 	Properties []Property
 	Default    string
+	Anonymous  bool
 }
 
 func AllPackages(pkgFiles map[string][]string, moduleTypeNameFactories map[string]reflect.Value,
@@ -75,6 +77,7 @@
 			return nil, err
 		}
 		// Some pruning work
+		removeAnonymousProperties(mtInfo)
 		removeEmptyPropertyStructs(mtInfo)
 		collapseDuplicatePropertyStructs(mtInfo)
 		collapseNestedPropertyStructs(mtInfo)
@@ -128,7 +131,9 @@
 		}
 		ps.ExcludeByTag("blueprint", "mutated")
 
-		for nestedName, nestedValue := range nestedPropertyStructs(v) {
+		for _, nestedProperty := range nestedPropertyStructs(v) {
+			nestedName := nestedProperty.nestPoint
+			nestedValue := nestedProperty.value
 			nestedType := nestedValue.Type()
 
 			// Ignore property structs with unexported or unnamed types
@@ -140,12 +145,28 @@
 				return nil, err
 			}
 			nested.ExcludeByTag("blueprint", "mutated")
-			nestPoint := ps.GetByName(nestedName)
-			if nestPoint == nil {
-				return nil, fmt.Errorf("nesting point %q not found", nestedName)
+			if nestedName == "" {
+				ps.Nest(nested)
+			} else {
+				nestPoint := ps.GetByName(nestedName)
+				if nestPoint == nil {
+					return nil, fmt.Errorf("nesting point %q not found", nestedName)
+				}
+				nestPoint.Nest(nested)
 			}
 
-			nestPoint.Nest(nested)
+			if nestedProperty.anonymous {
+				if nestedName != "" {
+					nestedName += "."
+				}
+				nestedName += proptools.PropertyNameForField(nested.Name)
+				nestedProp := ps.GetByName(nestedName)
+				// Anonymous properties may have already been omitted; no need to ensure they are filtered later
+				if nestedProp != nil {
+					// Set property to anonymous to allow future filtering
+					nestedProp.SetAnonymous()
+				}
+			}
 		}
 		mt.PropertyStructs = append(mt.PropertyStructs, ps)
 	}
@@ -153,10 +174,31 @@
 	return mt, nil
 }
 
-func nestedPropertyStructs(s reflect.Value) map[string]reflect.Value {
-	ret := make(map[string]reflect.Value)
+type nestedProperty struct {
+	nestPoint string
+	value     reflect.Value
+	anonymous bool
+}
+
+func nestedPropertyStructs(s reflect.Value) []nestedProperty {
+	ret := make([]nestedProperty, 0)
 	var walk func(structValue reflect.Value, prefix string)
 	walk = func(structValue reflect.Value, prefix string) {
+		var nestStruct func(field reflect.StructField, value reflect.Value, fieldName string)
+		nestStruct = func(field reflect.StructField, value reflect.Value, fieldName string) {
+			nestPoint := prefix
+			if field.Anonymous {
+				nestPoint = strings.TrimSuffix(nestPoint, ".")
+			} else {
+				nestPoint = nestPoint + proptools.PropertyNameForField(fieldName)
+			}
+			ret = append(ret, nestedProperty{nestPoint: nestPoint, value: value, anonymous: field.Anonymous})
+			if nestPoint != "" {
+				nestPoint += "."
+			}
+			walk(value, nestPoint)
+		}
+
 		typ := structValue.Type()
 		for i := 0; i < structValue.NumField(); i++ {
 			field := typ.Field(i)
@@ -174,8 +216,9 @@
 			case reflect.Bool, reflect.String, reflect.Slice, reflect.Int, reflect.Uint:
 				// Nothing
 			case reflect.Struct:
-				walk(fieldValue, prefix+proptools.PropertyNameForField(field.Name)+".")
+				nestStruct(field, fieldValue, field.Name)
 			case reflect.Ptr, reflect.Interface:
+
 				if !fieldValue.IsNil() {
 					// We leave the pointer intact and zero out the struct that's
 					// pointed to.
@@ -188,9 +231,7 @@
 						elem = elem.Elem()
 					}
 					if elem.Kind() == reflect.Struct {
-						nestPoint := prefix + proptools.PropertyNameForField(field.Name)
-						ret[nestPoint] = elem
-						walk(elem, nestPoint+".")
+						nestStruct(field, elem, field.Name)
 					}
 				}
 			default:
@@ -214,6 +255,27 @@
 	}
 }
 
+// Remove any property structs that are anonymous
+func removeAnonymousProperties(mt *ModuleType) {
+	var removeAnonymousProps func(props []Property) []Property
+	removeAnonymousProps = func(props []Property) []Property {
+		newProps := make([]Property, 0, len(props))
+		for _, p := range props {
+			if p.Anonymous {
+				continue
+			}
+			if len(p.Properties) > 0 {
+				p.Properties = removeAnonymousProps(p.Properties)
+			}
+			newProps = append(newProps, p)
+		}
+		return newProps
+	}
+	for _, ps := range mt.PropertyStructs {
+		ps.Properties = removeAnonymousProps(ps.Properties)
+	}
+}
+
 // Squashes duplicates of the same property struct into single entries
 func collapseDuplicatePropertyStructs(mt *ModuleType) {
 	var collapsed []*PropertyStruct
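The nestStruct/anonymous handling added above treats an embedded (anonymous) struct as part of its parent: its nest point is the parent's prefix with the trailing '.' trimmed, so its fields are documented as if declared directly on the embedding struct. Below is a standalone sketch of that walk using reflect and the fixture names from reader_test.go further down; exampleProps is hypothetical and strings.ToLower stands in for proptools.PropertyNameForField.

package main

import (
	"fmt"
	"reflect"
	"strings"
)

type structToNest struct{ E string }

type StructToEmbed struct {
	Nested_in_embedded structToNest
	F                  string
}

type exampleProps struct {
	A             string
	StructToEmbed // anonymous: its fields are promoted onto the parent
}

// walk mirrors the nestStruct logic above: named struct fields extend the
// prefix, anonymous (embedded) ones keep the parent's prefix so their
// properties appear as if declared directly on it.
func walk(v reflect.Value, prefix string) {
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		field := t.Field(i)
		value := v.Field(i)
		if value.Kind() == reflect.Struct {
			nestPoint := prefix
			if field.Anonymous {
				nestPoint = strings.TrimSuffix(nestPoint, ".")
			} else {
				nestPoint += strings.ToLower(field.Name)
			}
			fmt.Printf("nest point: %q (anonymous=%v)\n", nestPoint, field.Anonymous)
			if nestPoint != "" {
				nestPoint += "."
			}
			walk(value, nestPoint)
		} else {
			fmt.Printf("property:   %q\n", prefix+strings.ToLower(field.Name))
		}
	}
}

func main() {
	// Prints "a", then the embedded struct at nest point "" (i.e. the parent
	// itself), then "nested_in_embedded", "nested_in_embedded.e" and "f".
	walk(reflect.ValueOf(exampleProps{}), "")
}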
diff --git a/bootstrap/bpdoc/bpdoc_test.go b/bootstrap/bpdoc/bpdoc_test.go
index 687d97b..67ad783 100644
--- a/bootstrap/bpdoc/bpdoc_test.go
+++ b/bootstrap/bpdoc/bpdoc_test.go
@@ -1,10 +1,16 @@
 package bpdoc
 
 import (
+	"fmt"
 	"reflect"
 	"testing"
 )
 
+type propInfo struct {
+	name string
+	typ  string
+}
+
 type parentProps struct {
 	A string
 
@@ -35,12 +41,133 @@
 	// mutated shouldn't be found because it's a mutated property.
 	expected := []string{"child", "child.child"}
 	if len(allStructs) != len(expected) {
-		t.Errorf("expected %d structs, got %d, all entries: %q",
+		t.Fatalf("expected %d structs, got %d, all entries: %v",
 			len(expected), len(allStructs), allStructs)
 	}
-	for _, e := range expected {
-		if _, ok := allStructs[e]; !ok {
-			t.Errorf("missing entry %q, all entries: %q", e, allStructs)
+	got := []string{}
+	for _, s := range allStructs {
+		got = append(got, s.nestPoint)
+	}
+
+	if !reflect.DeepEqual(got, expected) {
+		t.Errorf("Expected nested properties:\n\t %q,\n but got\n\t %q", expected, got)
+	}
+}
+
+func TestAllPackages(t *testing.T) {
+	packages, err := AllPackages(pkgFiles, moduleTypeNameFactories, moduleTypeNamePropertyStructs)
+	if err != nil {
+		t.Fatalf("expected nil error for AllPackages(%v, %v, %v), got %s", pkgFiles, moduleTypeNameFactories, moduleTypeNamePropertyStructs, err)
+	}
+
+	if numPackages := len(packages); numPackages != 1 {
+		t.Errorf("Expected %d package, got %d packages %v instead", len(pkgFiles), numPackages, packages)
+	}
+
+	pkg := packages[0]
+
+	expectedProps := map[string][]propInfo{
+		"bar": []propInfo{
+			propInfo{
+				name: "a",
+				typ:  "string",
+			},
+			propInfo{
+				name: "nested",
+				typ:  "",
+			},
+			propInfo{
+				name: "nested.c",
+				typ:  "string",
+			},
+			propInfo{
+				name: "nested_struct",
+				typ:  "structToNest",
+			},
+			propInfo{
+				name: "nested_struct.e",
+				typ:  "string",
+			},
+			propInfo{
+				name: "struct_has_embed",
+				typ:  "StructWithEmbedded",
+			},
+			propInfo{
+				name: "struct_has_embed.nested_in_embedded",
+				typ:  "structToNest",
+			},
+			propInfo{
+				name: "struct_has_embed.nested_in_embedded.e",
+				typ:  "string",
+			},
+			propInfo{
+				name: "struct_has_embed.f",
+				typ:  "string",
+			},
+			propInfo{
+				name: "list_of_ints",
+				typ:  "list of int",
+			},
+			propInfo{
+				name: "list_of_nested",
+				typ:  "list of structToNest",
+			},
+			propInfo{
+				name: "nested_in_other_embedded",
+				typ:  "otherStructToNest",
+			},
+			propInfo{
+				name: "nested_in_other_embedded.g",
+				typ:  "string",
+			},
+			propInfo{
+				name: "h",
+				typ:  "string",
+			},
+		},
+		"foo": []propInfo{
+			propInfo{
+				name: "a",
+				typ:  "string",
+			},
+		},
+	}
+
+	for _, m := range pkg.ModuleTypes {
+		foundProps := []propInfo{}
+
+		for _, p := range m.PropertyStructs {
+			nestedProps, errs := findAllProperties("", p.Properties)
+			foundProps = append(foundProps, nestedProps...)
+			for _, err := range errs {
+				t.Errorf("%s", err)
+			}
+		}
+		if wanted, ok := expectedProps[m.Name]; ok {
+			if !reflect.DeepEqual(foundProps, wanted) {
+				t.Errorf("For %s, expected\n\t %q,\nbut got\n\t %q", m.Name, wanted, foundProps)
+			}
 		}
 	}
 }
+
+func findAllProperties(prefix string, properties []Property) ([]propInfo, []error) {
+	foundProps := []propInfo{}
+	errs := []error{}
+	for _, p := range properties {
+		prop := propInfo{
+			name: prefix + p.Name,
+			typ:  p.Type,
+		}
+		foundProps = append(foundProps, prop)
+		if hasTag(p.Tag, "blueprint", "mutated") {
+			err := fmt.Errorf("Property %s has `blueprint:\"mutated\"` tag but should have been excluded.", p.Name)
+			errs = append(errs, err)
+		}
+
+		nestedProps, nestedErrs := findAllProperties(prefix+p.Name+".", p.Properties)
+		foundProps = append(foundProps, nestedProps...)
+		errs = append(errs, nestedErrs...)
+	}
+	return foundProps, errs
+}
diff --git a/bootstrap/bpdoc/properties.go b/bootstrap/bpdoc/properties.go
index 9256d8e..2ca8e65 100644
--- a/bootstrap/bpdoc/properties.go
+++ b/bootstrap/bpdoc/properties.go
@@ -142,6 +142,10 @@
 	return getByName(name, "", &ps.Properties)
 }
 
+func (ps *PropertyStruct) Nest(nested *PropertyStruct) {
+	ps.Properties = append(ps.Properties, nested.Properties...)
+}
+
 func getByName(name string, prefix string, props *[]Property) *Property {
 	for i := range *props {
 		if prefix+(*props)[i].Name == name {
@@ -157,6 +161,10 @@
 	p.Properties = append(p.Properties, nested.Properties...)
 }
 
+func (p *Property) SetAnonymous() {
+	p.Anonymous = true
+}
+
 func newPropertyStruct(t *doc.Type) (*PropertyStruct, error) {
 	typeSpec := t.Decl.Specs[0].(*ast.TypeSpec)
 	ps := PropertyStruct{
@@ -189,8 +197,7 @@
 			}
 		}
 		for _, n := range names {
-			var name, typ, tag, text string
-			var innerProps []Property
+			var name, tag, text string
 			if n != nil {
 				name = proptools.PropertyNameForField(n.Name)
 			}
@@ -203,25 +210,9 @@
 					return nil, err
 				}
 			}
-
-			t := f.Type
-			if star, ok := t.(*ast.StarExpr); ok {
-				t = star.X
-			}
-			switch a := t.(type) {
-			case *ast.ArrayType:
-				typ = "list of strings"
-			case *ast.InterfaceType:
-				typ = "interface"
-			case *ast.Ident:
-				typ = a.Name
-			case *ast.StructType:
-				innerProps, err = structProperties(a)
-				if err != nil {
-					return nil, err
-				}
-			default:
-				typ = fmt.Sprintf("%T", f.Type)
+			typ, innerProps, err := getType(f.Type)
+			if err != nil {
+				return nil, err
 			}
 
 			props = append(props, Property{
@@ -237,6 +228,37 @@
 	return props, nil
 }
 
+func getType(expr ast.Expr) (typ string, innerProps []Property, err error) {
+	var t ast.Expr
+	if star, ok := expr.(*ast.StarExpr); ok {
+		t = star.X
+	} else {
+		t = expr
+	}
+	switch a := t.(type) {
+	case *ast.ArrayType:
+		var elt string
+		elt, innerProps, err = getType(a.Elt)
+		if err != nil {
+			return "", nil, err
+		}
+		typ = "list of " + elt
+	case *ast.InterfaceType:
+		typ = "interface"
+	case *ast.Ident:
+		typ = a.Name
+	case *ast.StructType:
+		innerProps, err = structProperties(a)
+		if err != nil {
+			return "", nil, err
+		}
+	default:
+		typ = fmt.Sprintf("%T", expr)
+	}
+
+	return typ, innerProps, nil
+}
+
 func (ps *PropertyStruct) ExcludeByTag(key, value string) {
 	filterPropsByTag(&ps.Properties, key, value, true)
 }
@@ -251,6 +273,7 @@
 	filtered := (*props)[:0]
 	for _, x := range *props {
 		if hasTag(x.Tag, key, value) == !exclude {
+			filterPropsByTag(&x.Properties, key, value, exclude)
 			filtered = append(filtered, x)
 		}
 	}
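The getType helper added above names list types recursively ("list of int", "list of structToNest"), which is what the new expectations in bpdoc_test.go rely on. Below is a minimal standalone sketch of the same classification over go/ast expressions; typeName is a hypothetical stand-in that mirrors the switch in the diff and omits the struct and error-handling cases.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
)

// typeName mirrors getType's recursion for the cases shown here: pointers are
// unwrapped, arrays/slices become "list of <element>", identifiers keep their
// name, and anything else falls back to the Go type of the AST node.
func typeName(expr ast.Expr) string {
	if star, ok := expr.(*ast.StarExpr); ok {
		expr = star.X
	}
	switch t := expr.(type) {
	case *ast.ArrayType:
		return "list of " + typeName(t.Elt)
	case *ast.Ident:
		return t.Name
	case *ast.InterfaceType:
		return "interface"
	default:
		return fmt.Sprintf("%T", expr)
	}
}

func main() {
	for _, src := range []string{"string", "*string", "[]int", "[]structToNest", "[][]string"} {
		expr, err := parser.ParseExpr(src)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%-16s -> %s\n", src, typeName(expr))
	}
}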
diff --git a/bootstrap/bpdoc/properties_test.go b/bootstrap/bpdoc/properties_test.go
index 4045cb1..085bcdf 100644
--- a/bootstrap/bpdoc/properties_test.go
+++ b/bootstrap/bpdoc/properties_test.go
@@ -28,11 +28,8 @@
 
 	ps.ExcludeByTag("tag1", "a")
 
-	expected := []string{"c"}
-	actual := []string{}
-	for _, p := range ps.Properties {
-		actual = append(actual, p.Name)
-	}
+	expected := []string{"c", "d", "g"}
+	actual := actualProperties(t, ps.Properties)
 	if !reflect.DeepEqual(expected, actual) {
 		t.Errorf("unexpected ExcludeByTag result, expected: %q, actual: %q", expected, actual)
 	}
@@ -47,12 +44,20 @@
 
 	ps.IncludeByTag("tag1", "c")
 
-	expected := []string{"b", "c"}
-	actual := []string{}
-	for _, p := range ps.Properties {
-		actual = append(actual, p.Name)
-	}
+	expected := []string{"b", "c", "d", "f", "g"}
+	actual := actualProperties(t, ps.Properties)
 	if !reflect.DeepEqual(expected, actual) {
 		t.Errorf("unexpected IncludeByTag result, expected: %q, actual: %q", expected, actual)
 	}
 }
+
+func actualProperties(t *testing.T, props []Property) []string {
+	t.Helper()
+
+	actual := []string{}
+	for _, p := range props {
+		actual = append(actual, p.Name)
+		actual = append(actual, actualProperties(t, p.Properties)...)
+	}
+	return actual
+}
diff --git a/bootstrap/bpdoc/reader_test.go b/bootstrap/bpdoc/reader_test.go
index 0d608b3..bf324bf 100644
--- a/bootstrap/bpdoc/reader_test.go
+++ b/bootstrap/bpdoc/reader_test.go
@@ -16,6 +16,7 @@
 package bpdoc
 
 import (
+	"html/template"
 	"reflect"
 	"runtime"
 	"testing"
@@ -23,11 +24,65 @@
 	"github.com/google/blueprint"
 )
 
+type factoryFn func() (blueprint.Module, []interface{})
+
 // foo docs.
 func fooFactory() (blueprint.Module, []interface{}) {
 	return nil, []interface{}{&props{}}
 }
 
+// bar docs.
+func barFactory() (blueprint.Module, []interface{}) {
+	return nil, []interface{}{&complexProps{}}
+}
+
+type structToNest struct {
+	E string
+}
+
+type StructToEmbed struct {
+	Nested_in_embedded structToNest
+
+	// F string
+	F string
+}
+
+type otherStructToNest struct {
+	G string
+}
+
+type OtherStructToEmbed struct {
+	Nested_in_other_embedded otherStructToNest
+
+	// H string
+	H string
+}
+
+type StructWithEmbedded struct {
+	StructToEmbed
+}
+
+// for bpdoc_test.go
+type complexProps struct {
+	A         string
+	B_mutated string `blueprint:"mutated"`
+
+	Nested struct {
+		C         string
+		D_mutated string `blueprint:"mutated"`
+	}
+
+	Nested_struct structToNest
+
+	Struct_has_embed StructWithEmbedded
+
+	OtherStructToEmbed
+
+	List_of_ints []int
+
+	List_of_nested []structToNest
+}
+
 // props docs.
 type props struct {
 	// A docs.
@@ -39,10 +94,18 @@
 	A string `tag1:"a,b" tag2:"c"`
 	B string `tag1:"a,c"`
 	C string `tag1:"b,c"`
+
+	D struct {
+		E string `tag1:"a,b" tag2:"c"`
+		F string `tag1:"a,c"`
+		G string `tag1:"b,c"`
+	} `tag1:"b,c"`
 }
 
 var pkgPath string
 var pkgFiles map[string][]string
+var moduleTypeNameFactories map[string]reflect.Value
+var moduleTypeNamePropertyStructs map[string][]interface{}
 
 func init() {
 	pc, filename, _, _ := runtime.Caller(0)
@@ -57,21 +120,34 @@
 	pkgFiles = map[string][]string{
 		pkgPath: {filename},
 	}
+
+	factories := map[string]factoryFn{"foo": fooFactory, "bar": barFactory}
+
+	moduleTypeNameFactories = make(map[string]reflect.Value, len(factories))
+	moduleTypeNamePropertyStructs = make(map[string][]interface{}, len(factories))
+	for name, factory := range factories {
+		moduleTypeNameFactories[name] = reflect.ValueOf(factory)
+		_, structs := factory()
+		moduleTypeNamePropertyStructs[name] = structs
+	}
 }
 
 func TestModuleTypeDocs(t *testing.T) {
 	r := NewReader(pkgFiles)
-	mt, err := r.ModuleType("foo_module", reflect.ValueOf(fooFactory))
-	if err != nil {
-		t.Fatal(err)
-	}
+	for m := range moduleTypeNameFactories {
+		mt, err := r.ModuleType(m+"_module", moduleTypeNameFactories[m])
+		if err != nil {
+			t.Fatal(err)
+		}
 
-	if mt.Text != "foo docs.\n\n" {
-		t.Errorf("unexpected docs %q", mt.Text)
-	}
+		expectedText := template.HTML(m + " docs.\n\n")
+		if mt.Text != expectedText {
+			t.Errorf("unexpected docs %q", mt.Text)
+		}
 
-	if mt.PkgPath != pkgPath {
-		t.Errorf("expected pkgpath %q, got %q", pkgPath, mt.PkgPath)
+		if mt.PkgPath != pkgPath {
+			t.Errorf("expected pkgpath %q, got %q", pkgPath, mt.PkgPath)
+		}
 	}
 }
 
diff --git a/bootstrap/bpglob/bpglob.go b/bootstrap/bpglob/bpglob.go
index fe47b6f..81c0dd0 100644
--- a/bootstrap/bpglob/bpglob.go
+++ b/bootstrap/bpglob/bpglob.go
@@ -19,69 +19,206 @@
 package main
 
 import (
+	"bytes"
+	"errors"
 	"flag"
 	"fmt"
 	"io/ioutil"
 	"os"
+	"strconv"
 	"time"
 
+	"github.com/google/blueprint/deptools"
 	"github.com/google/blueprint/pathtools"
 )
 
 var (
-	out = flag.String("o", "", "file to write list of files that match glob")
+	// flagSet is a flag.FlagSet with flag.ContinueOnError so that we can handle the versionMismatchError
+	// error from versionArg.
+	flagSet = flag.NewFlagSet("bpglob", flag.ContinueOnError)
 
-	excludes multiArg
+	out = flagSet.String("o", "", "file to write list of files that match glob")
+
+	versionMatch versionArg
+	globs        []globArg
 )
 
 func init() {
-	flag.Var(&excludes, "e", "pattern to exclude from results")
+	flagSet.Var(&versionMatch, "v", "version number the command line was generated for")
+	flagSet.Var((*patternsArgs)(&globs), "p", "pattern to include in results")
+	flagSet.Var((*excludeArgs)(&globs), "e", "pattern to exclude from results from the most recent pattern")
 }
 
-type multiArg []string
+// bpglob is executed through the rules in build-globs.ninja to determine whether soong_build
+// needs to rerun.  That means when the arguments accepted by bpglob change it will be called
+// with the old arguments, then soong_build will rerun and update build-globs.ninja with the new
+// arguments.
+//
+// To avoid having to maintain backwards compatibility with old arguments across the transition,
+// a version argument is used to detect the transition in order to stop parsing arguments, touch the
+// output file and exit immediately.  Aborting parsing arguments is necessary to handle parsing
+// errors that would be fatal, for example the removal of a flag.  The version number in
+// pathtools.BPGlobArgumentVersion should be manually incremented when the bpglob argument format
+// changes.
+//
+// If the version argument is not passed then a version mismatch is assumed.
 
-func (m *multiArg) String() string {
-	return `""`
-}
+// versionArg checks the argument against pathtools.BPGlobArgumentVersion, returning a
+// versionMismatchError error if it does not match.
+type versionArg bool
 
-func (m *multiArg) Set(s string) error {
-	*m = append(*m, s)
+var versionMismatchError = errors.New("version mismatch")
+
+func (v *versionArg) String() string { return "" }
+
+func (v *versionArg) Set(s string) error {
+	vers, err := strconv.Atoi(s)
+	if err != nil {
+		return fmt.Errorf("error parsing version argument: %w", err)
+	}
+
+	// Force the -o argument to come before the -v argument so that the output file can be
+	// updated on error.
+	if *out == "" {
+		return fmt.Errorf("-o argument must be passed before -v")
+	}
+
+	if vers != pathtools.BPGlobArgumentVersion {
+		return versionMismatchError
+	}
+
+	*v = true
+
 	return nil
 }
 
-func (m *multiArg) Get() interface{} {
-	return m
+// A glob arg holds a single -p argument with zero or more following -e arguments.
+type globArg struct {
+	pattern  string
+	excludes []string
+}
+
+// patternsArgs implements flag.Value to handle -p arguments by adding a new globArg to the list.
+type patternsArgs []globArg
+
+func (p *patternsArgs) String() string { return `""` }
+
+func (p *patternsArgs) Set(s string) error {
+	globs = append(globs, globArg{
+		pattern: s,
+	})
+	return nil
+}
+
+// excludeArgs implements flag.Value to handle -e arguments by adding to the last globArg in the
+// list.
+type excludeArgs []globArg
+
+func (e *excludeArgs) String() string { return `""` }
+
+func (e *excludeArgs) Set(s string) error {
+	if len(*e) == 0 {
+		return fmt.Errorf("-p argument is required before the first -e argument")
+	}
+
+	glob := &(*e)[len(*e)-1]
+	glob.excludes = append(glob.excludes, s)
+	return nil
 }
 
 func usage() {
-	fmt.Fprintln(os.Stderr, "usage: bpglob -o out glob")
-	flag.PrintDefaults()
+	fmt.Fprintln(os.Stderr, "usage: bpglob -o out -v version -p glob [-e excludes ...] [-p glob ...]")
+	flagSet.PrintDefaults()
 	os.Exit(2)
 }
 
 func main() {
-	flag.Parse()
+	// Save the command line flag error output to a buffer; the flag package unconditionally
+	// writes an error message to the output on error, and we want to hide the error for the
+	// version mismatch case.
+	flagErrorBuffer := &bytes.Buffer{}
+	flagSet.SetOutput(flagErrorBuffer)
+
+	err := flagSet.Parse(os.Args[1:])
+
+	if !versionMatch {
+		// A version mismatch error occurs when the arguments written into build-globs.ninja
+		// don't match the format expected by the bpglob binary.  This happens during the
+		// first incremental build after bpglob is changed.  Handle this case by aborting
+		// argument parsing and updating the output file with something that will always cause
+		// the primary builder to rerun.
+		// This can happen when there is no -v argument or if the -v argument doesn't match
+		// pathtools.BPGlobArgumentVersion.
+		writeErrorOutput(*out, versionMismatchError)
+		os.Exit(0)
+	}
+
+	if err != nil {
+		os.Stderr.Write(flagErrorBuffer.Bytes())
+		fmt.Fprintln(os.Stderr, "error:", err.Error())
+		usage()
+	}
 
 	if *out == "" {
 		fmt.Fprintln(os.Stderr, "error: -o is required")
 		usage()
 	}
 
-	if flag.NArg() != 1 {
+	if flagSet.NArg() > 0 {
 		usage()
 	}
 
-	_, err := pathtools.GlobWithDepFile(flag.Arg(0), *out, *out+".d", excludes)
+	err = globsWithDepFile(*out, *out+".d", globs)
 	if err != nil {
 		// Globs here were already run in the primary builder without error.  The only errors here should be if the glob
 		// pattern was made invalid by a change in the pathtools glob implementation, in which case the primary builder
 		// needs to be rerun anyways.  Update the output file with something that will always cause the primary builder
 		// to rerun.
-		s := fmt.Sprintf("%s: error: %s\n", time.Now().Format(time.StampNano), err.Error())
-		err := ioutil.WriteFile(*out, []byte(s), 0666)
-		if err != nil {
-			fmt.Fprintf(os.Stderr, "error: %s\n", err.Error())
-			os.Exit(1)
-		}
+		writeErrorOutput(*out, err)
 	}
 }
+
+// writeErrorOutput writes an error to the output file with a timestamp to ensure that it is
+// considered dirty by ninja.
+func writeErrorOutput(path string, globErr error) {
+	s := fmt.Sprintf("%s: error: %s\n", time.Now().Format(time.StampNano), globErr.Error())
+	err := ioutil.WriteFile(path, []byte(s), 0666)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "error: %s\n", err.Error())
+		os.Exit(1)
+	}
+}
+
+// globsWithDepFile finds all files and directories that match the glob patterns in globs.  Directories
+// will have a trailing '/'.  It compares the list of matches against the
+// contents of fileListFile, and rewrites fileListFile if it has changed.  It
+// also writes all of the directories it traversed as dependencies on fileListFile
+// to depFile.
+//
+// The format of each glob pattern is either path/*.ext for a single directory glob, or
+// path/**/*.ext for a recursive glob.
+func globsWithDepFile(fileListFile, depFile string, globs []globArg) error {
+	var results pathtools.MultipleGlobResults
+	for _, glob := range globs {
+		result, err := pathtools.Glob(glob.pattern, glob.excludes, pathtools.FollowSymlinks)
+		if err != nil {
+			return err
+		}
+		results = append(results, result)
+	}
+
+	// Only write the output file if it has changed.
+	err := pathtools.WriteFileIfChanged(fileListFile, results.FileList(), 0666)
+	if err != nil {
+		return fmt.Errorf("failed to write file list to %q: %w", fileListFile, err)
+	}
+
+	// The depfile can be written unconditionally as its timestamp doesn't affect ninja's restat
+	// feature.
+	err = deptools.WriteDepFile(depFile, fileListFile, results.Deps())
+	if err != nil {
+		return fmt.Errorf("failed to write dep file to %q: %w", depFile, err)
+	}
+
+	return nil
+}
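To make the new -p/-e handling concrete: each -p starts a new pattern group and each -e attaches to the most recent one, so a build-globs.ninja command line such as bpglob -o out -v <version> -p <glob> -e <exclude> -p <glob> yields one globArg per pattern. Below is a self-contained sketch of just that grouping; the version check, globbing and depfile writing above are not repeated, and the example patterns are made up.

package main

import (
	"flag"
	"fmt"
)

// globArg mirrors the struct above: one -p pattern plus the -e excludes that follow it.
type globArg struct {
	pattern  string
	excludes []string
}

var globs []globArg

// patternsArg appends a new group for each -p.
type patternsArg struct{}

func (patternsArg) String() string { return `""` }
func (patternsArg) Set(s string) error {
	globs = append(globs, globArg{pattern: s})
	return nil
}

// excludeArg attaches each -e value to the most recent -p group.
type excludeArg struct{}

func (excludeArg) String() string { return `""` }
func (excludeArg) Set(s string) error {
	if len(globs) == 0 {
		return fmt.Errorf("-p argument is required before the first -e argument")
	}
	globs[len(globs)-1].excludes = append(globs[len(globs)-1].excludes, s)
	return nil
}

func main() {
	fs := flag.NewFlagSet("bpglob-sketch", flag.ExitOnError)
	fs.Var(patternsArg{}, "p", "pattern to include in results")
	fs.Var(excludeArg{}, "e", "pattern to exclude from results from the most recent pattern")

	// Example arguments as they might appear in build-globs.ninja (paths are made up).
	if err := fs.Parse([]string{"-p", "java/**/*.java", "-e", "java/**/tests/*", "-p", "res/*.xml"}); err != nil {
		panic(err)
	}

	for _, g := range globs {
		fmt.Printf("pattern %q excludes %q\n", g.pattern, g.excludes)
	}
}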
diff --git a/bootstrap/cleanup.go b/bootstrap/cleanup.go
index 6444081..9dbea2a 100644
--- a/bootstrap/cleanup.go
+++ b/bootstrap/cleanup.go
@@ -31,8 +31,8 @@
 // removeAbandonedFilesUnder removes any files that appear in the Ninja log, and
 // are prefixed with one of the `under` entries, but that are not currently
 // build targets, or in `exempt`
-func removeAbandonedFilesUnder(ctx *blueprint.Context, config *Config,
-	srcDir string, under, exempt []string) error {
+func removeAbandonedFilesUnder(ctx *blueprint.Context,
+	srcDir, buildDir string, under, exempt []string) error {
 
 	if len(under) == 0 {
 		return nil
@@ -50,7 +50,7 @@
 
 	replacer := strings.NewReplacer(
 		"@@SrcDir@@", srcDir,
-		"@@BuildDir@@", BuildDir)
+		"@@BuildDir@@", buildDir)
 	ninjaBuildDir = replacer.Replace(ninjaBuildDir)
 	targets := make(map[string]bool)
 	for target := range targetRules {
diff --git a/bootstrap/command.go b/bootstrap/command.go
index cbbd32d..4a938db 100644
--- a/bootstrap/command.go
+++ b/bootstrap/command.go
@@ -31,57 +31,123 @@
 	"github.com/google/blueprint/deptools"
 )
 
+type Args struct {
+	OutFile                  string
+	GlobFile                 string
+	DepFile                  string
+	DocFile                  string
+	Cpuprofile               string
+	Memprofile               string
+	DelveListen              string
+	DelvePath                string
+	TraceFile                string
+	RunGoTests               bool
+	UseValidations           bool
+	NoGC                     bool
+	EmptyNinjaFile           bool
+	BuildDir                 string
+	ModuleListFile           string
+	NinjaBuildDir            string
+	TopFile                  string
+	GeneratingPrimaryBuilder bool
+
+	PrimaryBuilderInvocations []PrimaryBuilderInvocation
+}
+
 var (
-	outFile        string
-	globFile       string
-	depFile        string
-	docFile        string
-	cpuprofile     string
-	memprofile     string
-	traceFile      string
-	runGoTests     bool
-	noGC           bool
-	moduleListFile string
-	emptyNinjaFile bool
-
-	BuildDir      string
-	NinjaBuildDir string
-	SrcDir        string
-
-	absSrcDir string
+	CmdlineArgs Args
+	absSrcDir   string
 )
 
 func init() {
-	flag.StringVar(&outFile, "o", "build.ninja", "the Ninja file to output")
-	flag.StringVar(&globFile, "globFile", "build-globs.ninja", "the Ninja file of globs to output")
-	flag.StringVar(&BuildDir, "b", ".", "the build output directory")
-	flag.StringVar(&NinjaBuildDir, "n", "", "the ninja builddir directory")
-	flag.StringVar(&depFile, "d", "", "the dependency file to output")
-	flag.StringVar(&docFile, "docs", "", "build documentation file to output")
-	flag.StringVar(&cpuprofile, "cpuprofile", "", "write cpu profile to file")
-	flag.StringVar(&traceFile, "trace", "", "write trace to file")
-	flag.StringVar(&memprofile, "memprofile", "", "write memory profile to file")
-	flag.BoolVar(&noGC, "nogc", false, "turn off GC for debugging")
-	flag.BoolVar(&runGoTests, "t", false, "build and run go tests during bootstrap")
-	flag.StringVar(&moduleListFile, "l", "", "file that lists filepaths to parse")
-	flag.BoolVar(&emptyNinjaFile, "empty-ninja-file", false, "write out a 0-byte ninja file")
+	flag.StringVar(&CmdlineArgs.OutFile, "o", "build.ninja", "the Ninja file to output")
+	flag.StringVar(&CmdlineArgs.GlobFile, "globFile", "build-globs.ninja", "the Ninja file of globs to output")
+	flag.StringVar(&CmdlineArgs.BuildDir, "b", ".", "the build output directory")
+	flag.StringVar(&CmdlineArgs.NinjaBuildDir, "n", "", "the ninja builddir directory")
+	flag.StringVar(&CmdlineArgs.DepFile, "d", "", "the dependency file to output")
+	flag.StringVar(&CmdlineArgs.DocFile, "docs", "", "build documentation file to output")
+	flag.StringVar(&CmdlineArgs.Cpuprofile, "cpuprofile", "", "write cpu profile to file")
+	flag.StringVar(&CmdlineArgs.TraceFile, "trace", "", "write trace to file")
+	flag.StringVar(&CmdlineArgs.Memprofile, "memprofile", "", "write memory profile to file")
+	flag.BoolVar(&CmdlineArgs.NoGC, "nogc", false, "turn off GC for debugging")
+	flag.BoolVar(&CmdlineArgs.RunGoTests, "t", false, "build and run go tests during bootstrap")
+	flag.BoolVar(&CmdlineArgs.UseValidations, "use-validations", false, "use validations to depend on go tests")
+	flag.StringVar(&CmdlineArgs.ModuleListFile, "l", "", "file that lists filepaths to parse")
+	flag.BoolVar(&CmdlineArgs.EmptyNinjaFile, "empty-ninja-file", false, "write out a 0-byte ninja file")
 }
 
-func Main(ctx *blueprint.Context, config interface{}, extraNinjaFileDeps ...string) {
+func Main(ctx *blueprint.Context, config interface{}, generatingPrimaryBuilder bool) {
 	if !flag.Parsed() {
 		flag.Parse()
 	}
 
+	if flag.NArg() != 1 {
+		fatalf("no Blueprints file specified")
+	}
+
+	CmdlineArgs.TopFile = flag.Arg(0)
+	CmdlineArgs.GeneratingPrimaryBuilder = generatingPrimaryBuilder
+	ninjaDeps := RunBlueprint(CmdlineArgs, ctx, config)
+	err := deptools.WriteDepFile(CmdlineArgs.DepFile, CmdlineArgs.OutFile, ninjaDeps)
+	if err != nil {
+		fatalf("Cannot write depfile '%s': %s", CmdlineArgs.DepFile, err)
+	}
+}
+
+func PrimaryBuilderExtraFlags(args Args, globFile, mainNinjaFile string) []string {
+	result := make([]string, 0)
+
+	if args.RunGoTests {
+		result = append(result, "-t")
+	}
+
+	result = append(result, "-l", args.ModuleListFile)
+	result = append(result, "-globFile", globFile)
+	result = append(result, "-o", mainNinjaFile)
+
+	if args.EmptyNinjaFile {
+		result = append(result, "--empty-ninja-file")
+	}
+
+	if args.DelveListen != "" {
+		result = append(result, "--delve_listen", args.DelveListen)
+	}
+
+	if args.DelvePath != "" {
+		result = append(result, "--delve_path", args.DelvePath)
+	}
+
+	return result
+}
+
+func writeEmptyGlobFile(path string) {
+	err := os.MkdirAll(filepath.Dir(path), 0777)
+	if err != nil {
+		fatalf("Failed to create parent directories of empty ninja glob file '%s': %s", path, err)
+	}
+
+	if _, err := os.Stat(path); os.IsNotExist(err) {
+		err = ioutil.WriteFile(path, nil, 0666)
+		if err != nil {
+			fatalf("Failed to create empty ninja glob file '%s': %s", path, err)
+		}
+	}
+}
+
+// Returns the list of dependencies the emitted Ninja file has. These can be
+// written to the .d file for the output so that it is correctly rebuilt when
+// needed in case Blueprint is itself invoked from Ninja.
+func RunBlueprint(args Args, ctx *blueprint.Context, config interface{}) []string {
 	runtime.GOMAXPROCS(runtime.NumCPU())
 
-	if noGC {
+	if args.NoGC {
 		debug.SetGCPercent(-1)
 	}
 
 	absSrcDir = ctx.SrcDir()
 
-	if cpuprofile != "" {
-		f, err := os.Create(absolutePath(cpuprofile))
+	if args.Cpuprofile != "" {
+		f, err := os.Create(absolutePath(args.Cpuprofile))
 		if err != nil {
 			fatalf("error opening cpuprofile: %s", err)
 		}
@@ -90,8 +156,8 @@
 		defer pprof.StopCPUProfile()
 	}
 
-	if traceFile != "" {
-		f, err := os.Create(absolutePath(traceFile))
+	if args.TraceFile != "" {
+		f, err := os.Create(absolutePath(args.TraceFile))
 		if err != nil {
 			fatalf("error opening trace: %s", err)
 		}
@@ -100,40 +166,56 @@
 		defer trace.Stop()
 	}
 
-	if flag.NArg() != 1 {
-		fatalf("no Blueprints file specified")
-	}
+	srcDir := filepath.Dir(args.TopFile)
 
-	SrcDir = filepath.Dir(flag.Arg(0))
-	if moduleListFile != "" {
-		ctx.SetModuleListFile(moduleListFile)
-		extraNinjaFileDeps = append(extraNinjaFileDeps, moduleListFile)
+	ninjaDeps := make([]string, 0)
+
+	if args.ModuleListFile != "" {
+		ctx.SetModuleListFile(args.ModuleListFile)
+		ninjaDeps = append(ninjaDeps, args.ModuleListFile)
 	} else {
 		fatalf("-l <moduleListFile> is required and must be nonempty")
 	}
-	filesToParse, err := ctx.ListModulePaths(SrcDir)
+	filesToParse, err := ctx.ListModulePaths(srcDir)
 	if err != nil {
 		fatalf("could not enumerate files: %v\n", err.Error())
 	}
 
-	if NinjaBuildDir == "" {
-		NinjaBuildDir = BuildDir
-	}
+	buildDir := config.(BootstrapConfig).BuildDir()
 
 	stage := StageMain
-	if c, ok := config.(ConfigInterface); ok {
-		if c.GeneratingPrimaryBuilder() {
-			stage = StagePrimary
-		}
+	if args.GeneratingPrimaryBuilder {
+		stage = StagePrimary
+	}
+
+	primaryBuilderNinjaGlobFile := absolutePath(filepath.Join(args.BuildDir, bootstrapSubDir, "build-globs.ninja"))
+	mainNinjaFile := filepath.Join("$buildDir", "build.ninja")
+
+	writeEmptyGlobFile(primaryBuilderNinjaGlobFile)
+
+	var invocations []PrimaryBuilderInvocation
+
+	if args.PrimaryBuilderInvocations != nil {
+		invocations = args.PrimaryBuilderInvocations
+	} else {
+		primaryBuilderArgs := PrimaryBuilderExtraFlags(args, primaryBuilderNinjaGlobFile, mainNinjaFile)
+		primaryBuilderArgs = append(primaryBuilderArgs, args.TopFile)
+
+		invocations = []PrimaryBuilderInvocation{{
+			Inputs:  []string{args.TopFile},
+			Outputs: []string{mainNinjaFile},
+			Args:    primaryBuilderArgs,
+		}}
 	}
 
 	bootstrapConfig := &Config{
 		stage: stage,
 
-		topLevelBlueprintsFile: flag.Arg(0),
-		emptyNinjaFile:         emptyNinjaFile,
-		runGoTests:             runGoTests,
-		moduleListFile:         moduleListFile,
+		topLevelBlueprintsFile:    args.TopFile,
+		globFile:                  primaryBuilderNinjaGlobFile,
+		runGoTests:                args.RunGoTests,
+		useValidations:            args.UseValidations,
+		primaryBuilderInvocations: invocations,
 	}
 
 	ctx.RegisterBottomUpMutator("bootstrap_plugin_deps", pluginDeps)
@@ -142,33 +224,33 @@
 	ctx.RegisterModuleType("blueprint_go_binary", newGoBinaryModuleFactory(bootstrapConfig, true))
 	ctx.RegisterSingletonType("bootstrap", newSingletonFactory(bootstrapConfig))
 
-	ctx.RegisterSingletonType("glob", globSingletonFactory(ctx))
+	ctx.RegisterSingletonType("glob", globSingletonFactory(bootstrapConfig, ctx))
 
-	deps, errs := ctx.ParseFileList(filepath.Dir(bootstrapConfig.topLevelBlueprintsFile), filesToParse, config)
+	blueprintFiles, errs := ctx.ParseFileList(filepath.Dir(args.TopFile), filesToParse, config)
 	if len(errs) > 0 {
 		fatalErrors(errs)
 	}
 
 	// Add extra ninja file dependencies
-	deps = append(deps, extraNinjaFileDeps...)
+	ninjaDeps = append(ninjaDeps, blueprintFiles...)
 
 	extraDeps, errs := ctx.ResolveDependencies(config)
 	if len(errs) > 0 {
 		fatalErrors(errs)
 	}
-	deps = append(deps, extraDeps...)
+	ninjaDeps = append(ninjaDeps, extraDeps...)
 
-	if docFile != "" {
-		err := writeDocs(ctx, absolutePath(docFile))
+	if args.DocFile != "" {
+		err := writeDocs(ctx, config, absolutePath(args.DocFile))
 		if err != nil {
 			fatalErrors([]error{err})
 		}
-		return
+		return nil
 	}
 
 	if c, ok := config.(ConfigStopBefore); ok {
 		if c.StopBefore() == StopBeforePrepareBuildActions {
-			return
+			return ninjaDeps
 		}
 	}
 
@@ -176,46 +258,45 @@
 	if len(errs) > 0 {
 		fatalErrors(errs)
 	}
-	deps = append(deps, extraDeps...)
+	ninjaDeps = append(ninjaDeps, extraDeps...)
+
+	if c, ok := config.(ConfigStopBefore); ok {
+		if c.StopBefore() == StopBeforeWriteNinja {
+			return ninjaDeps
+		}
+	}
 
 	const outFilePermissions = 0666
-	var out io.Writer
+	var out io.StringWriter
 	var f *os.File
 	var buf *bufio.Writer
 
-	if emptyNinjaFile {
-		if err := ioutil.WriteFile(absolutePath(outFile), []byte(nil), outFilePermissions); err != nil {
+	if args.EmptyNinjaFile {
+		if err := ioutil.WriteFile(absolutePath(args.OutFile), []byte(nil), outFilePermissions); err != nil {
 			fatalf("error writing empty Ninja file: %s", err)
 		}
 	}
 
-	if stage != StageMain || !emptyNinjaFile {
-		f, err = os.OpenFile(absolutePath(outFile), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, outFilePermissions)
+	if stage != StageMain || !args.EmptyNinjaFile {
+		f, err = os.OpenFile(absolutePath(args.OutFile), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, outFilePermissions)
 		if err != nil {
 			fatalf("error opening Ninja file: %s", err)
 		}
-		buf = bufio.NewWriter(f)
+		buf = bufio.NewWriterSize(f, 16*1024*1024)
 		out = buf
 	} else {
-		out = ioutil.Discard
+		out = ioutil.Discard.(io.StringWriter)
 	}
 
-	if globFile != "" {
-		buffer, errs := generateGlobNinjaFile(ctx.Globs)
+	if args.GlobFile != "" {
+		buffer, errs := generateGlobNinjaFile(bootstrapConfig, config, ctx.Globs)
 		if len(errs) > 0 {
 			fatalErrors(errs)
 		}
 
-		err = ioutil.WriteFile(absolutePath(globFile), buffer, outFilePermissions)
+		err = ioutil.WriteFile(absolutePath(args.GlobFile), buffer, outFilePermissions)
 		if err != nil {
-			fatalf("error writing %s: %s", globFile, err)
-		}
-	}
-
-	if depFile != "" {
-		err := deptools.WriteDepFile(absolutePath(depFile), outFile, deps)
-		if err != nil {
-			fatalf("error writing depfile: %s", err)
+			fatalf("error writing %s: %s", args.GlobFile, err)
 		}
 	}
 
@@ -239,21 +320,23 @@
 	}
 
 	if c, ok := config.(ConfigRemoveAbandonedFilesUnder); ok {
-		under, except := c.RemoveAbandonedFilesUnder()
-		err := removeAbandonedFilesUnder(ctx, bootstrapConfig, SrcDir, under, except)
+		under, except := c.RemoveAbandonedFilesUnder(buildDir)
+		err := removeAbandonedFilesUnder(ctx, srcDir, buildDir, under, except)
 		if err != nil {
 			fatalf("error removing abandoned files: %s", err)
 		}
 	}
 
-	if memprofile != "" {
-		f, err := os.Create(absolutePath(memprofile))
+	if args.Memprofile != "" {
+		f, err := os.Create(absolutePath(args.Memprofile))
 		if err != nil {
 			fatalf("error opening memprofile: %s", err)
 		}
 		defer f.Close()
 		pprof.WriteHeapProfile(f)
 	}
+
+	return ninjaDeps
 }
 
 func fatalf(format string, args ...interface{}) {
diff --git a/bootstrap/config.go b/bootstrap/config.go
index 9499aeb..a29ba76 100644
--- a/bootstrap/config.go
+++ b/bootstrap/config.go
@@ -15,6 +15,7 @@
 package bootstrap
 
 import (
+	"fmt"
 	"os"
 	"path/filepath"
 	"runtime"
@@ -23,25 +24,30 @@
 	"github.com/google/blueprint"
 )
 
-func bootstrapVariable(name string, value func() string) blueprint.Variable {
+func bootstrapVariable(name string, value func(BootstrapConfig) string) blueprint.Variable {
 	return pctx.VariableFunc(name, func(config interface{}) (string, error) {
-		return value(), nil
+		c, ok := config.(BootstrapConfig)
+		if !ok {
+			panic(fmt.Sprintf("Bootstrap rules were passed a configuration that does not include theirs, config=%q",
+				config))
+		}
+		return value(c), nil
 	})
 }
 
 var (
 	// These variables are the only configuration needed by the bootstrap
 	// modules.
-	srcDir = bootstrapVariable("srcDir", func() string {
-		return SrcDir
+	srcDirVariable = bootstrapVariable("srcDir", func(c BootstrapConfig) string {
+		return c.SrcDir()
 	})
-	buildDir = bootstrapVariable("buildDir", func() string {
-		return BuildDir
+	buildDirVariable = bootstrapVariable("buildDir", func(c BootstrapConfig) string {
+		return c.BuildDir()
 	})
-	ninjaBuildDir = bootstrapVariable("ninjaBuildDir", func() string {
-		return NinjaBuildDir
+	ninjaBuildDirVariable = bootstrapVariable("ninjaBuildDir", func(c BootstrapConfig) string {
+		return c.NinjaBuildDir()
 	})
-	goRoot = bootstrapVariable("goRoot", func() string {
+	goRootVariable = bootstrapVariable("goRoot", func(c BootstrapConfig) string {
 		goroot := runtime.GOROOT()
 		// Prefer to omit absolute paths from the ninja file
 		if cwd, err := os.Getwd(); err == nil {
@@ -53,19 +59,35 @@
 		}
 		return goroot
 	})
-	compileCmd = bootstrapVariable("compileCmd", func() string {
+	compileCmdVariable = bootstrapVariable("compileCmd", func(c BootstrapConfig) string {
 		return "$goRoot/pkg/tool/" + runtime.GOOS + "_" + runtime.GOARCH + "/compile"
 	})
-	linkCmd = bootstrapVariable("linkCmd", func() string {
+	linkCmdVariable = bootstrapVariable("linkCmd", func(c BootstrapConfig) string {
 		return "$goRoot/pkg/tool/" + runtime.GOOS + "_" + runtime.GOARCH + "/link"
 	})
+	debugFlagsVariable = bootstrapVariable("debugFlags", func(c BootstrapConfig) string {
+		if c.DebugCompilation() {
+			// -N: disable optimizations, -l: disable inlining
+			return "-N -l"
+		} else {
+			return ""
+		}
+	})
 )
 
-type ConfigInterface interface {
-	// GeneratingPrimaryBuilder should return true if this build invocation is
-	// creating a .bootstrap/build.ninja file to be used to build the
-	// primary builder
-	GeneratingPrimaryBuilder() bool
+type BootstrapConfig interface {
+	// The top-level directory of the source tree
+	SrcDir() string
+
+	// The directory where files emitted during bootstrapping are located.
+	// Usually NinjaBuildDir() + "/soong".
+	BuildDir() string
+
+	// The output directory for the build.
+	NinjaBuildDir() string
+
+	// Whether to compile Go code in such a way that it can be debugged
+	DebugCompilation() bool
 }
 
 type ConfigRemoveAbandonedFilesUnder interface {
@@ -73,7 +95,7 @@
 	// - a slice of path prefixes that will be cleaned of files that are no
 	//   longer active targets, but are listed in the .ninja_log.
 	// - a slice of paths that are exempt from cleaning
-	RemoveAbandonedFilesUnder() (under, except []string)
+	RemoveAbandonedFilesUnder(buildDir string) (under, except []string)
 }
 
 type ConfigBlueprintToolLocation interface {
@@ -87,6 +109,7 @@
 
 const (
 	StopBeforePrepareBuildActions StopBefore = 1
+	StopBeforeWriteNinja          StopBefore = 2
 )
 
 type ConfigStopBefore interface {
@@ -100,12 +123,20 @@
 	StageMain
 )
 
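+// PrimaryBuilderInvocation describes one run of the primary builder: the files it
+// reads, the files it writes, and the command line arguments passed to it.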
+type PrimaryBuilderInvocation struct {
+	Inputs  []string
+	Outputs []string
+	Args    []string
+}
+
 type Config struct {
 	stage Stage
 
 	topLevelBlueprintsFile string
+	globFile               string
 
-	emptyNinjaFile bool
 	runGoTests     bool
-	moduleListFile string
+	useValidations bool
+
+	primaryBuilderInvocations []PrimaryBuilderInvocation
 }
diff --git a/bootstrap/glob.go b/bootstrap/glob.go
index 52dbf2f..39c662b 100644
--- a/bootstrap/glob.go
+++ b/bootstrap/glob.go
@@ -17,11 +17,13 @@
 import (
 	"bytes"
 	"fmt"
+	"hash/fnv"
+	"io"
 	"path/filepath"
+	"strconv"
 	"strings"
 
 	"github.com/google/blueprint"
-	"github.com/google/blueprint/deptools"
 	"github.com/google/blueprint/pathtools"
 )
 
@@ -47,36 +49,71 @@
 	// and writes it to $out if it has changed, and writes the directories to $out.d
 	GlobRule = pctx.StaticRule("GlobRule",
 		blueprint.RuleParams{
-			Command:     fmt.Sprintf(`%s -o $out $excludes "$glob"`, globCmd),
+			Command: fmt.Sprintf(`%s -o $out -v %d $args`,
+				globCmd, pathtools.BPGlobArgumentVersion),
 			CommandDeps: []string{globCmd},
-			Description: "glob $glob",
+			Description: "glob",
 
 			Restat:  true,
 			Deps:    blueprint.DepsGCC,
 			Depfile: "$out.d",
 		},
-		"glob", "excludes")
+		"args")
 )
 
 // GlobFileContext is the subset of ModuleContext and SingletonContext needed by GlobFile
 type GlobFileContext interface {
+	Config() interface{}
 	Build(pctx blueprint.PackageContext, params blueprint.BuildParams)
 }
 
 // GlobFile creates a rule to write to fileListFile a list of the files that match the specified
 // pattern but do not match any of the patterns specified in excludes.  The file will include
-// appropriate dependencies written to depFile to regenerate the file if and only if the list of
-// matching files has changed.
-func GlobFile(ctx GlobFileContext, pattern string, excludes []string,
-	fileListFile, depFile string) {
+// appropriate dependencies to regenerate the file if and only if the list of matching files has
+// changed.
+func GlobFile(ctx GlobFileContext, pattern string, excludes []string, fileListFile string) {
+	args := `-p "` + pattern + `"`
+	if len(excludes) > 0 {
+		args += " " + joinWithPrefixAndQuote(excludes, "-e ")
+	}
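+	// For example (illustrative values), pattern "java/**/*.java" with excludes
+	// ["java/**/gen/*"] yields: -p "java/**/*.java" -e "java/**/gen/*"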
+	ctx.Build(pctx, blueprint.BuildParams{
+		Rule:    GlobRule,
+		Outputs: []string{fileListFile},
+		Args: map[string]string{
+			"args": args,
+		},
+		Description: "glob " + pattern,
+	})
+}
+
+// multipleGlobFilesRule creates a rule to write to fileListFile a list of the files that match the specified
+// pattern but do not match any of the patterns specified in excludes.  The file will include
+// appropriate dependencies to regenerate the file if and only if the list of matching files has
+// changed.
+func multipleGlobFilesRule(ctx GlobFileContext, fileListFile string, shard int, globs pathtools.MultipleGlobResults) {
+	args := strings.Builder{}
+
+	for i, glob := range globs {
+		if i != 0 {
+			args.WriteString(" ")
+		}
+		args.WriteString(`-p "`)
+		args.WriteString(glob.Pattern)
+		args.WriteString(`"`)
+		for _, exclude := range glob.Excludes {
+			args.WriteString(` -e "`)
+			args.WriteString(exclude)
+			args.WriteString(`"`)
+		}
+	}
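+	// For example (illustrative values), two globs in this shard produce:
+	//   -p "java/**/*.java" -e "java/**/gen/*" -p "res/**/*.xml"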
 
 	ctx.Build(pctx, blueprint.BuildParams{
 		Rule:    GlobRule,
 		Outputs: []string{fileListFile},
 		Args: map[string]string{
-			"glob":     pattern,
-			"excludes": joinWithPrefixAndQuote(excludes, "-e "),
+			"args": args.String(),
 		},
+		Description: fmt.Sprintf("regenerate globs shard %d of %d", shard, numGlobBuckets),
 	})
 }
 
@@ -111,53 +148,76 @@
 // re-evaluate them whenever the contents of the searched directories change, and retrigger the
 // primary builder if the results change.
 type globSingleton struct {
-	globLister func() []blueprint.GlobPath
+	config     *Config
+	globLister func() pathtools.MultipleGlobResults
 	writeRule  bool
 }
 
-func globSingletonFactory(ctx *blueprint.Context) func() blueprint.Singleton {
+func globSingletonFactory(config *Config, ctx *blueprint.Context) func() blueprint.Singleton {
 	return func() blueprint.Singleton {
 		return &globSingleton{
+			config:     config,
 			globLister: ctx.Globs,
 		}
 	}
 }
 
 func (s *globSingleton) GenerateBuildActions(ctx blueprint.SingletonContext) {
+	// Sort the list of globs into buckets.  A hash function is used instead of sharding so that
+	// adding a new glob doesn't force rerunning all the buckets by shifting them all by 1.
+	globBuckets := make([]pathtools.MultipleGlobResults, numGlobBuckets)
 	for _, g := range s.globLister() {
-		fileListFile := filepath.Join(BuildDir, ".glob", g.Name)
+		bucket := globToBucket(g)
+		globBuckets[bucket] = append(globBuckets[bucket], g)
+	}
+
+	// The directory for the intermediates needs to be different for bootstrap and the primary
+	// builder.
+	globsDir := globsDir(ctx.Config().(BootstrapConfig), s.config.stage)
+
+	for i, globs := range globBuckets {
+		fileListFile := filepath.Join(globsDir, strconv.Itoa(i))
 
 		if s.writeRule {
-			depFile := fileListFile + ".d"
-
-			fileList := strings.Join(g.Files, "\n") + "\n"
-			err := pathtools.WriteFileIfChanged(absolutePath(fileListFile), []byte(fileList), 0666)
+			// Called from generateGlobNinjaFile.  Write out the file list to disk, and add a ninja
+			// rule to run bpglob if any of the dependencies (usually directories that contain
+			// globbed files) have changed.  The file list produced by bpglob should match exactly
+			// with the file written here so that restat can prevent rerunning the primary builder.
+			//
+			// We need to write the file list here so that it has an older modified date
+			// than the build.ninja (otherwise we'd run the primary builder twice on
+			// every new glob)
+			//
+			// We don't need to write the depfile because we're guaranteed that ninja
+			// will run the command at least once (to record it into the ninja_log), so
+			// the depfile will be loaded from that execution.
+			err := pathtools.WriteFileIfChanged(absolutePath(fileListFile), globs.FileList(), 0666)
 			if err != nil {
 				panic(fmt.Errorf("error writing %s: %s", fileListFile, err))
 			}
-			err = deptools.WriteDepFile(absolutePath(depFile), fileListFile, g.Deps)
-			if err != nil {
-				panic(fmt.Errorf("error writing %s: %s", depFile, err))
-			}
 
-			GlobFile(ctx, g.Pattern, g.Excludes, fileListFile, depFile)
+			// Write out the ninja rule to run bpglob.
+			multipleGlobFilesRule(ctx, fileListFile, i, globs)
 		} else {
-			// Make build.ninja depend on the fileListFile
+			// Called from the main Context, make build.ninja depend on the fileListFile.
 			ctx.AddNinjaFileDeps(fileListFile)
 		}
 	}
 }
 
-func generateGlobNinjaFile(globLister func() []blueprint.GlobPath) ([]byte, []error) {
+func generateGlobNinjaFile(bootstrapConfig *Config, config interface{},
+	globLister func() pathtools.MultipleGlobResults) ([]byte, []error) {
+
 	ctx := blueprint.NewContext()
 	ctx.RegisterSingletonType("glob", func() blueprint.Singleton {
 		return &globSingleton{
+			config:     bootstrapConfig,
 			globLister: globLister,
 			writeRule:  true,
 		}
 	})
 
-	extraDeps, errs := ctx.ResolveDependencies(nil)
+	extraDeps, errs := ctx.ResolveDependencies(config)
 	if len(extraDeps) > 0 {
 		return nil, []error{fmt.Errorf("shouldn't have extra deps")}
 	}
@@ -165,7 +225,7 @@
 		return nil, errs
 	}
 
-	extraDeps, errs = ctx.PrepareBuildActions(nil)
+	extraDeps, errs = ctx.PrepareBuildActions(config)
 	if len(extraDeps) > 0 {
 		return nil, []error{fmt.Errorf("shouldn't have extra deps")}
 	}
@@ -181,3 +241,37 @@
 
 	return buf.Bytes(), nil
 }
+
+// globsDir returns a different directory to store glob intermediates for the bootstrap and
+// primary builder executions.
+func globsDir(config BootstrapConfig, stage Stage) string {
+	buildDir := config.BuildDir()
+	if stage == StageMain {
+		return filepath.Join(buildDir, mainSubDir, "globs")
+	} else {
+		return filepath.Join(buildDir, bootstrapSubDir, "globs")
+	}
+}
+
+// GlobFileListFiles returns the list of sharded glob file list files for the main stage.
+func GlobFileListFiles(config BootstrapConfig) []string {
+	globsDir := globsDir(config, StageMain)
+	var fileListFiles []string
+	for i := 0; i < numGlobBuckets; i++ {
+		fileListFiles = append(fileListFiles, filepath.Join(globsDir, strconv.Itoa(i)))
+	}
+	return fileListFiles
+}
+
+const numGlobBuckets = 1024
+
+// globToBucket converts a pathtools.GlobResult into a hashed bucket number in the range
+// [0, numGlobBuckets).
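+// For example, a GlobResult whose Pattern is "java/**/*.java" (illustrative) always
+// hashes to the same bucket, so adding an unrelated glob later does not move it to a
+// different shard.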
+func globToBucket(g pathtools.GlobResult) int {
+	hash := fnv.New32a()
+	io.WriteString(hash, g.Pattern)
+	for _, e := range g.Excludes {
+		io.WriteString(hash, e)
+	}
+	return int(hash.Sum32() % numGlobBuckets)
+}
diff --git a/bootstrap/minibp/main.go b/bootstrap/minibp/main.go
index 1714739..165f058 100644
--- a/bootstrap/minibp/main.go
+++ b/bootstrap/minibp/main.go
@@ -23,24 +23,22 @@
 )
 
 var runAsPrimaryBuilder bool
-var buildPrimaryBuilder bool
 
 func init() {
 	flag.BoolVar(&runAsPrimaryBuilder, "p", false, "run as a primary builder")
 }
 
 type Config struct {
-	generatingPrimaryBuilder bool
 }
 
-func (c Config) GeneratingPrimaryBuilder() bool {
-	return c.generatingPrimaryBuilder
+func (c Config) SrcDir() string {
+	return bootstrap.CmdlineArgs.BuildDir
 }
 
-func (c Config) RemoveAbandonedFilesUnder() (under, exempt []string) {
-	if c.generatingPrimaryBuilder {
-		under = []string{filepath.Join(bootstrap.BuildDir, ".bootstrap")}
-		exempt = []string{filepath.Join(bootstrap.BuildDir, ".bootstrap", "build.ninja")}
+func (c Config) RemoveAbandonedFilesUnder(buildDir string) (under, exempt []string) {
+	if !runAsPrimaryBuilder {
+		under = []string{filepath.Join(buildDir, ".bootstrap")}
+		exempt = []string{filepath.Join(buildDir, ".bootstrap", "build.ninja")}
 	}
 	return
 }
@@ -53,9 +51,6 @@
 		ctx.SetIgnoreUnknownModuleTypes(true)
 	}
 
-	config := Config{
-		generatingPrimaryBuilder: !runAsPrimaryBuilder,
-	}
-
-	bootstrap.Main(ctx, config)
+	config := Config{}
+	bootstrap.Main(ctx, config, !runAsPrimaryBuilder)
 }
diff --git a/bootstrap/writedocs.go b/bootstrap/writedocs.go
index 4edbcab..99df32f 100644
--- a/bootstrap/writedocs.go
+++ b/bootstrap/writedocs.go
@@ -15,7 +15,7 @@
 
 // ModuleTypeDocs returns a list of bpdoc.ModuleType objects that contain information relevant
 // to generating documentation for module types supported by the primary builder.
-func ModuleTypeDocs(ctx *blueprint.Context, factories map[string]reflect.Value) ([]*bpdoc.Package, error) {
+func ModuleTypeDocs(ctx *blueprint.Context, config interface{}, factories map[string]reflect.Value) ([]*bpdoc.Package, error) {
 	// Find the module that's marked as the "primary builder", which means it's
 	// creating the binary that we'll use to generate the non-bootstrap
 	// build.ninja file.
@@ -55,7 +55,7 @@
 		switch m := module.(type) {
 		case (*goPackage):
 			pkgFiles[m.properties.PkgPath] = pathtools.PrefixPaths(m.properties.Srcs,
-				filepath.Join(SrcDir, ctx.ModuleDir(m)))
+				filepath.Join(config.(BootstrapConfig).SrcDir(), ctx.ModuleDir(m)))
 		default:
 			panic(fmt.Errorf("unknown dependency type %T", module))
 		}
@@ -75,8 +75,8 @@
 	return bpdoc.AllPackages(pkgFiles, mergedFactories, ctx.ModuleTypePropertyStructs())
 }
 
-func writeDocs(ctx *blueprint.Context, filename string) error {
-	moduleTypeList, err := ModuleTypeDocs(ctx, nil)
+func writeDocs(ctx *blueprint.Context, config interface{}, filename string) error {
+	moduleTypeList, err := ModuleTypeDocs(ctx, config, nil)
 	if err != nil {
 		return err
 	}
diff --git a/bpfmt/bpfmt.go b/bpfmt/bpfmt.go
index c287ea2..4e6bd1e 100644
--- a/bpfmt/bpfmt.go
+++ b/bpfmt/bpfmt.go
@@ -141,7 +141,7 @@
 		if err := processReader("<standard input>", os.Stdin, os.Stdout); err != nil {
 			report(err)
 		}
-		return
+		os.Exit(exitCode)
 	}
 
 	for i := 0; i < flag.NArg(); i++ {
diff --git a/bpmodify/bpmodify.go b/bpmodify/bpmodify.go
index 29e97d1..29d28f0 100644
--- a/bpmodify/bpmodify.go
+++ b/bpmodify/bpmodify.go
@@ -22,20 +22,25 @@
 
 var (
 	// main operation modes
-	list            = flag.Bool("l", false, "list files that would be modified by bpmodify")
-	write           = flag.Bool("w", false, "write result to (source) file instead of stdout")
-	doDiff          = flag.Bool("d", false, "display diffs instead of rewriting files")
-	sortLists       = flag.Bool("s", false, "sort touched lists, even if they were unsorted")
-	parameter       = flag.String("parameter", "deps", "name of parameter to modify on each module")
-	targetedModules = new(identSet)
-	addIdents       = new(identSet)
-	removeIdents    = new(identSet)
+	list             = flag.Bool("l", false, "list files that would be modified by bpmodify")
+	write            = flag.Bool("w", false, "write result to (source) file instead of stdout")
+	doDiff           = flag.Bool("d", false, "display diffs instead of rewriting files")
+	sortLists        = flag.Bool("s", false, "sort touched lists, even if they were unsorted")
+	targetedModules  = new(identSet)
+	targetedProperty = new(qualifiedProperty)
+	addIdents        = new(identSet)
+	removeIdents     = new(identSet)
+
+	setString *string
 )
 
 func init() {
 	flag.Var(targetedModules, "m", "comma or whitespace separated list of modules on which to operate")
+	flag.Var(targetedProperty, "parameter", "alias to -property=`name`")
+	flag.Var(targetedProperty, "property", "fully qualified `name` of property to modify (default \"deps\")")
 	flag.Var(addIdents, "a", "comma or whitespace separated list of identifiers to add")
 	flag.Var(removeIdents, "r", "comma or whitespace separated list of identifiers to remove")
+	flag.Var(stringPtrFlag{&setString}, "str", "set a string property")
 	flag.Usage = usage
 }
 
@@ -140,24 +145,80 @@
 
 func processModule(module *parser.Module, moduleName string,
 	file *parser.File) (modified bool, errs []error) {
-
-	for _, prop := range module.Properties {
-		if prop.Name == *parameter {
-			modified, errs = processParameter(prop.Value, *parameter, moduleName, file)
-			return
+	prop, err := getRecursiveProperty(module, targetedProperty.name(), targetedProperty.prefixes())
+	if err != nil {
+		return false, []error{err}
+	}
+	if prop == nil {
+		if len(addIdents.idents) > 0 {
+			// We are adding something to a non-existing list prop, so we need to create it first.
+			prop, modified, err = createRecursiveProperty(module, targetedProperty.name(), targetedProperty.prefixes(), &parser.List{})
+		} else if setString != nil {
+			// We're setting a non-existent string property, so we need to create it first.
+			prop, modified, err = createRecursiveProperty(module, targetedProperty.name(), targetedProperty.prefixes(), &parser.String{})
+		} else {
+			// We cannot find an existing prop and we aren't adding anything to it,
+			// so we must be removing something from a non-existing prop, which
+			// makes this a no-op.
+			return false, nil
+		}
+		if err != nil {
+			// This should be unreachable, but handle it anyway for completeness.
+			return false, []error{err}
 		}
 	}
-
-	prop := parser.Property{Name: *parameter, Value: &parser.List{}}
-	modified, errs = processParameter(prop.Value, *parameter, moduleName, file)
-
-	if modified {
-		module.Properties = append(module.Properties, &prop)
-	}
-
+	m, errs := processParameter(prop.Value, targetedProperty.String(), moduleName, file)
+	modified = modified || m
 	return modified, errs
 }
 
+func getRecursiveProperty(module *parser.Module, name string, prefixes []string) (prop *parser.Property, err error) {
+	prop, _, err = getOrCreateRecursiveProperty(module, name, prefixes, nil)
+	return prop, err
+}
+
+func createRecursiveProperty(module *parser.Module, name string, prefixes []string,
+	empty parser.Expression) (prop *parser.Property, modified bool, err error) {
+
+	return getOrCreateRecursiveProperty(module, name, prefixes, empty)
+}
+
+func getOrCreateRecursiveProperty(module *parser.Module, name string, prefixes []string,
+	empty parser.Expression) (prop *parser.Property, modified bool, err error) {
+	m := &module.Map
+	for i, prefix := range prefixes {
+		if prop, found := m.GetProperty(prefix); found {
+			if mm, ok := prop.Value.Eval().(*parser.Map); ok {
+				m = mm
+			} else {
+				// We've found a property in the AST, but it is not of type
+				// *parser.Map, which means we didn't modify the AST.
+				return nil, false, fmt.Errorf("Expected property %q to be a map, found %s",
+					strings.Join(prefixes[:i+1], "."), prop.Value.Type())
+			}
+		} else if empty != nil {
+			mm := &parser.Map{}
+			m.Properties = append(m.Properties, &parser.Property{Name: prefix, Value: mm})
+			m = mm
+			// We've created a new node in the AST. This means the m.GetProperty(name)
+			// check after this for loop must fail, because the node we inserted is an
+			// empty parser.Map, so this function will return with |modified| set to true.
+		} else {
+			return nil, false, nil
+		}
+	}
+	if prop, found := m.GetProperty(name); found {
+		// We've found a property in the AST, which must mean we didn't modify the AST.
+		return prop, false, nil
+	} else if empty != nil {
+		prop = &parser.Property{Name: name, Value: empty}
+		m.Properties = append(m.Properties, prop)
+		return prop, true, nil
+	} else {
+		return nil, false, nil
+	}
+}
+
 func processParameter(value parser.Expression, paramName, moduleName string,
 	file *parser.File) (modified bool, errs []error) {
 	if _, ok := value.(*parser.Variable); ok {
@@ -170,26 +231,37 @@
 			paramName, moduleName)}
 	}
 
-	list, ok := value.(*parser.List)
-	if !ok {
-		return false, []error{fmt.Errorf("expected parameter %s in module %s to be list, found %s",
-			paramName, moduleName, value.Type().String())}
-	}
+	if len(addIdents.idents) > 0 || len(removeIdents.idents) > 0 {
+		list, ok := value.(*parser.List)
+		if !ok {
+			return false, []error{fmt.Errorf("expected parameter %s in module %s to be list, found %s",
+				paramName, moduleName, value.Type().String())}
+		}
 
-	wasSorted := parser.ListIsSorted(list)
+		wasSorted := parser.ListIsSorted(list)
 
-	for _, a := range addIdents.idents {
-		m := parser.AddStringToList(list, a)
-		modified = modified || m
-	}
+		for _, a := range addIdents.idents {
+			m := parser.AddStringToList(list, a)
+			modified = modified || m
+		}
 
-	for _, r := range removeIdents.idents {
-		m := parser.RemoveStringFromList(list, r)
-		modified = modified || m
-	}
+		for _, r := range removeIdents.idents {
+			m := parser.RemoveStringFromList(list, r)
+			modified = modified || m
+		}
 
-	if (wasSorted || *sortLists) && modified {
-		parser.SortList(file, list)
+		if (wasSorted || *sortLists) && modified {
+			parser.SortList(file, list)
+		}
+	} else if setString != nil {
+		str, ok := value.(*parser.String)
+		if !ok {
+			return false, []error{fmt.Errorf("expected parameter %s in module %s to be string, found %s",
+				paramName, moduleName, value.Type().String())}
+		}
+
+		str.Value = *setString
+		modified = true
 	}
 
 	return modified, nil
@@ -232,6 +304,10 @@
 
 	flag.Parse()
 
+	if len(targetedProperty.parts) == 0 {
+		targetedProperty.Set("deps")
+	}
+
 	if flag.NArg() == 0 {
 		if *write {
 			report(fmt.Errorf("error: cannot use -w with standard input"))
@@ -248,8 +324,8 @@
 		return
 	}
 
-	if len(addIdents.idents) == 0 && len(removeIdents.idents) == 0 {
-		report(fmt.Errorf("-a or -r parameter is required"))
+	if len(addIdents.idents) == 0 && len(removeIdents.idents) == 0 && setString == nil {
+		report(fmt.Errorf("-a, -r or -str parameter is required"))
 		return
 	}
 
@@ -296,6 +372,22 @@
 
 }
 
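+// stringPtrFlag implements flag.Value for an optional string flag: Set points the
+// target *string at the given value, so the target stays nil when the flag was never
+// passed.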
+type stringPtrFlag struct {
+	s **string
+}
+
+func (f stringPtrFlag) Set(s string) error {
+	*f.s = &s
+	return nil
+}
+
+func (f stringPtrFlag) String() string {
+	if f.s == nil || *f.s == nil {
+		return ""
+	}
+	return **f.s
+}
+
 type identSet struct {
 	idents []string
 	all    bool
@@ -318,3 +410,38 @@
 func (m *identSet) Get() interface{} {
 	return m.idents
 }
+
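+// qualifiedProperty is a dotted property path such as "arch.arm.deps": Set splits it
+// into prefixes ["arch", "arm"] and a final name "deps".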
+type qualifiedProperty struct {
+	parts []string
+}
+
+var _ flag.Getter = (*qualifiedProperty)(nil)
+
+func (p *qualifiedProperty) name() string {
+	return p.parts[len(p.parts)-1]
+}
+
+func (p *qualifiedProperty) prefixes() []string {
+	return p.parts[:len(p.parts)-1]
+}
+
+func (p *qualifiedProperty) String() string {
+	return strings.Join(p.parts, ".")
+}
+
+func (p *qualifiedProperty) Set(s string) error {
+	p.parts = strings.Split(s, ".")
+	if len(p.parts) == 0 {
+		return fmt.Errorf("%q is not a valid property name", s)
+	}
+	for _, part := range p.parts {
+		if part == "" {
+			return fmt.Errorf("%q is not a valid property name", s)
+		}
+	}
+	return nil
+}
+
+func (p *qualifiedProperty) Get() interface{} {
+	return p.parts
+}
diff --git a/bpmodify/bpmodify_test.go b/bpmodify/bpmodify_test.go
new file mode 100644
index 0000000..a92d439
--- /dev/null
+++ b/bpmodify/bpmodify_test.go
@@ -0,0 +1,338 @@
+// Copyright 2020 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+	"strings"
+	"testing"
+
+	"github.com/google/blueprint/parser"
+	"github.com/google/blueprint/proptools"
+)
+
+var testCases = []struct {
+	name      string
+	input     string
+	output    string
+	property  string
+	addSet    string
+	removeSet string
+	setString *string
+}{
+	{
+		name: "add",
+		input: `
+			cc_foo {
+				name: "foo",
+			}
+		`,
+		output: `
+			cc_foo {
+				name: "foo",
+				deps: ["bar"],
+			}
+		`,
+		property: "deps",
+		addSet:   "bar",
+	},
+	{
+		name: "remove",
+		input: `
+			cc_foo {
+				name: "foo",
+				deps: ["bar"],
+			}
+		`,
+		output: `
+			cc_foo {
+				name: "foo",
+				deps: [],
+			}
+		`,
+		property:  "deps",
+		removeSet: "bar",
+	},
+	{
+		name: "nested add",
+		input: `
+			cc_foo {
+				name: "foo",
+			}
+		`,
+		output: `
+			cc_foo {
+				name: "foo",
+				arch: {
+					arm: {
+						deps: [
+							"dep2",
+							"nested_dep",],
+					},
+				},
+			}
+		`,
+		property: "arch.arm.deps",
+		addSet:   "nested_dep,dep2",
+	},
+	{
+		name: "nested remove",
+		input: `
+			cc_foo {
+				name: "foo",
+				arch: {
+					arm: {
+						deps: [
+							"dep2",
+							"nested_dep",
+						],
+					},
+				},
+			}
+		`,
+		output: `
+			cc_foo {
+				name: "foo",
+				arch: {
+					arm: {
+						deps: [
+						],
+					},
+				},
+			}
+		`,
+		property:  "arch.arm.deps",
+		removeSet: "nested_dep,dep2",
+	},
+	{
+		name: "add existing",
+		input: `
+			cc_foo {
+				name: "foo",
+				arch: {
+					arm: {
+						deps: [
+							"nested_dep",
+							"dep2",
+						],
+					},
+				},
+			}
+		`,
+		output: `
+			cc_foo {
+				name: "foo",
+				arch: {
+					arm: {
+						deps: [
+							"nested_dep",
+							"dep2",
+						],
+					},
+				},
+			}
+		`,
+		property: "arch.arm.deps",
+		addSet:   "dep2,dep2",
+	},
+	{
+		name: "remove missing",
+		input: `
+			cc_foo {
+				name: "foo",
+				arch: {
+					arm: {
+						deps: [
+							"nested_dep",
+							"dep2",
+						],
+					},
+				},
+			}
+		`,
+		output: `
+			cc_foo {
+				name: "foo",
+				arch: {
+					arm: {
+						deps: [
+							"nested_dep",
+							"dep2",
+						],
+					},
+				},
+			}
+		`,
+		property:  "arch.arm.deps",
+		removeSet: "dep3,dep4",
+	},
+	{
+		name: "remove non existent",
+		input: `
+			cc_foo {
+				name: "foo",
+			}
+		`,
+		output: `
+			cc_foo {
+				name: "foo",
+			}
+		`,
+		property:  "deps",
+		removeSet: "bar",
+	},
+	{
+		name: "remove non existent nested",
+		input: `
+			cc_foo {
+				name: "foo",
+				arch: {},
+			}
+		`,
+		output: `
+			cc_foo {
+				name: "foo",
+				arch: {},
+			}
+		`,
+		property:  "arch.arm.deps",
+		removeSet: "dep3,dep4",
+	},
+	{
+		name: "add numeric sorted",
+		input: `
+			cc_foo {
+				name: "foo",
+				versions: ["1", "2"],
+			}
+		`,
+		output: `
+			cc_foo {
+				name: "foo",
+				versions: [
+					"1",
+					"2",
+					"10",
+				],
+			}
+		`,
+		property: "versions",
+		addSet:   "10",
+	},
+	{
+		name: "add mixed sorted",
+		input: `
+			cc_foo {
+				name: "foo",
+				deps: ["bar-v1-bar", "bar-v2-bar"],
+			}
+		`,
+		output: `
+			cc_foo {
+				name: "foo",
+				deps: [
+					"bar-v1-bar",
+					"bar-v2-bar",
+					"bar-v10-bar",
+				],
+			}
+		`,
+		property: "deps",
+		addSet:   "bar-v10-bar",
+	},
+	{
+		name: "set string",
+		input: `
+			cc_foo {
+				name: "foo",
+			}
+		`,
+		output: `
+			cc_foo {
+				name: "foo",
+				foo: "bar",
+			}
+		`,
+		property:  "foo",
+		setString: proptools.StringPtr("bar"),
+	},
+	{
+		name: "set existing string",
+		input: `
+			cc_foo {
+				name: "foo",
+				foo: "baz",
+			}
+		`,
+		output: `
+			cc_foo {
+				name: "foo",
+				foo: "bar",
+			}
+		`,
+		property:  "foo",
+		setString: proptools.StringPtr("bar"),
+	},
+}
+
+func simplifyModuleDefinition(def string) string {
+	var result string
+	for _, line := range strings.Split(def, "\n") {
+		result += strings.TrimSpace(line)
+	}
+	return result
+}
+
+func TestProcessModule(t *testing.T) {
+	for i, testCase := range testCases {
+		t.Run(testCase.name, func(t *testing.T) {
+			targetedProperty.Set(testCase.property)
+			addIdents.Set(testCase.addSet)
+			removeIdents.Set(testCase.removeSet)
+			setString = testCase.setString
+
+			inAst, errs := parser.ParseAndEval("", strings.NewReader(testCase.input), parser.NewScope(nil))
+			if len(errs) > 0 {
+				for _, err := range errs {
+					t.Errorf("  %s", err)
+				}
+				t.Errorf("failed to parse:")
+				t.Errorf("%+v", testCase)
+				t.FailNow()
+			}
+
+			if inModule, ok := inAst.Defs[0].(*parser.Module); !ok {
+				t.Fatalf("  input must only contain a single module definition: %s", testCase.input)
+			} else {
+				_, errs := processModule(inModule, "", inAst)
+				if len(errs) > 0 {
+					t.Errorf("test case %d:", i)
+					for _, err := range errs {
+						t.Errorf("  %s", err)
+					}
+				}
+				inModuleText, _ := parser.Print(inAst)
+				inModuleString := string(inModuleText)
+				if simplifyModuleDefinition(inModuleString) != simplifyModuleDefinition(testCase.output) {
+					t.Errorf("test case %d:", i)
+					t.Errorf("expected module definition:")
+					t.Errorf("  %s", testCase.output)
+					t.Errorf("actual module definition:")
+					t.Errorf("  %s", inModuleString)
+				}
+			}
+		})
+	}
+
+}
diff --git a/context.go b/context.go
index 3018209..e891c23 100644
--- a/context.go
+++ b/context.go
@@ -17,6 +17,7 @@
 import (
 	"bytes"
 	"context"
+	"encoding/json"
 	"errors"
 	"fmt"
 	"io"
@@ -110,13 +111,28 @@
 
 	// set lazily by sortedModuleGroups
 	cachedSortedModuleGroups []*moduleGroup
+	// caches whether deps have been modified, to determine whether cachedSortedModuleGroups needs to be recalculated
+	cachedDepsModified bool
 
-	globs    map[string]GlobPath
+	globs    map[globKey]pathtools.GlobResult
 	globLock sync.Mutex
 
 	srcDir         string
 	fs             pathtools.FileSystem
 	moduleListFile string
+
+	// Mutators indexed by the ID of the provider associated with them.  Not all mutators will
+	// have providers, and not all providers will have a mutator, or if they do the mutator may
+	// not be registered in this Context.
+	providerMutators []*mutatorInfo
+
+	// The currently running mutator
+	startedMutator *mutatorInfo
+	// True for any mutators that have already run over all modules
+	finishedMutators map[*mutatorInfo]bool
+
+	// Can be set by tests to avoid invalidating Module values after mutators.
+	skipCloneModulesAfterMutators bool
 }
 
 // An Error describes a problem that was encountered that is related to a
@@ -159,22 +175,69 @@
 }
 
 type moduleAlias struct {
-	variantName       string
-	variant           variationMap
-	dependencyVariant variationMap
-	target            *moduleInfo
+	variant variant
+	target  *moduleInfo
+}
+
+func (m *moduleAlias) alias() *moduleAlias              { return m }
+func (m *moduleAlias) module() *moduleInfo              { return nil }
+func (m *moduleAlias) moduleOrAliasTarget() *moduleInfo { return m.target }
+func (m *moduleAlias) moduleOrAliasVariant() variant    { return m.variant }
+
+func (m *moduleInfo) alias() *moduleAlias              { return nil }
+func (m *moduleInfo) module() *moduleInfo              { return m }
+func (m *moduleInfo) moduleOrAliasTarget() *moduleInfo { return m }
+func (m *moduleInfo) moduleOrAliasVariant() variant    { return m.variant }
+
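+// moduleOrAlias abstracts over *moduleInfo and *moduleAlias: exactly one of module()
+// and alias() returns non-nil, and moduleOrAliasTarget() resolves an alias to its
+// target module.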
+type moduleOrAlias interface {
+	alias() *moduleAlias
+	module() *moduleInfo
+	moduleOrAliasTarget() *moduleInfo
+	moduleOrAliasVariant() variant
+}
+
+type modulesOrAliases []moduleOrAlias
+
+func (l modulesOrAliases) firstModule() *moduleInfo {
+	for _, moduleOrAlias := range l {
+		if m := moduleOrAlias.module(); m != nil {
+			return m
+		}
+	}
+	panic(fmt.Errorf("no first module!"))
+}
+
+func (l modulesOrAliases) lastModule() *moduleInfo {
+	for i := range l {
+		if m := l[len(l)-1-i].module(); m != nil {
+			return m
+		}
+	}
+	panic(fmt.Errorf("no last module!"))
 }
 
 type moduleGroup struct {
 	name      string
 	ninjaName string
 
-	modules []*moduleInfo
-	aliases []*moduleAlias
+	modules modulesOrAliases
 
 	namespace Namespace
 }
 
+func (group *moduleGroup) moduleOrAliasByVariantName(name string) moduleOrAlias {
+	for _, module := range group.modules {
+		if module.moduleOrAliasVariant().name == name {
+			return module
+		}
+	}
+	return nil
+}
+
+func (group *moduleGroup) moduleByVariantName(name string) *moduleInfo {
+	return group.moduleOrAliasByVariantName(name).module()
+}
+
 type moduleInfo struct {
 	// set during Parse
 	typeName          string
@@ -184,9 +247,7 @@
 	propertyPos       map[string]scanner.Position
 	createdBy         *moduleInfo
 
-	variantName       string
-	variant           variationMap
-	dependencyVariant variationMap
+	variant variant
 
 	logicModule Module
 	group       *moduleGroup
@@ -201,15 +262,28 @@
 	forwardDeps []*moduleInfo
 	directDeps  []depInfo
 
-	// used by parallelVisitAllBottomUp
+	// used by parallelVisit
 	waitingCount int
 
 	// set during each runMutator
-	splitModules []*moduleInfo
-	aliasTarget  *moduleInfo
+	splitModules modulesOrAliases
 
 	// set during PrepareBuildActions
 	actionDefs localBuildActions
+
+	providers []interface{}
+
+	startedMutator  *mutatorInfo
+	finishedMutator *mutatorInfo
+
+	startedGenerateBuildActions  bool
+	finishedGenerateBuildActions bool
+}
+
+type variant struct {
+	name                 string
+	variations           variationMap
+	dependencyVariations variationMap
 }
 
 type depInfo struct {
@@ -232,8 +306,8 @@
 
 func (module *moduleInfo) String() string {
 	s := fmt.Sprintf("module %q", module.Name())
-	if module.variantName != "" {
-		s += fmt.Sprintf(" variant %q", module.variantName)
+	if module.variant.name != "" {
+		s += fmt.Sprintf(" variant %q", module.variant.name)
 	}
 	if module.createdBy != nil {
 		s += fmt.Sprintf(" (created by %s)", module.createdBy)
@@ -273,10 +347,10 @@
 }
 
 // Compare this variationMap to another one.  Returns true if every entry in this map
-// is either the same in the other map or doesn't exist in the other map.
-func (vm variationMap) subset(other variationMap) bool {
+// exists and has the same value in the other map.
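+// For example, {"arch": "arm"} is a subset of {"arch": "arm", "os": "linux"}, but not
+// vice versa.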
+func (vm variationMap) subsetOf(other variationMap) bool {
 	for k, v1 := range vm {
-		if v2, ok := other[k]; ok && v1 != v2 {
+		if v2, ok := other[k]; !ok || v1 != v2 {
 			return false
 		}
 	}
@@ -311,8 +385,9 @@
 		moduleFactories:    make(map[string]ModuleFactory),
 		nameInterface:      NewSimpleNameInterface(),
 		moduleInfo:         make(map[Module]*moduleInfo),
-		globs:              make(map[string]GlobPath),
+		globs:              make(map[globKey]pathtools.GlobResult),
 		fs:                 pathtools.OsFs,
+		finishedMutators:   make(map[*mutatorInfo]bool),
 		ninjaBuildDir:      nil,
 		requiredNinjaMajor: 1,
 		requiredNinjaMinor: 7,
@@ -672,6 +747,7 @@
 
 	type newModuleInfo struct {
 		*moduleInfo
+		deps  []string
 		added chan<- struct{}
 	}
 
@@ -697,12 +773,12 @@
 			// registered by name. This allows load hooks to set and/or modify any aspect
 			// of the module (including names) using information that is not available when
 			// the module factory is called.
-			newModules, errs := runAndRemoveLoadHooks(c, config, module, &scopedModuleFactories)
+			newModules, newDeps, errs := runAndRemoveLoadHooks(c, config, module, &scopedModuleFactories)
 			if len(errs) > 0 {
 				return errs
 			}
 
-			moduleCh <- newModuleInfo{module, addedCh}
+			moduleCh <- newModuleInfo{module, newDeps, addedCh}
 			<-addedCh
 			for _, n := range newModules {
 				errs = addModule(n)
@@ -745,6 +821,7 @@
 		doneCh <- struct{}{}
 	}()
 
+	var hookDeps []string
 loop:
 	for {
 		select {
@@ -752,6 +829,7 @@
 			errs = append(errs, newErrs...)
 		case module := <-moduleCh:
 			newErrs := c.addModule(module.moduleInfo)
+			hookDeps = append(hookDeps, module.deps...)
 			if module.added != nil {
 				module.added <- struct{}{}
 			}
@@ -766,6 +844,7 @@
 		}
 	}
 
+	deps = append(deps, hookDeps...)
 	return deps, errs
 }
 
@@ -1224,15 +1303,44 @@
 	return newLogicModule, newProperties
 }
 
+func newVariant(module *moduleInfo, mutatorName string, variationName string,
+	local bool) variant {
+
+	newVariantName := module.variant.name
+	if variationName != "" {
+		if newVariantName == "" {
+			newVariantName = variationName
+		} else {
+			newVariantName += "_" + variationName
+		}
+	}
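+	// e.g. splitting a module whose variant name is "linux_glibc" (illustrative) with
+	// variation "arm" produces the variant name "linux_glibc_arm".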
+
+	newVariations := module.variant.variations.clone()
+	if newVariations == nil {
+		newVariations = make(variationMap)
+	}
+	newVariations[mutatorName] = variationName
+
+	newDependencyVariations := module.variant.dependencyVariations.clone()
+	if !local {
+		if newDependencyVariations == nil {
+			newDependencyVariations = make(variationMap)
+		}
+		newDependencyVariations[mutatorName] = variationName
+	}
+
+	return variant{newVariantName, newVariations, newDependencyVariations}
+}
+
 func (c *Context) createVariations(origModule *moduleInfo, mutatorName string,
-	defaultVariationName *string, variationNames []string) ([]*moduleInfo, []error) {
+	defaultVariationName *string, variationNames []string, local bool) (modulesOrAliases, []error) {
 
 	if len(variationNames) == 0 {
 		panic(fmt.Errorf("mutator %q passed zero-length variation list for module %q",
 			mutatorName, origModule.Name()))
 	}
 
-	newModules := []*moduleInfo{}
+	var newModules modulesOrAliases
 
 	var errs []error
 
@@ -1249,27 +1357,15 @@
 			newLogicModule, newProperties = c.cloneLogicModule(origModule)
 		}
 
-		newVariant := origModule.variant.clone()
-		if newVariant == nil {
-			newVariant = make(variationMap)
-		}
-		newVariant[mutatorName] = variationName
-
 		m := *origModule
 		newModule := &m
-		newModule.directDeps = append([]depInfo{}, origModule.directDeps...)
+		newModule.directDeps = append([]depInfo(nil), origModule.directDeps...)
+		newModule.reverseDeps = nil
+		newModule.forwardDeps = nil
 		newModule.logicModule = newLogicModule
-		newModule.variant = newVariant
-		newModule.dependencyVariant = origModule.dependencyVariant.clone()
+		newModule.variant = newVariant(origModule, mutatorName, variationName, local)
 		newModule.properties = newProperties
-
-		if variationName != "" {
-			if newModule.variantName == "" {
-				newModule.variantName = variationName
-			} else {
-				newModule.variantName += "_" + variationName
-			}
-		}
+		newModule.providers = append([]interface{}(nil), origModule.providers...)
 
 		newModules = append(newModules, newModule)
 
@@ -1296,16 +1392,16 @@
 		if dep.module.logicModule == nil {
 			var newDep *moduleInfo
 			for _, m := range dep.module.splitModules {
-				if m.variant[mutatorName] == variationName {
-					newDep = m
+				if m.moduleOrAliasVariant().variations[mutatorName] == variationName {
+					newDep = m.moduleOrAliasTarget()
 					break
 				}
 			}
 			if newDep == nil && defaultVariationName != nil {
 				// give it a second chance; match with defaultVariationName
 				for _, m := range dep.module.splitModules {
-					if m.variant[mutatorName] == *defaultVariationName {
-						newDep = m
+					if m.moduleOrAliasVariant().variations[mutatorName] == *defaultVariationName {
+						newDep = m.moduleOrAliasTarget()
 						break
 					}
 				}
@@ -1325,27 +1421,27 @@
 	return errs
 }
 
-func (c *Context) prettyPrintVariant(variant variationMap) string {
-	names := make([]string, 0, len(variant))
+func (c *Context) prettyPrintVariant(variations variationMap) string {
+	names := make([]string, 0, len(variations))
 	for _, m := range c.variantMutatorNames {
-		if v, ok := variant[m]; ok {
+		if v, ok := variations[m]; ok {
 			names = append(names, m+":"+v)
 		}
 	}
 
-	return strings.Join(names, ", ")
+	return strings.Join(names, ",")
 }
 
 func (c *Context) prettyPrintGroupVariants(group *moduleGroup) string {
 	var variants []string
-	for _, mod := range group.modules {
-		variants = append(variants, c.prettyPrintVariant(mod.variant))
+	for _, moduleOrAlias := range group.modules {
+		if mod := moduleOrAlias.module(); mod != nil {
+			variants = append(variants, c.prettyPrintVariant(mod.variant.variations))
+		} else if alias := moduleOrAlias.alias(); alias != nil {
+			variants = append(variants, c.prettyPrintVariant(alias.variant.variations)+
+				" (alias to "+c.prettyPrintVariant(alias.target.variant.variations)+")")
+		}
 	}
-	for _, mod := range group.aliases {
-		variants = append(variants, c.prettyPrintVariant(mod.variant)+
-			"(alias to "+c.prettyPrintVariant(mod.target.variant)+")")
-	}
-	sort.Strings(variants)
 	return strings.Join(variants, "\n  ")
 }
 
@@ -1424,7 +1520,7 @@
 
 	group := &moduleGroup{
 		name:    name,
-		modules: []*moduleInfo{module},
+		modules: modulesOrAliases{module},
 	}
 	module.group = group
 	namespace, errs := c.nameInterface.NewModule(
@@ -1454,6 +1550,8 @@
 
 func (c *Context) resolveDependencies(ctx context.Context, config interface{}) (deps []string, errs []error) {
 	pprof.Do(ctx, pprof.Labels("blueprint", "ResolveDependencies"), func(ctx context.Context) {
+		c.initProviders()
+
 		c.liveGlobals = newLiveTracker(config)
 
 		deps, errs = c.generateSingletonBuildActions(config, c.preSingletonInfo, c.liveGlobals)
@@ -1473,7 +1571,9 @@
 		}
 		deps = append(deps, mutatorDeps...)
 
-		c.cloneModules()
+		if !c.skipCloneModulesAfterMutators {
+			c.cloneModules()
+		}
 
 		c.dependenciesReady = true
 	})
@@ -1508,43 +1608,32 @@
 	}
 }
 
-// findMatchingVariant searches the moduleGroup for a module with the same variant as module,
-// and returns the matching module, or nil if one is not found.
-func (c *Context) findMatchingVariant(module *moduleInfo, possible *moduleGroup, reverse bool) *moduleInfo {
-	if len(possible.modules) == 1 {
-		return possible.modules[0]
-	} else {
-		var variantToMatch variationMap
-		if !reverse {
-			// For forward dependency, ignore local variants by matching against
-			// dependencyVariant which doesn't have the local variants
-			variantToMatch = module.dependencyVariant
-		} else {
-			// For reverse dependency, use all the variants
-			variantToMatch = module.variant
-		}
-		for _, m := range possible.modules {
-			if m.variant.equal(variantToMatch) {
-				return m
-			}
-		}
-		for _, m := range possible.aliases {
-			if m.variant.equal(variantToMatch) {
-				return m.target
+// findExactVariantOrSingle searches the moduleGroup for a module with the same variant as module,
+// and returns the matching module, or nil if one is not found.  A group with exactly one module
+// is always considered matching.
+func findExactVariantOrSingle(module *moduleInfo, possible *moduleGroup, reverse bool) *moduleInfo {
+	found, _ := findVariant(module, possible, nil, false, reverse)
+	if found == nil {
+		for _, moduleOrAlias := range possible.modules {
+			if m := moduleOrAlias.module(); m != nil {
+				if found != nil {
+					// more than one possible match, give up
+					return nil
+				}
+				found = m
 			}
 		}
 	}
-
-	return nil
+	return found
 }
 
-func (c *Context) addDependency(module *moduleInfo, tag DependencyTag, depName string) []error {
+func (c *Context) addDependency(module *moduleInfo, tag DependencyTag, depName string) (*moduleInfo, []error) {
 	if _, ok := tag.(BaseDependencyTag); ok {
 		panic("BaseDependencyTag is not allowed to be used directly!")
 	}
 
 	if depName == module.Name() {
-		return []error{&BlueprintError{
+		return nil, []error{&BlueprintError{
 			Err: fmt.Errorf("%q depends on itself", depName),
 			Pos: module.pos,
 		}}
@@ -1552,24 +1641,24 @@
 
 	possibleDeps := c.moduleGroupFromName(depName, module.namespace())
 	if possibleDeps == nil {
-		return c.discoveredMissingDependencies(module, depName)
+		return nil, c.discoveredMissingDependencies(module, depName, nil)
 	}
 
-	if m := c.findMatchingVariant(module, possibleDeps, false); m != nil {
+	if m := findExactVariantOrSingle(module, possibleDeps, false); m != nil {
 		module.newDirectDeps = append(module.newDirectDeps, depInfo{m, tag})
 		atomic.AddUint32(&c.depsModified, 1)
-		return nil
+		return m, nil
 	}
 
 	if c.allowMissingDependencies {
 		// Allow missing variants.
-		return c.discoveredMissingDependencies(module, depName+c.prettyPrintVariant(module.dependencyVariant))
+		return nil, c.discoveredMissingDependencies(module, depName, module.variant.dependencyVariations)
 	}
 
-	return []error{&BlueprintError{
+	return nil, []error{&BlueprintError{
 		Err: fmt.Errorf("dependency %q of %q missing variant:\n  %s\navailable variants:\n  %s",
 			depName, module.Name(),
-			c.prettyPrintVariant(module.dependencyVariant),
+			c.prettyPrintVariant(module.variant.dependencyVariations),
 			c.prettyPrintGroupVariants(possibleDeps)),
 		Pos: module.pos,
 	}}
@@ -1592,41 +1681,38 @@
 		}}
 	}
 
-	if m := c.findMatchingVariant(module, possibleDeps, true); m != nil {
+	if m := findExactVariantOrSingle(module, possibleDeps, true); m != nil {
 		return m, nil
 	}
 
 	if c.allowMissingDependencies {
 		// Allow missing variants.
-		return module, c.discoveredMissingDependencies(module, destName+c.prettyPrintVariant(module.dependencyVariant))
+		return module, c.discoveredMissingDependencies(module, destName, module.variant.dependencyVariations)
 	}
 
 	return nil, []error{&BlueprintError{
 		Err: fmt.Errorf("reverse dependency %q of %q missing variant:\n  %s\navailable variants:\n  %s",
 			destName, module.Name(),
-			c.prettyPrintVariant(module.dependencyVariant),
+			c.prettyPrintVariant(module.variant.dependencyVariations),
 			c.prettyPrintGroupVariants(possibleDeps)),
 		Pos: module.pos,
 	}}
 }
 
-func (c *Context) addVariationDependency(module *moduleInfo, variations []Variation,
-	tag DependencyTag, depName string, far bool) []error {
-	if _, ok := tag.(BaseDependencyTag); ok {
-		panic("BaseDependencyTag is not allowed to be used directly!")
-	}
-
-	possibleDeps := c.moduleGroupFromName(depName, module.namespace())
-	if possibleDeps == nil {
-		return c.discoveredMissingDependencies(module, depName)
-	}
-
-	// We can't just append variant.Variant to module.dependencyVariants.variantName and
+func findVariant(module *moduleInfo, possibleDeps *moduleGroup, variations []Variation, far bool, reverse bool) (*moduleInfo, variationMap) {
+	// We can't just append variant.Variant to module.dependencyVariant.variantName and
 	// compare the strings because the result won't be in mutator registration order.
 	// Create a new map instead, and then deep compare the maps.
 	var newVariant variationMap
 	if !far {
-		newVariant = module.dependencyVariant.clone()
+		if !reverse {
+			// For forward dependency, ignore local variants by matching against
+			// dependencyVariant which doesn't have the local variants
+			newVariant = module.variant.dependencyVariations.clone()
+		} else {
+			// For reverse dependency, use all the variants
+			newVariant = module.variant.variations.clone()
+		}
 	}
 	for _, v := range variations {
 		if newVariant == nil {
@@ -1637,7 +1723,7 @@
 
 	check := func(variant variationMap) bool {
 		if far {
-			return variant.subset(newVariant)
+			return newVariant.subsetOf(variant)
 		} else {
 			return variant.equal(newVariant)
 		}
@@ -1645,27 +1731,34 @@
 
 	var foundDep *moduleInfo
 	for _, m := range possibleDeps.modules {
-		if check(m.variant) {
-			foundDep = m
+		if check(m.moduleOrAliasVariant().variations) {
+			foundDep = m.moduleOrAliasTarget()
 			break
 		}
 	}
 
-	if foundDep == nil {
-		for _, m := range possibleDeps.aliases {
-			if check(m.variant) {
-				foundDep = m.target
-				break
-			}
-		}
+	return foundDep, newVariant
+}
+
+func (c *Context) addVariationDependency(module *moduleInfo, variations []Variation,
+	tag DependencyTag, depName string, far bool) (*moduleInfo, []error) {
+	if _, ok := tag.(BaseDependencyTag); ok {
+		panic("BaseDependencyTag is not allowed to be used directly!")
 	}
 
+	possibleDeps := c.moduleGroupFromName(depName, module.namespace())
+	if possibleDeps == nil {
+		return nil, c.discoveredMissingDependencies(module, depName, nil)
+	}
+
+	foundDep, newVariant := findVariant(module, possibleDeps, variations, far, false)
+
 	if foundDep == nil {
 		if c.allowMissingDependencies {
 			// Allow missing variants.
-			return c.discoveredMissingDependencies(module, depName+c.prettyPrintVariant(newVariant))
+			return nil, c.discoveredMissingDependencies(module, depName, newVariant)
 		}
-		return []error{&BlueprintError{
+		return nil, []error{&BlueprintError{
 			Err: fmt.Errorf("dependency %q of %q missing variant:\n  %s\navailable variants:\n  %s",
 				depName, module.Name(),
 				c.prettyPrintVariant(newVariant),
@@ -1675,7 +1768,7 @@
 	}
 
 	if module == foundDep {
-		return []error{&BlueprintError{
+		return nil, []error{&BlueprintError{
 			Err: fmt.Errorf("%q depends on itself", depName),
 			Pos: module.pos,
 		}}
@@ -1684,31 +1777,33 @@
 	// that module is earlier in the module list than this one, since we always
 	// run GenerateBuildActions in order for the variants of a module
 	if foundDep.group == module.group && beforeInModuleList(module, foundDep, module.group.modules) {
-		return []error{&BlueprintError{
+		return nil, []error{&BlueprintError{
 			Err: fmt.Errorf("%q depends on later version of itself", depName),
 			Pos: module.pos,
 		}}
 	}
 	module.newDirectDeps = append(module.newDirectDeps, depInfo{foundDep, tag})
 	atomic.AddUint32(&c.depsModified, 1)
-	return nil
+	return foundDep, nil
 }
 
 func (c *Context) addInterVariantDependency(origModule *moduleInfo, tag DependencyTag,
-	from, to Module) {
+	from, to Module) *moduleInfo {
 	if _, ok := tag.(BaseDependencyTag); ok {
 		panic("BaseDependencyTag is not allowed to be used directly!")
 	}
 
 	var fromInfo, toInfo *moduleInfo
-	for _, m := range origModule.splitModules {
-		if m.logicModule == from {
-			fromInfo = m
-		}
-		if m.logicModule == to {
-			toInfo = m
-			if fromInfo != nil {
-				panic(fmt.Errorf("%q depends on later version of itself", origModule.Name()))
+	for _, moduleOrAlias := range origModule.splitModules {
+		if m := moduleOrAlias.module(); m != nil {
+			if m.logicModule == from {
+				fromInfo = m
+			}
+			if m.logicModule == to {
+				toInfo = m
+				if fromInfo != nil {
+					panic(fmt.Errorf("%q depends on later version of itself", origModule.Name()))
+				}
 			}
 		}
 	}
@@ -1720,6 +1815,7 @@
 
 	fromInfo.newDirectDeps = append(fromInfo.newDirectDeps, depInfo{toInfo, tag})
 	atomic.AddUint32(&c.depsModified, 1)
+	return toInfo
 }
 
 // findBlueprintDescendants returns a map linking parent Blueprints files to child Blueprints files
@@ -1768,7 +1864,7 @@
 	// returns the list of modules that are waiting for this module
 	propagate(module *moduleInfo) []*moduleInfo
 	// visit modules in order
-	visit(modules []*moduleInfo, visit func(*moduleInfo) bool)
+	visit(modules []*moduleInfo, visit func(*moduleInfo, chan<- pauseSpec) bool)
 }
 
 type unorderedVisitorImpl struct{}
@@ -1781,9 +1877,9 @@
 	return nil
 }
 
-func (unorderedVisitorImpl) visit(modules []*moduleInfo, visit func(*moduleInfo) bool) {
+func (unorderedVisitorImpl) visit(modules []*moduleInfo, visit func(*moduleInfo, chan<- pauseSpec) bool) {
 	for _, module := range modules {
-		if visit(module) {
+		if visit(module, nil) {
 			return
 		}
 	}
@@ -1799,9 +1895,9 @@
 	return module.reverseDeps
 }
 
-func (bottomUpVisitorImpl) visit(modules []*moduleInfo, visit func(*moduleInfo) bool) {
+func (bottomUpVisitorImpl) visit(modules []*moduleInfo, visit func(*moduleInfo, chan<- pauseSpec) bool) {
 	for _, module := range modules {
-		if visit(module) {
+		if visit(module, nil) {
 			return
 		}
 	}
@@ -1817,10 +1913,10 @@
 	return module.forwardDeps
 }
 
-func (topDownVisitorImpl) visit(modules []*moduleInfo, visit func(*moduleInfo) bool) {
+func (topDownVisitorImpl) visit(modules []*moduleInfo, visit func(*moduleInfo, chan<- pauseSpec) bool) {
 	for i := 0; i < len(modules); i++ {
 		module := modules[len(modules)-1-i]
-		if visit(module) {
+		if visit(module, nil) {
 			return
 		}
 	}
@@ -1831,25 +1927,50 @@
 	topDownVisitor  topDownVisitorImpl
 )
 
+// pauseSpec describes a pause of a module's visit until another module has been visited,
+// at which point the unpause channel will be closed.
+type pauseSpec struct {
+	paused  *moduleInfo
+	until   *moduleInfo
+	unpause unpause
+}
+
+type unpause chan struct{}
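+
+// A visit function can wait for another module by sending a pauseSpec on its pause
+// channel and blocking on the unpause channel, which parallelVisit closes once that
+// module has been visited.  A minimal sketch, with dep being the module to wait for:
+//
+//	unpause := make(unpause)
+//	pause <- pauseSpec{paused: module, until: dep, unpause: unpause}
+//	<-unpause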
+
+const parallelVisitLimit = 1000
+
 // Calls visit on each module, guaranteeing that visit is not called on a module until visit on all
-// of its dependencies has finished.
-func (c *Context) parallelVisit(order visitOrderer, visit func(group *moduleInfo) bool) {
+// of its dependencies has finished.  A visit function can write a pauseSpec to the pause channel
+// to wait for another dependency to be visited.  If a visit function returns true to cancel
+// while another visitor is paused, the paused visitor will never be resumed and its goroutine
+// will stay paused forever.
+func parallelVisit(modules []*moduleInfo, order visitOrderer, limit int,
+	visit func(module *moduleInfo, pause chan<- pauseSpec) bool) []error {
+
 	doneCh := make(chan *moduleInfo)
 	cancelCh := make(chan bool)
-	count := 0
+	pauseCh := make(chan pauseSpec)
 	cancel := false
-	var backlog []*moduleInfo
-	const limit = 1000
 
-	for _, module := range c.modulesSorted {
+	var backlog []*moduleInfo      // Visitors that are ready to start but backlogged due to limit.
+	var unpauseBacklog []pauseSpec // Visitors that are ready to unpause but backlogged due to limit.
+
+	active := 0  // Number of visitors running, not counting paused visitors.
+	visited := 0 // Number of finished visitors.
+
+	pauseMap := make(map[*moduleInfo][]pauseSpec)
+
+	for _, module := range modules {
 		module.waitingCount = order.waitCount(module)
 	}
 
-	visitOne := func(module *moduleInfo) {
-		if count < limit {
-			count++
+	// Call the visitor on a module if there are fewer active visitors than the parallelism
+	// limit, otherwise add it to the backlog.
+	startOrBacklog := func(module *moduleInfo) {
+		if active < limit {
+			active++
 			go func() {
-				ret := visit(module)
+				ret := visit(module, pauseCh)
 				if ret {
 					cancelCh <- true
 				}
@@ -1860,34 +1981,195 @@
 		}
 	}
 
-	for _, module := range c.modulesSorted {
-		if module.waitingCount == 0 {
-			visitOne(module)
+	// Unpause the already-started but paused visitor on a module if there are fewer active
+	// visitors than the parallelism limit, otherwise add it to the backlog.
+	unpauseOrBacklog := func(pauseSpec pauseSpec) {
+		if active < limit {
+			active++
+			close(pauseSpec.unpause)
+		} else {
+			unpauseBacklog = append(unpauseBacklog, pauseSpec)
 		}
 	}
 
-	for count > 0 || len(backlog) > 0 {
+	// Start any modules in the backlog up to the parallelism limit.  Unpause paused modules first
+	// since they may already be holding resources.
+	unpauseOrStartFromBacklog := func() {
+		for active < limit && len(unpauseBacklog) > 0 {
+			unpause := unpauseBacklog[0]
+			unpauseBacklog = unpauseBacklog[1:]
+			unpauseOrBacklog(unpause)
+		}
+		for active < limit && len(backlog) > 0 {
+			toVisit := backlog[0]
+			backlog = backlog[1:]
+			startOrBacklog(toVisit)
+		}
+	}
+
+	toVisit := len(modules)
+
+	// Start or backlog any modules that are not waiting for any other modules.
+	for _, module := range modules {
+		if module.waitingCount == 0 {
+			startOrBacklog(module)
+		}
+	}
+
+	for active > 0 {
 		select {
 		case <-cancelCh:
 			cancel = true
 			backlog = nil
 		case doneModule := <-doneCh:
-			count--
+			active--
 			if !cancel {
-				for count < limit && len(backlog) > 0 {
-					toVisit := backlog[0]
-					backlog = backlog[1:]
-					visitOne(toVisit)
+				// Mark this module as done.
+				doneModule.waitingCount = -1
+				visited++
+
+				// Unpause or backlog any modules that were waiting for this one.
+				if unpauses, ok := pauseMap[doneModule]; ok {
+					delete(pauseMap, doneModule)
+					for _, unpause := range unpauses {
+						unpauseOrBacklog(unpause)
+					}
 				}
+
+				// Start any backlogged modules up to limit.
+				unpauseOrStartFromBacklog()
+
+				// Decrement waitingCount on the next modules in the tree based
+				// on propagation order, and start or backlog them if they are
+				// ready to start.
 				for _, module := range order.propagate(doneModule) {
 					module.waitingCount--
 					if module.waitingCount == 0 {
-						visitOne(module)
+						startOrBacklog(module)
+					}
+				}
+			}
+		case pauseSpec := <-pauseCh:
+			if pauseSpec.until.waitingCount == -1 {
+				// Module being paused for is already finished, resume immediately.
+				close(pauseSpec.unpause)
+			} else {
+				// Register for unpausing.
+				pauseMap[pauseSpec.until] = append(pauseMap[pauseSpec.until], pauseSpec)
+
+				// Don't count paused visitors as active so that this can't deadlock
+				// if `limit` visitors are paused simultaneously.
+				active--
+				unpauseOrStartFromBacklog()
+			}
+		}
+	}
+
+	if !cancel {
+		// Invariant check: no backlogged modules; these weren't waiting on anything except
+		// the parallelism limit, so they should have run.
+		if len(backlog) > 0 {
+			panic(fmt.Errorf("parallelVisit finished with %d backlogged visitors", len(backlog)))
+		}
+
+		// Invariant check: no backlogged paused modules; these weren't waiting on anything
+		// except the parallelism limit, so they should have run.
+		if len(unpauseBacklog) > 0 {
+			panic(fmt.Errorf("parallelVisit finished with %d backlogged unpaused visitors", len(unpauseBacklog)))
+		}
+
+		if len(pauseMap) > 0 {
+			// Probably a deadlock due to a newly added dependency cycle. Start from each module in
+			// the order of the input modules list and perform a depth-first search for the module
+			// it is paused on, ignoring modules that are marked as done.  Note this traverses from
+			// modules to the modules that would have been unblocked when that module finished, i.e.
+			// the reverse of the visitOrderer.
+
+			// In order to reduce duplicated work, once a module has been checked and determined
+			// not to be part of a cycle, add it and everything that depends on it to the checked
+			// map.
+			checked := make(map[*moduleInfo]struct{})
+
+			var check func(module, end *moduleInfo) []*moduleInfo
+			check = func(module, end *moduleInfo) []*moduleInfo {
+				if module.waitingCount == -1 {
+					// This module was finished, it can't be part of a loop.
+					return nil
+				}
+				if module == end {
+					// This module is the end of the loop, start rolling up the cycle.
+					return []*moduleInfo{module}
+				}
+
+				if _, alreadyChecked := checked[module]; alreadyChecked {
+					return nil
+				}
+
+				for _, dep := range order.propagate(module) {
+					cycle := check(dep, end)
+					if cycle != nil {
+						return append([]*moduleInfo{module}, cycle...)
+					}
+				}
+				for _, depPauseSpec := range pauseMap[module] {
+					cycle := check(depPauseSpec.paused, end)
+					if cycle != nil {
+						return append([]*moduleInfo{module}, cycle...)
+					}
+				}
+
+				checked[module] = struct{}{}
+				return nil
+			}
+
+			// Iterate over the modules list instead of pauseMap to provide deterministic ordering.
+			for _, module := range modules {
+				for _, pauseSpec := range pauseMap[module] {
+					cycle := check(pauseSpec.paused, pauseSpec.until)
+					if len(cycle) > 0 {
+						return cycleError(cycle)
 					}
 				}
 			}
 		}
+
+		// Invariant check: if there was no deadlock and no cancellation, every module
+		// should have been visited.
+		if visited != toVisit {
+			panic(fmt.Errorf("parallelVisit ran %d visitors, expected %d", visited, toVisit))
+		}
+
+		// Invariant check: if there was no deadlock and no cancellation, every module
+		// should have been visited, so there is nothing left to be paused on.
+		if len(pauseMap) > 0 {
+			panic(fmt.Errorf("parallelVisit finished with %d paused visitors", len(pauseMap)))
+		}
 	}
+
+	return nil
+}
+
+func cycleError(cycle []*moduleInfo) (errs []error) {
+	// The cycle list is in reverse order because all the 'check' calls append
+	// their own module to the list.
+	errs = append(errs, &BlueprintError{
+		Err: fmt.Errorf("encountered dependency cycle:"),
+		Pos: cycle[len(cycle)-1].pos,
+	})
+
+	// Iterate backwards through the cycle list.
+	curModule := cycle[0]
+	for i := len(cycle) - 1; i >= 0; i-- {
+		nextModule := cycle[i]
+		errs = append(errs, &BlueprintError{
+			Err: fmt.Errorf("    %s depends on %s",
+				curModule, nextModule),
+			Pos: curModule.pos,
+		})
+		curModule = nextModule
+	}
+
+	return errs
 }
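A condensed sketch of how a caller drives the new pause mechanism (the Test_parallelVisit cases added to context_test.go below exercise the same pattern). visitWithExtraOrdering and extraDeps are hypothetical and only illustrate the pauseSpec handshake; they are not part of this change.

// visitWithExtraOrdering visits every module bottom-up, but additionally waits
// for the module in extraDeps (if any) before running the work for the keyed
// module.
func (c *Context) visitWithExtraOrdering(extraDeps map[*moduleInfo]*moduleInfo,
	work func(*moduleInfo)) []error {

	return parallelVisit(c.modulesSorted, bottomUpVisitor, parallelVisitLimit,
		func(module *moduleInfo, pause chan<- pauseSpec) bool {
			if until, ok := extraDeps[module]; ok {
				// Park this visitor; parallelVisit closes unpauseCh once
				// `until` has been visited.
				unpauseCh := make(unpause)
				pause <- pauseSpec{paused: module, until: until, unpause: unpauseCh}
				<-unpauseCh
			}
			work(module)
			return false // returning true would cancel the remaining visits
		})
}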
 
 // updateDependencies recursively walks the module dependency graph and updates
@@ -1897,6 +2179,7 @@
 // it encounters dependency cycles.  This should be called after resolveDependencies,
 // as well as after any mutator pass has called addDependency
 func (c *Context) updateDependencies() (errs []error) {
+	c.cachedDepsModified = true
 	visited := make(map[*moduleInfo]bool)  // modules that were already checked
 	checking := make(map[*moduleInfo]bool) // modules actively being checked
 
@@ -1904,53 +2187,37 @@
 
 	var check func(group *moduleInfo) []*moduleInfo
 
-	cycleError := func(cycle []*moduleInfo) {
-		// We are the "start" of the cycle, so we're responsible
-		// for generating the errors.  The cycle list is in
-		// reverse order because all the 'check' calls append
-		// their own module to the list.
-		errs = append(errs, &BlueprintError{
-			Err: fmt.Errorf("encountered dependency cycle:"),
-			Pos: cycle[len(cycle)-1].pos,
-		})
-
-		// Iterate backwards through the cycle list.
-		curModule := cycle[0]
-		for i := len(cycle) - 1; i >= 0; i-- {
-			nextModule := cycle[i]
-			errs = append(errs, &BlueprintError{
-				Err: fmt.Errorf("    %q depends on %q",
-					curModule.Name(),
-					nextModule.Name()),
-				Pos: curModule.pos,
-			})
-			curModule = nextModule
-		}
-	}
-
 	check = func(module *moduleInfo) []*moduleInfo {
 		visited[module] = true
 		checking[module] = true
 		defer delete(checking, module)
 
-		deps := make(map[*moduleInfo]bool)
+		// Reset the forward and reverse deps without reducing their capacity to avoid reallocation.
+		module.reverseDeps = module.reverseDeps[:0]
+		module.forwardDeps = module.forwardDeps[:0]
 
 		// Add an implicit dependency ordering on all earlier modules in the same module group
 		for _, dep := range module.group.modules {
 			if dep == module {
 				break
 			}
-			deps[dep] = true
+			if depModule := dep.module(); depModule != nil {
+				module.forwardDeps = append(module.forwardDeps, depModule)
+			}
 		}
 
+	outer:
 		for _, dep := range module.directDeps {
-			deps[dep.module] = true
+			// Use a loop to check for duplicates; the average number of directDeps was measured to be 9.5.
+			for _, exists := range module.forwardDeps {
+				if dep.module == exists {
+					continue outer
+				}
+			}
+			module.forwardDeps = append(module.forwardDeps, dep.module)
 		}
 
-		module.reverseDeps = []*moduleInfo{}
-		module.forwardDeps = []*moduleInfo{}
-
-		for dep := range deps {
+		for _, dep := range module.forwardDeps {
 			if checking[dep] {
 				// This is a cycle.
 				return []*moduleInfo{dep, module}
@@ -1961,10 +2228,8 @@
 				if cycle != nil {
 					if cycle[0] == module {
 						// We are the "start" of the cycle, so we're responsible
-						// for generating the errors.  The cycle list is in
-						// reverse order because all the 'check' calls append
-						// their own module to the list.
-						cycleError(cycle)
+						// for generating the errors.
+						errs = append(errs, cycleError(cycle)...)
 
 						// We can continue processing this module's children to
 						// find more cycles.  Since all the modules that were
@@ -1978,7 +2243,6 @@
 				}
 			}
 
-			module.forwardDeps = append(module.forwardDeps, dep)
 			dep.reverseDeps = append(dep.reverseDeps, module)
 		}
 
@@ -1994,7 +2258,7 @@
 				if cycle[len(cycle)-1] != module {
 					panic("inconceivable!")
 				}
-				cycleError(cycle)
+				errs = append(errs, cycleError(cycle)...)
 			}
 		}
 	}
@@ -2004,6 +2268,64 @@
 	return
 }
 
+type jsonVariationMap map[string]string
+
+type jsonModuleName struct {
+	Name                 string
+	Variations           jsonVariationMap
+	DependencyVariations jsonVariationMap
+}
+
+type jsonDep struct {
+	jsonModuleName
+	Tag string
+}
+
+type jsonModule struct {
+	jsonModuleName
+	Deps      []jsonDep
+	Type      string
+	Blueprint string
+}
+
+func toJsonVariationMap(vm variationMap) jsonVariationMap {
+	return jsonVariationMap(vm)
+}
+
+func jsonModuleNameFromModuleInfo(m *moduleInfo) *jsonModuleName {
+	return &jsonModuleName{
+		Name:                 m.Name(),
+		Variations:           toJsonVariationMap(m.variant.variations),
+		DependencyVariations: toJsonVariationMap(m.variant.dependencyVariations),
+	}
+}
+
+func jsonModuleFromModuleInfo(m *moduleInfo) *jsonModule {
+	return &jsonModule{
+		jsonModuleName: *jsonModuleNameFromModuleInfo(m),
+		Deps:           make([]jsonDep, 0),
+		Type:           m.typeName,
+		Blueprint:      m.relBlueprintsFile,
+	}
+}
+
+func (c *Context) PrintJSONGraph(w io.Writer) {
+	modules := make([]*jsonModule, 0)
+	for _, m := range c.modulesSorted {
+		jm := jsonModuleFromModuleInfo(m)
+		for _, d := range m.directDeps {
+			jm.Deps = append(jm.Deps, jsonDep{
+				jsonModuleName: *jsonModuleNameFromModuleInfo(d.module),
+				Tag:            fmt.Sprintf("%T %+v", d.tag, d.tag),
+			})
+		}
+
+		modules = append(modules, jm)
+	}
+
+	json.NewEncoder(w).Encode(modules)
+}
+
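A hedged usage sketch for the new JSON dump (writeGraph and the output path are hypothetical): once the context has resolved its dependencies, PrintJSONGraph emits one JSON array whose elements carry Name, Variations, DependencyVariations, Deps (each with a Tag string), Type and Blueprint.

package example

import (
	"log"
	"os"

	"github.com/google/blueprint"
)

// writeGraph dumps the module graph of an already-resolved Context to a file.
func writeGraph(ctx *blueprint.Context, path string) {
	f, err := os.Create(path)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	ctx.PrintJSONGraph(f) // writes the []jsonModule structure defined above
}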
 // PrepareBuildActions generates an internal representation of all the build
 // actions that need to be performed.  This process involves invoking the
 // GenerateBuildActions method on each of the Module objects created during the
@@ -2022,6 +2344,7 @@
 // by the modules and singletons via the ModuleContext.AddNinjaFileDeps(),
 // SingletonContext.AddNinjaFileDeps(), and PackageContext.AddNinjaFileDeps()
 // methods.
 func (c *Context) PrepareBuildActions(config interface{}) (deps []string, errs []error) {
 	pprof.Do(c.Context, pprof.Labels("blueprint", "PrepareBuildActions"), func(ctx context.Context) {
 		c.buildActionsReady = false
@@ -2062,6 +2385,8 @@
 
 		deps = append(deps, depsPackages...)
 
+		c.memoizeFullNames(c.liveGlobals, pkgNames)
+
 		// This will panic if it finds a problem since it's a programming error.
 		c.checkForVariableReferenceCycles(c.liveGlobals.variables, pkgNames)
 
@@ -2182,12 +2507,12 @@
 
 	errsCh := make(chan []error)
 	globalStateCh := make(chan globalStateChange)
-	newVariationsCh := make(chan []*moduleInfo)
+	newVariationsCh := make(chan modulesOrAliases)
 	done := make(chan bool)
 
 	c.depsModified = 0
 
-	visit := func(module *moduleInfo) bool {
+	visit := func(module *moduleInfo, pause chan<- pauseSpec) bool {
 		if module.splitModules != nil {
 			panic("split module found in sorted module list")
 		}
@@ -2198,9 +2523,12 @@
 				config:  config,
 				module:  module,
 			},
-			name: mutator.name,
+			name:    mutator.name,
+			pauseCh: pause,
 		}
 
+		module.startedMutator = mutator
+
 		func() {
 			defer func() {
 				if r := recover(); r != nil {
@@ -2216,6 +2544,8 @@
 			direction.run(mutator, mctx)
 		}()
 
+		module.finishedMutator = mutator
+
 		if len(mctx.errs) > 0 {
 			errsCh <- mctx.errs
 			return true
@@ -2253,8 +2583,10 @@
 				newModules = append(newModules, globalStateChange.newModules...)
 				deps = append(deps, globalStateChange.deps...)
 			case newVariations := <-newVariationsCh:
-				for _, m := range newVariations {
-					newModuleInfo[m.logicModule] = m
+				for _, moduleOrAlias := range newVariations {
+					if m := moduleOrAlias.module(); m != nil {
+						newModuleInfo[m.logicModule] = m
+					}
 				}
 			case <-done:
 				return
@@ -2262,12 +2594,21 @@
 		}
 	}()
 
+	c.startedMutator = mutator
+
+	var visitErrs []error
 	if mutator.parallel {
-		c.parallelVisit(direction.orderer(), visit)
+		visitErrs = parallelVisit(c.modulesSorted, direction.orderer(), parallelVisitLimit, visit)
 	} else {
 		direction.orderer().visit(c.modulesSorted, visit)
 	}
 
+	if len(visitErrs) > 0 {
+		return nil, visitErrs
+	}
+
+	c.finishedMutators[mutator] = true
+
 	done <- true
 
 	if len(errs) > 0 {
@@ -2278,33 +2619,27 @@
 
 	for _, group := range c.moduleGroups {
 		for i := 0; i < len(group.modules); i++ {
-			module := group.modules[i]
+			module := group.modules[i].module()
+			if module == nil {
+				// Existing alias, skip it
+				continue
+			}
 
 			// Update module group to contain newly split variants
 			if module.splitModules != nil {
 				group.modules, i = spliceModules(group.modules, i, module.splitModules)
 			}
 
-			// Create any new aliases.
-			if module.aliasTarget != nil {
-				group.aliases = append(group.aliases, &moduleAlias{
-					variantName:       module.variantName,
-					variant:           module.variant,
-					dependencyVariant: module.dependencyVariant,
-					target:            module.aliasTarget,
-				})
-			}
-
 			// Fix up any remaining dependencies on modules that were split into variants
 			// by replacing them with the first variant
 			for j, dep := range module.directDeps {
 				if dep.module.logicModule == nil {
-					module.directDeps[j].module = dep.module.splitModules[0]
+					module.directDeps[j].module = dep.module.splitModules.firstModule()
 				}
 			}
 
 			if module.createdBy != nil && module.createdBy.logicModule == nil {
-				module.createdBy = module.createdBy.splitModules[0]
+				module.createdBy = module.createdBy.splitModules.firstModule()
 			}
 
 			// Add in any new direct dependencies that were added by the mutator
@@ -2312,17 +2647,31 @@
 			module.newDirectDeps = nil
 		}
 
-		// Forward or delete any dangling aliases.
-		for i := 0; i < len(group.aliases); i++ {
-			alias := group.aliases[i]
+		findAliasTarget := func(variant variant) *moduleInfo {
+			for _, moduleOrAlias := range group.modules {
+				if alias := moduleOrAlias.alias(); alias != nil {
+					if alias.variant.variations.equal(variant.variations) {
+						return alias.target
+					}
+				}
+			}
+			return nil
+		}
 
-			if alias.target.logicModule == nil {
-				if alias.target.aliasTarget != nil {
-					alias.target = alias.target.aliasTarget
-				} else {
-					// The alias was left dangling, remove it.
-					group.aliases = append(group.aliases[:i], group.aliases[i+1:]...)
-					i--
+		// Forward or delete any dangling aliases.
+		// Use a manual loop instead of range because len(group.modules) can
+		// change inside the loop.
+		for i := 0; i < len(group.modules); i++ {
+			if alias := group.modules[i].alias(); alias != nil {
+				if alias.target.logicModule == nil {
+					newTarget := findAliasTarget(alias.target.variant)
+					if newTarget != nil {
+						alias.target = newTarget
+					} else {
+						// The alias was left dangling, remove it.
+						group.modules = append(group.modules[:i], group.modules[i+1:]...)
+						i--
+					}
 				}
 			}
 		}
@@ -2374,12 +2723,16 @@
 	ch := make(chan update)
 	doneCh := make(chan bool)
 	go func() {
-		c.parallelVisit(unorderedVisitorImpl{}, func(m *moduleInfo) bool {
-			origLogicModule := m.logicModule
-			m.logicModule, m.properties = c.cloneLogicModule(m)
-			ch <- update{origLogicModule, m}
-			return false
-		})
+		errs := parallelVisit(c.modulesSorted, unorderedVisitorImpl{}, parallelVisitLimit,
+			func(m *moduleInfo, pause chan<- pauseSpec) bool {
+				origLogicModule := m.logicModule
+				m.logicModule, m.properties = c.cloneLogicModule(m)
+				ch <- update{origLogicModule, m}
+				return false
+			})
+		if len(errs) > 0 {
+			panic(errs)
+		}
 		doneCh <- true
 	}()
 
@@ -2397,15 +2750,15 @@
 
 // Removes modules[i] from the list and inserts newModules... where it was located, returning
 // the new slice and the index of the last inserted element
-func spliceModules(modules []*moduleInfo, i int, newModules []*moduleInfo) ([]*moduleInfo, int) {
+func spliceModules(modules modulesOrAliases, i int, newModules modulesOrAliases) (modulesOrAliases, int) {
 	spliceSize := len(newModules)
 	newLen := len(modules) + spliceSize - 1
-	var dest []*moduleInfo
+	var dest modulesOrAliases
 	if cap(modules) >= len(modules)-1+len(newModules) {
 		// We can fit the splice in the existing capacity, do everything in place
 		dest = modules[:newLen]
 	} else {
-		dest = make([]*moduleInfo, newLen)
+		dest = make(modulesOrAliases, newLen)
 		copy(dest, modules[:i])
 	}
 
@@ -2443,71 +2796,77 @@
 		}
 	}()
 
-	c.parallelVisit(bottomUpVisitor, func(module *moduleInfo) bool {
+	visitErrs := parallelVisit(c.modulesSorted, bottomUpVisitor, parallelVisitLimit,
+		func(module *moduleInfo, pause chan<- pauseSpec) bool {
+			uniqueName := c.nameInterface.UniqueName(newNamespaceContext(module), module.group.name)
+			sanitizedName := toNinjaName(uniqueName)
 
-		uniqueName := c.nameInterface.UniqueName(newNamespaceContext(module), module.group.name)
-		sanitizedName := toNinjaName(uniqueName)
+			prefix := moduleNamespacePrefix(sanitizedName + "_" + module.variant.name)
 
-		prefix := moduleNamespacePrefix(sanitizedName + "_" + module.variantName)
+			// The parent scope of the moduleContext's local scope gets overridden to be that of the
+			// calling Go package on a per-call basis.  Since the initial parent scope doesn't matter, we
+			// just set it to nil.
+			scope := newLocalScope(nil, prefix)
 
-		// The parent scope of the moduleContext's local scope gets overridden to be that of the
-		// calling Go package on a per-call basis.  Since the initial parent scope doesn't matter we
-		// just set it to nil.
-		scope := newLocalScope(nil, prefix)
-
-		mctx := &moduleContext{
-			baseModuleContext: baseModuleContext{
-				context: c,
-				config:  config,
-				module:  module,
-			},
-			scope:              scope,
-			handledMissingDeps: module.missingDeps == nil,
-		}
-
-		func() {
-			defer func() {
-				if r := recover(); r != nil {
-					in := fmt.Sprintf("GenerateBuildActions for %s", module)
-					if err, ok := r.(panicError); ok {
-						err.addIn(in)
-						mctx.error(err)
-					} else {
-						mctx.error(newPanicErrorf(r, in))
-					}
-				}
-			}()
-			mctx.module.logicModule.GenerateBuildActions(mctx)
-		}()
-
-		if len(mctx.errs) > 0 {
-			errsCh <- mctx.errs
-			return true
-		}
-
-		if module.missingDeps != nil && !mctx.handledMissingDeps {
-			var errs []error
-			for _, depName := range module.missingDeps {
-				errs = append(errs, c.missingDependencyError(module, depName))
+			mctx := &moduleContext{
+				baseModuleContext: baseModuleContext{
+					context: c,
+					config:  config,
+					module:  module,
+				},
+				scope:              scope,
+				handledMissingDeps: module.missingDeps == nil,
 			}
-			errsCh <- errs
-			return true
-		}
 
-		depsCh <- mctx.ninjaFileDeps
+			mctx.module.startedGenerateBuildActions = true
 
-		newErrs := c.processLocalBuildActions(&module.actionDefs,
-			&mctx.actionDefs, liveGlobals)
-		if len(newErrs) > 0 {
-			errsCh <- newErrs
-			return true
-		}
-		return false
-	})
+			func() {
+				defer func() {
+					if r := recover(); r != nil {
+						in := fmt.Sprintf("GenerateBuildActions for %s", module)
+						if err, ok := r.(panicError); ok {
+							err.addIn(in)
+							mctx.error(err)
+						} else {
+							mctx.error(newPanicErrorf(r, in))
+						}
+					}
+				}()
+				mctx.module.logicModule.GenerateBuildActions(mctx)
+			}()
+
+			mctx.module.finishedGenerateBuildActions = true
+
+			if len(mctx.errs) > 0 {
+				errsCh <- mctx.errs
+				return true
+			}
+
+			if module.missingDeps != nil && !mctx.handledMissingDeps {
+				var errs []error
+				for _, depName := range module.missingDeps {
+					errs = append(errs, c.missingDependencyError(module, depName))
+				}
+				errsCh <- errs
+				return true
+			}
+
+			depsCh <- mctx.ninjaFileDeps
+
+			newErrs := c.processLocalBuildActions(&module.actionDefs,
+				&mctx.actionDefs, liveGlobals)
+			if len(newErrs) > 0 {
+				errsCh <- newErrs
+				return true
+			}
+			return false
+		})
 
 	cancelCh <- struct{}{}
 	<-cancelCh
 
+	errs = append(errs, visitErrs...)
+
 	return deps, errs
 }
 
@@ -2645,7 +3004,8 @@
 }
 
 type replace struct {
-	from, to *moduleInfo
+	from, to  *moduleInfo
+	predicate ReplaceDependencyPredicate
 }
 
 type rename struct {
@@ -2661,14 +3021,8 @@
 	}
 
 	for _, m := range group.modules {
-		if module.variantName == m.variantName {
-			return m
-		}
-	}
-
-	for _, m := range group.aliases {
-		if module.variantName == m.variantName {
-			return m.target
+		if module.variant.name == m.moduleOrAliasVariant().name {
+			return m.moduleOrAliasTarget()
 		}
 	}
 
@@ -2691,22 +3045,32 @@
 
 func (c *Context) handleReplacements(replacements []replace) []error {
 	var errs []error
+	changedDeps := false
 	for _, replace := range replacements {
 		for _, m := range replace.from.reverseDeps {
 			for i, d := range m.directDeps {
 				if d.module == replace.from {
-					m.directDeps[i].module = replace.to
+					// If the replacement has a predicate then check it.
+					if replace.predicate == nil || replace.predicate(m.logicModule, d.tag, d.module.logicModule) {
+						m.directDeps[i].module = replace.to
+						changedDeps = true
+					}
 				}
 			}
 		}
 
-		atomic.AddUint32(&c.depsModified, 1)
 	}
 
+	if changedDeps {
+		atomic.AddUint32(&c.depsModified, 1)
+	}
 	return errs
 }
 
-func (c *Context) discoveredMissingDependencies(module *moduleInfo, depName string) (errs []error) {
+func (c *Context) discoveredMissingDependencies(module *moduleInfo, depName string, depVariations variationMap) (errs []error) {
+	if depVariations != nil {
+		depName = depName + "{" + c.prettyPrintVariant(depVariations) + "}"
+	}
 	if c.allowMissingDependencies {
 		module.missingDeps = append(module.missingDeps, depName)
 		return nil
@@ -2732,7 +3096,7 @@
 }
 
 func (c *Context) sortedModuleGroups() []*moduleGroup {
-	if c.cachedSortedModuleGroups == nil {
+	if c.cachedSortedModuleGroups == nil || c.cachedDepsModified {
 		unwrap := func(wrappers []ModuleGroup) []*moduleGroup {
 			result := make([]*moduleGroup, 0, len(wrappers))
 			for _, group := range wrappers {
@@ -2742,6 +3106,7 @@
 		}
 
 		c.cachedSortedModuleGroups = unwrap(c.nameInterface.AllModules())
+		c.cachedDepsModified = false
 	}
 
 	return c.cachedSortedModuleGroups
@@ -2758,8 +3123,10 @@
 	}()
 
 	for _, moduleGroup := range c.sortedModuleGroups() {
-		for _, module = range moduleGroup.modules {
-			visit(module.logicModule)
+		for _, moduleOrAlias := range moduleGroup.modules {
+			if module = moduleOrAlias.module(); module != nil {
+				visit(module.logicModule)
+			}
 		}
 	}
 }
@@ -2777,9 +3144,11 @@
 	}()
 
 	for _, moduleGroup := range c.sortedModuleGroups() {
-		for _, module := range moduleGroup.modules {
-			if pred(module.logicModule) {
-				visit(module.logicModule)
+		for _, moduleOrAlias := range moduleGroup.modules {
+			if module = moduleOrAlias.module(); module != nil {
+				if pred(module.logicModule) {
+					visit(module.logicModule)
+				}
 			}
 		}
 	}
@@ -2797,8 +3166,10 @@
 		}
 	}()
 
-	for _, variant = range module.group.modules {
-		visit(variant.logicModule)
+	for _, moduleOrAlias := range module.group.modules {
+		if variant = moduleOrAlias.module(); variant != nil {
+			visit(variant.logicModule)
+		}
 	}
 }
 
@@ -2880,6 +3251,21 @@
 	return pkgNames, deps
 }
 
+// memoizeFullNames stores the full name of each live global variable, rule and pool since each is
+// guaranteed to be used at least twice, once in the definition and once for each usage, and many
+// are used much more than once.
+func (c *Context) memoizeFullNames(liveGlobals *liveTracker, pkgNames map[*packageContext]string) {
+	for v := range liveGlobals.variables {
+		v.memoizeFullName(pkgNames)
+	}
+	for r := range liveGlobals.rules {
+		r.memoizeFullName(pkgNames)
+	}
+	for p := range liveGlobals.pools {
+		p.memoizeFullName(pkgNames)
+	}
+}
+
 func (c *Context) checkForVariableReferenceCycles(
 	variables map[Variable]ninjaString, pkgNames map[*packageContext]string) {
 
@@ -3026,18 +3412,13 @@
 	return module.Name()
 }
 
-func (c *Context) ModulePath(logicModule Module) string {
-	module := c.moduleInfo[logicModule]
-	return module.relBlueprintsFile
-}
-
 func (c *Context) ModuleDir(logicModule Module) string {
-	return filepath.Dir(c.ModulePath(logicModule))
+	return filepath.Dir(c.BlueprintFile(logicModule))
 }
 
 func (c *Context) ModuleSubDir(logicModule Module) string {
 	module := c.moduleInfo[logicModule]
-	return module.variantName
+	return module.variant.name
 }
 
 func (c *Context) ModuleType(logicModule Module) string {
@@ -3045,6 +3426,25 @@
 	return module.typeName
 }
 
+// ModuleProvider returns the value, if any, for the provider for a module.  If the value for the
+// provider was not set it returns the zero value of the type of the provider, which means the
+// return value can always be type-asserted to the type of the provider.  The return value should
+// always be considered read-only.  It panics if called before the appropriate mutator or
+// GenerateBuildActions pass for the provider on the module.  The value returned may be a deep
+// copy of the value originally passed to SetProvider.
+func (c *Context) ModuleProvider(logicModule Module, provider ProviderKey) interface{} {
+	module := c.moduleInfo[logicModule]
+	value, _ := c.provider(module, provider)
+	return value
+}
+
+// ModuleHasProvider returns true if the provider for the given module has been set.
+func (c *Context) ModuleHasProvider(logicModule Module, provider ProviderKey) bool {
+	module := c.moduleInfo[logicModule]
+	_, ok := c.provider(module, provider)
+	return ok
+}
+
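A minimal sketch of reading a provider from outside the module. OutputsInfo, OutputsProvider and printOutputs are hypothetical, and the key constructor is assumed to be the NewProvider function added in provider.go (not shown in this section).

package example

import (
	"fmt"

	"github.com/google/blueprint"
)

// OutputsInfo is a hypothetical value a module might publish from its
// GenerateBuildActions pass.
type OutputsInfo struct {
	Files []string
}

var OutputsProvider = blueprint.NewProvider(OutputsInfo{}) // assumed constructor from the new provider.go

// printOutputs shows the read-only access pattern: check first, then
// type-assert the returned value.
func printOutputs(ctx *blueprint.Context, m blueprint.Module) {
	if ctx.ModuleHasProvider(m, OutputsProvider) {
		info := ctx.ModuleProvider(m, OutputsProvider).(OutputsInfo)
		fmt.Println(info.Files)
	}
}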
 func (c *Context) BlueprintFile(logicModule Module) string {
 	module := c.moduleInfo[logicModule]
 	return module.relBlueprintsFile
@@ -3147,12 +3547,11 @@
 }
 
 func (c *Context) PrimaryModule(module Module) Module {
-	return c.moduleInfo[module].group.modules[0].logicModule
+	return c.moduleInfo[module].group.modules.firstModule().logicModule
 }
 
 func (c *Context) FinalModule(module Module) Module {
-	modules := c.moduleInfo[module].group.modules
-	return modules[len(modules)-1].logicModule
+	return c.moduleInfo[module].group.modules.lastModule().logicModule
 }
 
 func (c *Context) VisitAllModuleVariants(module Module,
@@ -3183,7 +3582,7 @@
 // WriteBuildFile writes the Ninja manifest text for the generated build
 // actions to w.  If this is called before PrepareBuildActions successfully
 // completes then ErrBuildActionsNotReady is returned.
-func (c *Context) WriteBuildFile(w io.Writer) error {
+func (c *Context) WriteBuildFile(w io.StringWriter) error {
 	var err error
 	pprof.Do(c.Context, pprof.Labels("blueprint", "WriteBuildFile"), func(ctx context.Context) {
 		if !c.buildActionsReady {
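Since WriteBuildFile now takes an io.StringWriter, callers pass any writer with a WriteString method; both *os.File and *bufio.Writer qualify. A small sketch (writeNinja and the path are hypothetical):

package example

import (
	"bufio"
	"os"

	"github.com/google/blueprint"
)

// writeNinja writes the generated manifest through a buffered StringWriter.
func writeNinja(ctx *blueprint.Context, path string) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()

	w := bufio.NewWriter(f) // *bufio.Writer implements io.StringWriter
	if err := ctx.WriteBuildFile(w); err != nil {
		return err
	}
	return w.Flush()
}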
@@ -3483,8 +3882,8 @@
 	iName := s[i].module.Name()
 	jName := s[j].module.Name()
 	if iName == jName {
-		iName = s[i].module.variantName
-		jName = s[j].module.variantName
+		iName = s[i].module.variant.name
+		jName = s[j].module.variant.name
 	}
 	return iName < jName
 }
@@ -3508,14 +3907,17 @@
 	iName := s.nameInterface.UniqueName(newNamespaceContext(iMod), iMod.group.name)
 	jName := s.nameInterface.UniqueName(newNamespaceContext(jMod), jMod.group.name)
 	if iName == jName {
-		iName = s.modules[i].variantName
-		jName = s.modules[j].variantName
+		iVariantName := s.modules[i].variant.name
+		jVariantName := s.modules[j].variant.name
+		if iVariantName == jVariantName {
+			panic(fmt.Sprintf("duplicate module name: %s %s: %#v and %#v\n",
+				iName, iVariantName, iMod.variant.variations, jMod.variant.variations))
+		} else {
+			return iVariantName < jVariantName
+		}
+	} else {
+		return iName < jName
 	}
-
-	if iName == jName {
-		panic(fmt.Sprintf("duplicate module name: %s: %#v and %#v\n", iName, iMod, jMod))
-	}
-	return iName < jName
 }
 
 func (s moduleSorter) Swap(i, j int) {
@@ -3560,7 +3962,7 @@
 			"typeName":  module.typeName,
 			"goFactory": factoryName,
 			"pos":       relPos,
-			"variant":   module.variantName,
+			"variant":   module.variant.name,
 		}
 		err = headerTemplate.Execute(buf, infoMap)
 		if err != nil {
@@ -3709,15 +4111,15 @@
 	return nil
 }
 
-func beforeInModuleList(a, b *moduleInfo, list []*moduleInfo) bool {
+func beforeInModuleList(a, b *moduleInfo, list modulesOrAliases) bool {
 	found := false
 	if a == b {
 		return false
 	}
 	for _, l := range list {
-		if l == a {
+		if l.module() == a {
 			found = true
-		} else if l == b {
+		} else if l.module() == b {
 			return found
 		}
 	}
diff --git a/context_test.go b/context_test.go
index 0541c06..d91b89d 100644
--- a/context_test.go
+++ b/context_test.go
@@ -238,7 +238,7 @@
 		t.FailNow()
 	}
 
-	topModule := ctx.moduleGroupFromName("A", nil).modules[0]
+	topModule := ctx.moduleGroupFromName("A", nil).modules.firstModule()
 	outputDown, outputUp := walkDependencyGraph(ctx, topModule, false)
 	if outputDown != "BCEFG" {
 		t.Errorf("unexpected walkDeps behaviour: %s\ndown should be: BCEFG", outputDown)
@@ -319,7 +319,7 @@
 		t.FailNow()
 	}
 
-	topModule := ctx.moduleGroupFromName("A", nil).modules[0]
+	topModule := ctx.moduleGroupFromName("A", nil).modules.firstModule()
 	outputDown, outputUp := walkDependencyGraph(ctx, topModule, true)
 	if outputDown != "BCEGHFGG" {
 		t.Errorf("unexpected walkDeps behaviour: %s\ndown should be: BCEGHFGG", outputDown)
@@ -386,7 +386,7 @@
 		t.FailNow()
 	}
 
-	topModule := ctx.moduleGroupFromName("A", nil).modules[0]
+	topModule := ctx.moduleGroupFromName("A", nil).modules.firstModule()
 	outputDown, outputUp := walkDependencyGraph(ctx, topModule, true)
 	expectedDown := "BDCDE"
 	if outputDown != expectedDown {
@@ -432,10 +432,10 @@
 		t.FailNow()
 	}
 
-	a := ctx.moduleGroupFromName("A", nil).modules[0].logicModule.(*fooModule)
-	b := ctx.moduleGroupFromName("B", nil).modules[0].logicModule.(*barModule)
-	c := ctx.moduleGroupFromName("C", nil).modules[0].logicModule.(*barModule)
-	d := ctx.moduleGroupFromName("D", nil).modules[0].logicModule.(*fooModule)
+	a := ctx.moduleGroupFromName("A", nil).modules.firstModule().logicModule.(*fooModule)
+	b := ctx.moduleGroupFromName("B", nil).modules.firstModule().logicModule.(*barModule)
+	c := ctx.moduleGroupFromName("C", nil).modules.firstModule().logicModule.(*barModule)
+	d := ctx.moduleGroupFromName("D", nil).modules.firstModule().logicModule.(*fooModule)
 
 	checkDeps := func(m Module, expected string) {
 		var deps []string
@@ -606,3 +606,481 @@
 		t.Errorf("Incorrect errors; expected:\n%s\ngot:\n%s", expectedErrs, errs)
 	}
 }
+
+func Test_findVariant(t *testing.T) {
+	module := &moduleInfo{
+		variant: variant{
+			name: "normal_local",
+			variations: variationMap{
+				"normal": "normal",
+				"local":  "local",
+			},
+			dependencyVariations: variationMap{
+				"normal": "normal",
+			},
+		},
+	}
+
+	type alias struct {
+		variant variant
+		target  int
+	}
+
+	makeDependencyGroup := func(in ...interface{}) *moduleGroup {
+		group := &moduleGroup{
+			name: "dep",
+		}
+		for _, x := range in {
+			switch m := x.(type) {
+			case *moduleInfo:
+				m.group = group
+				group.modules = append(group.modules, m)
+			case alias:
+				// aliases may need to target modules that haven't been processed
+				// yet, so put a nil placeholder in for now.
+				group.modules = append(group.modules, nil)
+			default:
+				t.Fatalf("unexpected type %T", x)
+			}
+		}
+
+		for i, x := range in {
+			switch m := x.(type) {
+			case *moduleInfo:
+				// already added in the first pass
+			case alias:
+				group.modules[i] = &moduleAlias{
+					variant: m.variant,
+					target:  group.modules[m.target].moduleOrAliasTarget(),
+				}
+			default:
+				t.Fatalf("unexpected type %T", x)
+			}
+		}
+
+		return group
+	}
+
+	tests := []struct {
+		name         string
+		possibleDeps *moduleGroup
+		variations   []Variation
+		far          bool
+		reverse      bool
+		want         string
+	}{
+		{
+			name: "AddVariationDependencies(nil)",
+			// A dependency that matches the non-local variations of the module
+			possibleDeps: makeDependencyGroup(
+				&moduleInfo{
+					variant: variant{
+						name: "normal",
+						variations: variationMap{
+							"normal": "normal",
+						},
+					},
+				},
+			),
+			variations: nil,
+			far:        false,
+			reverse:    false,
+			want:       "normal",
+		},
+		{
+			name: "AddVariationDependencies(nil) to alias",
+			// A dependency with an alias that matches the non-local variations of the module
+			possibleDeps: makeDependencyGroup(
+				alias{
+					variant: variant{
+						name: "normal",
+						variations: variationMap{
+							"normal": "normal",
+						},
+					},
+					target: 1,
+				},
+				&moduleInfo{
+					variant: variant{
+						name: "normal_a",
+						variations: variationMap{
+							"normal": "normal",
+							"a":      "a",
+						},
+					},
+				},
+			),
+			variations: nil,
+			far:        false,
+			reverse:    false,
+			want:       "normal_a",
+		},
+		{
+			name: "AddVariationDependencies(a)",
+			// A dependency with local variations
+			possibleDeps: makeDependencyGroup(
+				&moduleInfo{
+					variant: variant{
+						name: "normal_a",
+						variations: variationMap{
+							"normal": "normal",
+							"a":      "a",
+						},
+					},
+				},
+			),
+			variations: []Variation{{"a", "a"}},
+			far:        false,
+			reverse:    false,
+			want:       "normal_a",
+		},
+		{
+			name: "AddFarVariationDependencies(far)",
+			// A dependency with far variations
+			possibleDeps: makeDependencyGroup(
+				&moduleInfo{
+					variant: variant{
+						name:       "",
+						variations: nil,
+					},
+				},
+				&moduleInfo{
+					variant: variant{
+						name: "far",
+						variations: variationMap{
+							"far": "far",
+						},
+					},
+				},
+			),
+			variations: []Variation{{"far", "far"}},
+			far:        true,
+			reverse:    false,
+			want:       "far",
+		},
+		{
+			name: "AddFarVariationDependencies(far) to alias",
+			// A dependency with far variations and aliases
+			possibleDeps: makeDependencyGroup(
+				alias{
+					variant: variant{
+						name: "far",
+						variations: variationMap{
+							"far": "far",
+						},
+					},
+					target: 2,
+				},
+				&moduleInfo{
+					variant: variant{
+						name: "far_a",
+						variations: variationMap{
+							"far": "far",
+							"a":   "a",
+						},
+					},
+				},
+				&moduleInfo{
+					variant: variant{
+						name: "far_b",
+						variations: variationMap{
+							"far": "far",
+							"b":   "b",
+						},
+					},
+				},
+			),
+			variations: []Variation{{"far", "far"}},
+			far:        true,
+			reverse:    false,
+			want:       "far_b",
+		},
+		{
+			name: "AddFarVariationDependencies(far, b) to missing",
+			// A dependency with far variations and aliases
+			possibleDeps: makeDependencyGroup(
+				alias{
+					variant: variant{
+						name: "far",
+						variations: variationMap{
+							"far": "far",
+						},
+					},
+					target: 1,
+				},
+				&moduleInfo{
+					variant: variant{
+						name: "far_a",
+						variations: variationMap{
+							"far": "far",
+							"a":   "a",
+						},
+					},
+				},
+			),
+			variations: []Variation{{"far", "far"}, {"a", "b"}},
+			far:        true,
+			reverse:    false,
+			want:       "nil",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, _ := findVariant(module, tt.possibleDeps, tt.variations, tt.far, tt.reverse)
+			if g, w := got == nil, tt.want == "nil"; g != w {
+				t.Fatalf("findVariant() got = %v, want %v", got, tt.want)
+			}
+			if got != nil {
+				if g, w := got.String(), fmt.Sprintf("module %q variant %q", "dep", tt.want); g != w {
+					t.Errorf("findVariant() got = %v, want %v", g, w)
+				}
+			}
+		})
+	}
+}
+
+func Test_parallelVisit(t *testing.T) {
+	addDep := func(from, to *moduleInfo) {
+		from.directDeps = append(from.directDeps, depInfo{to, nil})
+		from.forwardDeps = append(from.forwardDeps, to)
+		to.reverseDeps = append(to.reverseDeps, from)
+	}
+
+	create := func(name string) *moduleInfo {
+		m := &moduleInfo{
+			group: &moduleGroup{
+				name: name,
+			},
+		}
+		m.group.modules = modulesOrAliases{m}
+		return m
+	}
+	moduleA := create("A")
+	moduleB := create("B")
+	moduleC := create("C")
+	moduleD := create("D")
+	moduleE := create("E")
+	moduleF := create("F")
+	moduleG := create("G")
+
+	// A depends on B, B depends on C.  Nothing depends on D through G, and they don't depend on
+	// anything.
+	addDep(moduleA, moduleB)
+	addDep(moduleB, moduleC)
+
+	t.Run("no modules", func(t *testing.T) {
+		errs := parallelVisit(nil, bottomUpVisitorImpl{}, 1,
+			func(module *moduleInfo, pause chan<- pauseSpec) bool {
+				panic("unexpected call to visitor")
+			})
+		if errs != nil {
+			t.Errorf("expected no errors, got %q", errs)
+		}
+	})
+	t.Run("bottom up", func(t *testing.T) {
+		order := ""
+		errs := parallelVisit([]*moduleInfo{moduleA, moduleB, moduleC}, bottomUpVisitorImpl{}, 1,
+			func(module *moduleInfo, pause chan<- pauseSpec) bool {
+				order += module.group.name
+				return false
+			})
+		if errs != nil {
+			t.Errorf("expected no errors, got %q", errs)
+		}
+		if g, w := order, "CBA"; g != w {
+			t.Errorf("expected order %q, got %q", w, g)
+		}
+	})
+	t.Run("pause", func(t *testing.T) {
+		order := ""
+		errs := parallelVisit([]*moduleInfo{moduleA, moduleB, moduleC, moduleD}, bottomUpVisitorImpl{}, 1,
+			func(module *moduleInfo, pause chan<- pauseSpec) bool {
+				if module == moduleC {
+					// Pause module C on module D
+					unpause := make(chan struct{})
+					pause <- pauseSpec{moduleC, moduleD, unpause}
+					<-unpause
+				}
+				order += module.group.name
+				return false
+			})
+		if errs != nil {
+			t.Errorf("expected no errors, got %q", errs)
+		}
+		if g, w := order, "DCBA"; g != w {
+			t.Errorf("expected order %q, got %q", w, g)
+		}
+	})
+	t.Run("cancel", func(t *testing.T) {
+		order := ""
+		errs := parallelVisit([]*moduleInfo{moduleA, moduleB, moduleC}, bottomUpVisitorImpl{}, 1,
+			func(module *moduleInfo, pause chan<- pauseSpec) bool {
+				order += module.group.name
+				// Cancel in module B
+				return module == moduleB
+			})
+		if errs != nil {
+			t.Errorf("expected no errors, got %q", errs)
+		}
+		if g, w := order, "CB"; g != w {
+			t.Errorf("expected order %q, got %q", w, g)
+		}
+	})
+	t.Run("pause and cancel", func(t *testing.T) {
+		order := ""
+		errs := parallelVisit([]*moduleInfo{moduleA, moduleB, moduleC, moduleD}, bottomUpVisitorImpl{}, 1,
+			func(module *moduleInfo, pause chan<- pauseSpec) bool {
+				if module == moduleC {
+					// Pause module C on module D
+					unpause := make(chan struct{})
+					pause <- pauseSpec{moduleC, moduleD, unpause}
+					<-unpause
+				}
+				order += module.group.name
+				// Cancel in module D
+				return module == moduleD
+			})
+		if errs != nil {
+			t.Errorf("expected no errors, got %q", errs)
+		}
+		if g, w := order, "D"; g != w {
+			t.Errorf("expected order %q, got %q", w, g)
+		}
+	})
+	t.Run("parallel", func(t *testing.T) {
+		order := ""
+		errs := parallelVisit([]*moduleInfo{moduleA, moduleB, moduleC}, bottomUpVisitorImpl{}, 3,
+			func(module *moduleInfo, pause chan<- pauseSpec) bool {
+				order += module.group.name
+				return false
+			})
+		if errs != nil {
+			t.Errorf("expected no errors, got %q", errs)
+		}
+		if g, w := order, "CBA"; g != w {
+			t.Errorf("expected order %q, got %q", w, g)
+		}
+	})
+	t.Run("pause existing", func(t *testing.T) {
+		order := ""
+		errs := parallelVisit([]*moduleInfo{moduleA, moduleB, moduleC}, bottomUpVisitorImpl{}, 3,
+			func(module *moduleInfo, pause chan<- pauseSpec) bool {
+				if module == moduleA {
+					// Pause module A on module B (an existing dependency)
+					unpause := make(chan struct{})
+					pause <- pauseSpec{moduleA, moduleB, unpause}
+					<-unpause
+				}
+				order += module.group.name
+				return false
+			})
+		if errs != nil {
+			t.Errorf("expected no errors, got %q", errs)
+		}
+		if g, w := order, "CBA"; g != w {
+			t.Errorf("expected order %q, got %q", w, g)
+		}
+	})
+	t.Run("cycle", func(t *testing.T) {
+		errs := parallelVisit([]*moduleInfo{moduleA, moduleB, moduleC}, bottomUpVisitorImpl{}, 3,
+			func(module *moduleInfo, pause chan<- pauseSpec) bool {
+				if module == moduleC {
+					// Pause module C on module A (a dependency cycle)
+					unpause := make(chan struct{})
+					pause <- pauseSpec{moduleC, moduleA, unpause}
+					<-unpause
+				}
+				return false
+			})
+		want := []string{
+			`encountered dependency cycle`,
+			`module "C" depends on module "A"`,
+			`module "A" depends on module "B"`,
+			`module "B" depends on module "C"`,
+		}
+		for i := range want {
+			if len(errs) <= i {
+				t.Errorf("missing error %s", want[i])
+			} else if !strings.Contains(errs[i].Error(), want[i]) {
+				t.Errorf("expected error %s, got %s", want[i], errs[i])
+			}
+		}
+		if len(errs) > len(want) {
+			for _, err := range errs[len(want):] {
+				t.Errorf("unexpected error %s", err.Error())
+			}
+		}
+	})
+	t.Run("pause cycle", func(t *testing.T) {
+		errs := parallelVisit([]*moduleInfo{moduleA, moduleB, moduleC, moduleD}, bottomUpVisitorImpl{}, 3,
+			func(module *moduleInfo, pause chan<- pauseSpec) bool {
+				if module == moduleC {
+					// Pause module C on module D
+					unpause := make(chan struct{})
+					pause <- pauseSpec{moduleC, moduleD, unpause}
+					<-unpause
+				}
+				if module == moduleD {
+					// Pause module D on module C (a pause cycle)
+					unpause := make(chan struct{})
+					pause <- pauseSpec{moduleD, moduleC, unpause}
+					<-unpause
+				}
+				return false
+			})
+		want := []string{
+			`encountered dependency cycle`,
+			`module "D" depends on module "C"`,
+			`module "C" depends on module "D"`,
+		}
+		for i := range want {
+			if len(errs) <= i {
+				t.Errorf("missing error %s", want[i])
+			} else if !strings.Contains(errs[i].Error(), want[i]) {
+				t.Errorf("expected error %s, got %s", want[i], errs[i])
+			}
+		}
+		if len(errs) > len(want) {
+			for _, err := range errs[len(want):] {
+				t.Errorf("unexpected error %s", err.Error())
+			}
+		}
+	})
+	t.Run("pause cycle with deps", func(t *testing.T) {
+		pauseDeps := map[*moduleInfo]*moduleInfo{
+			// F and G form a pause cycle
+			moduleF: moduleG,
+			moduleG: moduleF,
+			// D depends on E which depends on the pause cycle, making E the first alphabetical
+			// entry in pauseMap, which is not part of the cycle.
+			moduleD: moduleE,
+			moduleE: moduleF,
+		}
+		errs := parallelVisit([]*moduleInfo{moduleD, moduleE, moduleF, moduleG}, bottomUpVisitorImpl{}, 4,
+			func(module *moduleInfo, pause chan<- pauseSpec) bool {
+				if dep, ok := pauseDeps[module]; ok {
+					unpause := make(chan struct{})
+					pause <- pauseSpec{module, dep, unpause}
+					<-unpause
+				}
+				return false
+			})
+		want := []string{
+			`encountered dependency cycle`,
+			`module "G" depends on module "F"`,
+			`module "F" depends on module "G"`,
+		}
+		for i := range want {
+			if len(errs) <= i {
+				t.Errorf("missing error %s", want[i])
+			} else if !strings.Contains(errs[i].Error(), want[i]) {
+				t.Errorf("expected error %s, got %s", want[i], errs[i])
+			}
+		}
+		if len(errs) > len(want) {
+			for _, err := range errs[len(want):] {
+				t.Errorf("unexpected error %s", err.Error())
+			}
+		}
+	})
+}
diff --git a/glob.go b/glob.go
index 4f7e978..91ae723 100644
--- a/glob.go
+++ b/glob.go
@@ -15,7 +15,6 @@
 package blueprint
 
 import (
-	"crypto/md5"
 	"fmt"
 	"sort"
 	"strings"
@@ -23,108 +22,95 @@
 	"github.com/google/blueprint/pathtools"
 )
 
-type GlobPath struct {
-	Pattern  string
-	Excludes []string
-	Files    []string
-	Deps     []string
-	Name     string
-}
-
-func verifyGlob(fileName, pattern string, excludes []string, g GlobPath) {
+func verifyGlob(key globKey, pattern string, excludes []string, g pathtools.GlobResult) {
 	if pattern != g.Pattern {
-		panic(fmt.Errorf("Mismatched patterns %q and %q for glob file %q", pattern, g.Pattern, fileName))
+		panic(fmt.Errorf("Mismatched patterns %q and %q for glob key %q", pattern, g.Pattern, key))
 	}
 	if len(excludes) != len(g.Excludes) {
-		panic(fmt.Errorf("Mismatched excludes %v and %v for glob file %q", excludes, g.Excludes, fileName))
+		panic(fmt.Errorf("Mismatched excludes %v and %v for glob key %q", excludes, g.Excludes, key))
 	}
 
 	for i := range excludes {
 		if g.Excludes[i] != excludes[i] {
-			panic(fmt.Errorf("Mismatched excludes %v and %v for glob file %q", excludes, g.Excludes, fileName))
+			panic(fmt.Errorf("Mismatched excludes %v and %v for glob key %q", excludes, g.Excludes, key))
 		}
 	}
 }
 
 func (c *Context) glob(pattern string, excludes []string) ([]string, error) {
-	fileName := globToFileName(pattern, excludes)
+	// Sort excludes so that two globs with the same excludes in a different order reuse the same
+	// key.  Make a copy first to avoid modifying the caller's version.
+	excludes = append([]string(nil), excludes...)
+	sort.Strings(excludes)
+
+	key := globToKey(pattern, excludes)
 
 	// Try to get existing glob from the stored results
 	c.globLock.Lock()
-	g, exists := c.globs[fileName]
+	g, exists := c.globs[key]
 	c.globLock.Unlock()
 
 	if exists {
 		// Glob has already been done, double check it is identical
-		verifyGlob(fileName, pattern, excludes, g)
-		return g.Files, nil
+		verifyGlob(key, pattern, excludes, g)
+		// Return a copy so that modifications don't affect the cached value.
+		return append([]string(nil), g.Matches...), nil
 	}
 
 	// Get a globbed file list
-	files, deps, err := c.fs.Glob(pattern, excludes, pathtools.FollowSymlinks)
+	result, err := c.fs.Glob(pattern, excludes, pathtools.FollowSymlinks)
 	if err != nil {
 		return nil, err
 	}
 
 	// Store the results
 	c.globLock.Lock()
-	if g, exists = c.globs[fileName]; !exists {
-		c.globs[fileName] = GlobPath{pattern, excludes, files, deps, fileName}
+	if g, exists = c.globs[key]; !exists {
+		c.globs[key] = result
 	}
 	c.globLock.Unlock()
 
-	// Getting the list raced with another goroutine, throw away the results and use theirs
 	if exists {
-		verifyGlob(fileName, pattern, excludes, g)
-		return g.Files, nil
+		// Getting the list raced with another goroutine, throw away the results and use theirs
+		verifyGlob(key, pattern, excludes, g)
+		// Return a copy so that modifications don't affect the cached value.
+		return append([]string(nil), g.Matches...), nil
 	}
 
-	return files, nil
+	// Return a copy so that modifications don't affect the cached value.
+	return append([]string(nil), result.Matches...), nil
 }
 
-func (c *Context) Globs() []GlobPath {
-	fileNames := make([]string, 0, len(c.globs))
+func (c *Context) Globs() pathtools.MultipleGlobResults {
+	keys := make([]globKey, 0, len(c.globs))
 	for k := range c.globs {
-		fileNames = append(fileNames, k)
+		keys = append(keys, k)
 	}
-	sort.Strings(fileNames)
 
-	globs := make([]GlobPath, len(fileNames))
-	for i, fileName := range fileNames {
-		globs[i] = c.globs[fileName]
+	sort.Slice(keys, func(i, j int) bool {
+		if keys[i].pattern != keys[j].pattern {
+			return keys[i].pattern < keys[j].pattern
+		}
+		return keys[i].excludes < keys[j].excludes
+	})
+
+	globs := make(pathtools.MultipleGlobResults, len(keys))
+	for i, key := range keys {
+		globs[i] = c.globs[key]
 	}
 
 	return globs
 }
 
-func globToString(pattern string) string {
-	ret := ""
-	for _, c := range pattern {
-		switch {
-		case c >= 'a' && c <= 'z',
-			c >= 'A' && c <= 'Z',
-			c >= '0' && c <= '9',
-			c == '_', c == '-', c == '/':
-			ret += string(c)
-		default:
-			ret += "_"
-		}
-	}
-
-	return ret
+// globKey combines a pattern and a list of excludes into a hashable struct to be used as a key in
+// a map.
+type globKey struct {
+	pattern  string
+	excludes string
 }
 
-func globToFileName(pattern string, excludes []string) string {
-	name := globToString(pattern)
-	excludeName := ""
-	for _, e := range excludes {
-		excludeName += "__" + globToString(e)
-	}
-
-	// Prevent file names from reaching ninja's path component limit
-	if strings.Count(name, "/")+strings.Count(excludeName, "/") > 30 {
-		excludeName = fmt.Sprintf("___%x", md5.Sum([]byte(excludeName)))
-	}
-
-	return name + excludeName + ".glob"
+// globToKey converts a pattern and an excludes list into a globKey struct that is hashable and
+// usable as a key in a map.
+func globToKey(pattern string, excludes []string) globKey {
+	return globKey{pattern, strings.Join(excludes, "|")}
 }
diff --git a/live_tracker.go b/live_tracker.go
index 40e1930..1d48e58 100644
--- a/live_tracker.go
+++ b/live_tracker.go
@@ -68,6 +68,11 @@
 		return err
 	}
 
+	err = l.addNinjaStringListDeps(def.Validations)
+	if err != nil {
+		return err
+	}
+
 	for _, value := range def.Variables {
 		err = l.addNinjaStringDeps(value)
 		if err != nil {
diff --git a/microfactory/microfactory.go b/microfactory/microfactory.go
index a70d3c5..a0c9a14 100644
--- a/microfactory/microfactory.go
+++ b/microfactory/microfactory.go
@@ -397,6 +397,7 @@
 	fmt.Fprintln(hash, runtime.GOOS, runtime.GOARCH, goVersion)
 
 	cmd := exec.Command(filepath.Join(goToolDir, "compile"),
+		"-N", "-l", // Disable optimization and inlining so that debugging works better
 		"-o", p.output,
 		"-p", p.Name,
 		"-complete", "-pack", "-nolocalimports")
diff --git a/module_ctx.go b/module_ctx.go
index 36e05a4..a074e37 100644
--- a/module_ctx.go
+++ b/module_ctx.go
@@ -246,6 +246,24 @@
 	// invalidated by future mutators.
 	WalkDeps(visit func(Module, Module) bool)
 
+	// PrimaryModule returns the first variant of the current module.  Variants of a module are always visited in
+	// order by mutators and GenerateBuildActions, so the data created by the current mutator can be read from the
+	// Module returned by PrimaryModule without data races.  This can be used to perform singleton actions that are
+	// only done once for all variants of a module.
+	PrimaryModule() Module
+
+	// FinalModule returns the last variant of the current module.  Variants of a module are always visited in
+	// order by mutators and GenerateBuildActions, so the data created by the current mutator can be read from all
+	// variants using VisitAllModuleVariants if the current module == FinalModule().  This can be used to perform
+	// singleton actions that are only done once for all variants of a module.
+	FinalModule() Module
+
+	// VisitAllModuleVariants calls visit for each variant of the current module.  Variants of a module are always
+	// visited in order by mutators and GenerateBuildActions, so the data created by the current mutator can be read
+	// from all variants if the current module == FinalModule().  Otherwise, care must be taken to not access any
+	// data modified by the current mutator.
+	VisitAllModuleVariants(visit func(Module))
+
 	// OtherModuleName returns the name of another Module.  See BaseModuleContext.ModuleName for more information.
 	// It is intended for use inside the visit functions of Visit* and WalkDeps.
 	OtherModuleName(m Module) string
@@ -275,6 +293,51 @@
 	// OtherModuleExists returns true if a module with the specified name exists, as determined by the NameInterface
 	// passed to Context.SetNameInterface, or SimpleNameInterface if it was not called.
 	OtherModuleExists(name string) bool
+
+	// OtherModuleDependencyVariantExists returns true if a module with the
+	// specified name and variant exists. The variant must match the given
+	// variations. It must also match all the non-local variations of the current
+	// module. In other words, it checks for the module that AddVariationDependencies
+	// would add a dependency on with the same arguments.
+	OtherModuleDependencyVariantExists(variations []Variation, name string) bool
+
+	// OtherModuleFarDependencyVariantExists returns true if a module with the
+	// specified name and variant exists. The variant must match the given
+	// variations, but not the non-local variations of the current module. In
+	// other words, it checks for the module that AddFarVariationDependencies
+	// would add a dependency on with the same arguments.
+	OtherModuleFarDependencyVariantExists(variations []Variation, name string) bool
+
+	// OtherModuleReverseDependencyVariantExists returns true if a module with the
+	// specified name exists with the same variations as the current module. In
+	// other words, it checks for the module that AddReverseDependency would add a
+	// dependency on with the same argument.
+	OtherModuleReverseDependencyVariantExists(name string) bool
+
+	// OtherModuleProvider returns the value for a provider for the given module.  If the value is
+	// not set it returns the zero value of the type of the provider, so the return value can always
+	// be type asserted to the type of the provider.  The value returned may be a deep copy of the
+	// value originally passed to SetProvider.
+	OtherModuleProvider(m Module, provider ProviderKey) interface{}
+
+	// OtherModuleHasProvider returns true if the provider for the given module has been set.
+	OtherModuleHasProvider(m Module, provider ProviderKey) bool
+
+	// Provider returns the value for a provider for the current module.  If the value is
+	// not set it returns the zero value of the type of the provider, so the return value can always
+	// be type asserted to the type of the provider.  It panics if called before the appropriate
+	// mutator or GenerateBuildActions pass for the provider.  The value returned may be a deep
+	// copy of the value originally passed to SetProvider.
+	Provider(provider ProviderKey) interface{}
+
+	// HasProvider returns true if the provider for the current module has been set.
+	HasProvider(provider ProviderKey) bool
+
+	// SetProvider sets the value for a provider for the current module.  It panics if not called
+	// during the appropriate mutator or GenerateBuildActions pass for the provider, if the value
+	// is not of the appropriate type, or if the value has already been set.  The value should not
+	// be modified after being passed to SetProvider.
+	SetProvider(provider ProviderKey, value interface{})
 }
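PrimaryModule, FinalModule and VisitAllModuleVariants moved from ModuleContext into the base context above; a hedged sketch of the once-per-group pattern they enable (aggModule is a hypothetical module type):

package example

import "github.com/google/blueprint"

type aggModule struct {
	name   string
	output string // per-variant data written below
}

func (m *aggModule) Name() string { return m.name }

func (m *aggModule) GenerateBuildActions(ctx blueprint.ModuleContext) {
	m.output = "out/" + ctx.ModuleName() // per-variant work

	// Variants are visited in order, so the final variant can safely read what
	// the earlier variants wrote and emit a single aggregate action.
	if blueprint.Module(m) == ctx.FinalModule() {
		var all []string
		ctx.VisitAllModuleVariants(func(v blueprint.Module) {
			all = append(all, v.(*aggModule).output)
		})
		_ = all // e.g. one rule combining every variant's output
	}
}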
 
 type DynamicDependerModuleContext BottomUpMutatorContext
@@ -296,24 +359,6 @@
 	// Build creates a new ninja build statement.
 	Build(pctx PackageContext, params BuildParams)
 
-	// PrimaryModule returns the first variant of the current module.  Variants of a module are always visited in
-	// order by mutators and GenerateBuildActions, so the data created by the current mutator can be read from the
-	// Module returned by PrimaryModule without data races.  This can be used to perform singleton actions that are
-	// only done once for all variants of a module.
-	PrimaryModule() Module
-
-	// FinalModule returns the last variant of the current module.  Variants of a module are always visited in
-	// order by mutators and GenerateBuildActions, so the data created by the current mutator can be read from all
-	// variants using VisitAllModuleVariants if the current module == FinalModule().  This can be used to perform
-	// singleton actions that are only done once for all variants of a module.
-	FinalModule() Module
-
-	// VisitAllModuleVariants calls visit for each variant of the current module.  Variants of a module are always
-	// visited in order by mutators and GenerateBuildActions, so the data created by the current mutator can be read
-	// from all variants if the current module == FinalModule().  Otherwise, care must be taken to not access any
-	// data modified by the current mutator.
-	VisitAllModuleVariants(visit func(Module))
-
 	// GetMissingDependencies returns the list of dependencies that were passed to AddDependencies or related methods,
 	// but do not exist.  It can be used with Context.SetAllowMissingDependencies to allow the primary builder to
 	// handle missing dependencies on its own instead of having Blueprint treat them as an error.
@@ -451,7 +496,7 @@
 
 func (m *baseModuleContext) OtherModuleSubDir(logicModule Module) string {
 	module := m.context.moduleInfo[logicModule]
-	return module.variantName
+	return module.variant.name
 }
 
 func (m *baseModuleContext) OtherModuleType(logicModule Module) string {
@@ -492,6 +537,59 @@
 	return exists
 }
 
+func (m *baseModuleContext) OtherModuleDependencyVariantExists(variations []Variation, name string) bool {
+	possibleDeps := m.context.moduleGroupFromName(name, m.module.namespace())
+	if possibleDeps == nil {
+		return false
+	}
+	found, _ := findVariant(m.module, possibleDeps, variations, false, false)
+	return found != nil
+}
+
+func (m *baseModuleContext) OtherModuleFarDependencyVariantExists(variations []Variation, name string) bool {
+	possibleDeps := m.context.moduleGroupFromName(name, m.module.namespace())
+	if possibleDeps == nil {
+		return false
+	}
+	found, _ := findVariant(m.module, possibleDeps, variations, true, false)
+	return found != nil
+}
+
+func (m *baseModuleContext) OtherModuleReverseDependencyVariantExists(name string) bool {
+	possibleDeps := m.context.moduleGroupFromName(name, m.module.namespace())
+	if possibleDeps == nil {
+		return false
+	}
+	found, _ := findVariant(m.module, possibleDeps, nil, false, true)
+	return found != nil
+}
+
+func (m *baseModuleContext) OtherModuleProvider(logicModule Module, provider ProviderKey) interface{} {
+	module := m.context.moduleInfo[logicModule]
+	value, _ := m.context.provider(module, provider)
+	return value
+}
+
+func (m *baseModuleContext) OtherModuleHasProvider(logicModule Module, provider ProviderKey) bool {
+	module := m.context.moduleInfo[logicModule]
+	_, ok := m.context.provider(module, provider)
+	return ok
+}
+
+func (m *baseModuleContext) Provider(provider ProviderKey) interface{} {
+	value, _ := m.context.provider(m.module, provider)
+	return value
+}
+
+func (m *baseModuleContext) HasProvider(provider ProviderKey) bool {
+	_, ok := m.context.provider(m.module, provider)
+	return ok
+}
+
+func (m *baseModuleContext) SetProvider(provider ProviderKey, value interface{}) {
+	m.context.setProvider(m.module, provider, value)
+}
+
 func (m *baseModuleContext) GetDirectDep(name string) (Module, DependencyTag) {
 	for _, dep := range m.module.directDeps {
 		if dep.module.Name() == name {
@@ -611,6 +709,18 @@
 	m.visitingDep = depInfo{}
 }
 
+func (m *baseModuleContext) PrimaryModule() Module {
+	return m.module.group.modules.firstModule().logicModule
+}
+
+func (m *baseModuleContext) FinalModule() Module {
+	return m.module.group.modules.lastModule().logicModule
+}
+
+func (m *baseModuleContext) VisitAllModuleVariants(visit func(Module)) {
+	m.context.visitAllModuleVariants(m.module, visit)
+}
+
 func (m *baseModuleContext) AddNinjaFileDeps(deps ...string) {
 	m.ninjaFileDeps = append(m.ninjaFileDeps, deps...)
 }
@@ -624,7 +734,7 @@
 }
 
 func (m *moduleContext) ModuleSubDir() string {
-	return m.module.variantName
+	return m.module.variant.name
 }
 
 func (m *moduleContext) Variable(pctx PackageContext, name, value string) {
@@ -664,18 +774,6 @@
 	m.actionDefs.buildDefs = append(m.actionDefs.buildDefs, def)
 }
 
-func (m *moduleContext) PrimaryModule() Module {
-	return m.module.group.modules[0].logicModule
-}
-
-func (m *moduleContext) FinalModule() Module {
-	return m.module.group.modules[len(m.module.group.modules)-1].logicModule
-}
-
-func (m *moduleContext) VisitAllModuleVariants(visit func(Module)) {
-	m.context.visitAllModuleVariants(m.module, visit)
-}
-
 func (m *moduleContext) GetMissingDependencies() []string {
 	m.handledMissingDeps = true
 	return m.module.missingDeps
@@ -691,9 +789,10 @@
 	reverseDeps      []reverseDep
 	rename           []rename
 	replace          []replace
-	newVariations    []*moduleInfo // new variants of existing modules
-	newModules       []*moduleInfo // brand new modules
+	newVariations    modulesOrAliases // new variants of existing modules
+	newModules       []*moduleInfo    // brand new modules
 	defaultVariation *string
+	pauseCh          chan<- pauseSpec
 }
 
 type BaseMutatorContext interface {
@@ -744,10 +843,15 @@
 type BottomUpMutatorContext interface {
 	BaseMutatorContext
 
-	// AddDependency adds a dependency to the given module.
-	// Does not affect the ordering of the current mutator pass, but will be ordered
-	// correctly for all future mutator passes.
-	AddDependency(module Module, tag DependencyTag, name ...string)
+	// AddDependency adds a dependency to the given module.  It returns a slice of modules for each
+	// dependency (some entries may be nil).
+	//
+	// If the mutator is parallel (see MutatorHandle.Parallel), this method will pause until the
+	// new dependencies have had the current mutator called on them.  If the mutator is not
+	// parallel this method does not affect the ordering of the current mutator pass, but will
+	// be ordered correctly for all future mutator passes.
+	AddDependency(module Module, tag DependencyTag, name ...string) []Module
 
 	// AddReverseDependency adds a dependency from the destination to the given module.
 	// Does not affect the ordering of the current mutator pass, but will be ordered
@@ -787,19 +891,30 @@
 	SetDefaultDependencyVariation(*string)
 
 	// AddVariationDependencies adds deps as dependencies of the current module, but uses the variations
-	// argument to select which variant of the dependency to use.  A variant of the dependency must
-	// exist that matches the all of the non-local variations of the current module, plus the variations
-	// argument.
-	AddVariationDependencies([]Variation, DependencyTag, ...string)
+	// argument to select which variant of the dependency to use.  It returns a slice of modules for
+	// each dependency (some entries may be nil).  A variant of the dependency must exist that matches
+	// all of the non-local variations of the current module, plus the variations argument.
+	//
+	// If the mutator is parallel (see MutatorHandle.Parallel), this method will pause until the
+	// new dependencies have had the current mutator called on them.  If the mutator is not
+	// parallel this method does not affect the ordering of the current mutator pass, but will
+	// be ordered correctly for all future mutator passes.
+	AddVariationDependencies([]Variation, DependencyTag, ...string) []Module
 
 	// AddFarVariationDependencies adds deps as dependencies of the current module, but uses the
-	// variations argument to select which variant of the dependency to use.  A variant of the
-	// dependency must exist that matches the variations argument, but may also have other variations.
+	// variations argument to select which variant of the dependency to use.  It returns a slice of
+	// modules for each dependency (some entries may be nil).  A variant of the dependency must
+	// exist that matches the variations argument, but may also have other variations.
 	// For any unspecified variation the first variant will be used.
 	//
 	// Unlike AddVariationDependencies, the variations of the current module are ignored - the
 	// dependency only needs to match the supplied variations.
-	AddFarVariationDependencies([]Variation, DependencyTag, ...string)
+	//
+	// If the mutator is parallel (see MutatorHandle.Parallel), this method will pause until the
+	// new dependencies have had the current mutator called on them.  If the mutator is not
+	// parallel this method does not affect the ordering of the current mutator pass, but will
+	// be ordered correctly for all future mutator passes.
+	AddFarVariationDependencies([]Variation, DependencyTag, ...string) []Module
 
 	// AddInterVariantDependency adds a dependency between two variants of the same module.  Variants are always
 	// ordered in the same order as they were listed in CreateVariations, and AddInterVariantDependency does not change
@@ -812,12 +927,36 @@
 	// after the mutator pass is finished.
 	ReplaceDependencies(string)
 
-	// AliasVariation takes a variationName that was passed to CreateVariations for this module, and creates an
-	// alias from the current variant to the new variant.  The alias will be valid until the next time a mutator
-	// calls CreateVariations or CreateLocalVariations on this module without also calling AliasVariation.  The
-	// alias can be used to add dependencies on the newly created variant using the variant map from before
-	// CreateVariations was run.
+	// ReplaceDependenciesIf replaces all dependencies on the identical variant of the module with the
+	// specified name with the current variant of this module as long as the supplied predicate returns
+	// true.
+	//
+	// Replacements don't take effect until after the mutator pass is finished.
+	ReplaceDependenciesIf(string, ReplaceDependencyPredicate)
+
+	// AliasVariation takes a variationName that was passed to CreateVariations for this module,
+	// and creates an alias from the current variant (before the mutator has run) to the new
+	// variant.  The alias will be valid until the next time a mutator calls CreateVariations or
+	// CreateLocalVariations on this module without also calling AliasVariation.  The alias can
+	// be used to add dependencies on the newly created variant using the variant map from
+	// before CreateVariations was run.
 	AliasVariation(variationName string)
+
+	// CreateAliasVariation takes a toVariationName that was passed to CreateVariations for this
+	// module, and creates an alias from a new fromVariationName variant to the toVariationName
+	// variant.  The alias will be valid until the next time a mutator calls CreateVariations or
+	// CreateLocalVariations on this module without also calling AliasVariation.  The alias can
+	// be used to add dependencies on the toVariationName variant using the fromVariationName
+	// variant.
+	CreateAliasVariation(fromVariationName, toVariationName string)
+
+	// SetVariationProvider sets the value for a provider for the given newly created variant of
+	// the current module, i.e. one of the Modules returned by CreateVariations.  It panics if
+	// not called during the appropriate mutator or GenerateBuildActions pass for the provider,
+	// if the value is not of the appropriate type, or if the module is not a newly created
+	// variant of the current module.  The value should not be modified after being passed to
+	// SetVariationProvider.
+	SetVariationProvider(module Module, provider ProviderKey, value interface{})
 }
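// An illustrative sketch (not part of this change) of a parallel bottom-up mutator that uses
// the new []Module return value of AddVariationDependencies, following the
// RegisterBottomUpMutator(...).Parallel() pattern used in the tests below.  The exampleDepTag
// type, the "example_deps" mutator name and the "libexample" module name are hypothetical.
package example

import "github.com/google/blueprint"

type exampleDepTag struct {
	blueprint.BaseDependencyTag
}

func registerExampleDepsMutator(ctx *blueprint.Context) {
	ctx.RegisterBottomUpMutator("example_deps", func(mctx blueprint.BottomUpMutatorContext) {
		if mctx.ModuleName() == "libexample" {
			// Don't add a dependency from the library onto itself.
			return
		}
		// Only add the dependency if a matching variant actually exists.
		if !mctx.OtherModuleDependencyVariantExists(nil, "libexample") {
			return
		}
		// Because the mutator is parallel the returned slice holds the resolved dependency
		// modules; in a non-parallel mutator every entry would be nil.
		for _, dep := range mctx.AddVariationDependencies(nil, exampleDepTag{}, "libexample") {
			if dep != nil {
				// The resolved module can be inspected immediately, e.g. through providers.
				_ = dep.Name()
			}
		}
	}).Parallel()
}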
 
 // A Mutator function is called for each Module, and can use
@@ -861,21 +1000,30 @@
 	return mctx.createVariations(variationNames, true)
 }
 
+func (mctx *mutatorContext) SetVariationProvider(module Module, provider ProviderKey, value interface{}) {
+	for _, variant := range mctx.newVariations {
+		if m := variant.module(); m != nil && m.logicModule == module {
+			mctx.context.setProvider(m, provider, value)
+			return
+		}
+	}
+	panic(fmt.Errorf("module %q is not a newly created variant of %q", module, mctx.module))
+}
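// An illustrative sketch (not part of this change) of SetVariationProvider applied to the
// variants returned by CreateVariations, assuming provider.go also exposes a
// NewMutatorProvider(example interface{}, mutator string) ProviderKey constructor.  The
// variantInfo type, the "example_arch" mutator name and the variation names are hypothetical.
package example

import "github.com/google/blueprint"

type variantInfo struct {
	Variation string
}

// variantInfoProvider may only be set during the "example_arch" mutator pass.
var variantInfoProvider = blueprint.NewMutatorProvider(variantInfo{}, "example_arch")

// exampleArchMutator would be registered with RegisterBottomUpMutator("example_arch", ...).
func exampleArchMutator(mctx blueprint.BottomUpMutatorContext) {
	names := []string{"arm", "x86"}
	variants := mctx.CreateVariations(names...)
	for i, name := range names {
		// Valid only for the Modules returned by the CreateVariations call above.
		mctx.SetVariationProvider(variants[i], variantInfoProvider, variantInfo{Variation: name})
	}
}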
+
+type pendingAlias struct {
+	fromVariant variant
+	target      *moduleInfo
+}
+
 func (mctx *mutatorContext) createVariations(variationNames []string, local bool) []Module {
 	ret := []Module{}
-	modules, errs := mctx.context.createVariations(mctx.module, mctx.name, mctx.defaultVariation, variationNames)
+	modules, errs := mctx.context.createVariations(mctx.module, mctx.name, mctx.defaultVariation, variationNames, local)
 	if len(errs) > 0 {
 		mctx.errs = append(mctx.errs, errs...)
 	}
 
-	for i, module := range modules {
-		ret = append(ret, module.logicModule)
-		if !local {
-			if module.dependencyVariant == nil {
-				module.dependencyVariant = make(variationMap)
-			}
-			module.dependencyVariant[mctx.name] = variationNames[i]
-		}
+	for _, module := range modules {
+		ret = append(ret, module.module().logicModule)
 	}
 
 	if mctx.newVariations != nil {
@@ -891,24 +1039,65 @@
 }
 
 func (mctx *mutatorContext) AliasVariation(variationName string) {
-	if mctx.module.aliasTarget != nil {
-		panic(fmt.Errorf("AliasVariation already called"))
+	for _, moduleOrAlias := range mctx.module.splitModules {
+		if alias := moduleOrAlias.alias(); alias != nil {
+			if alias.variant.variations.equal(mctx.module.variant.variations) {
+				panic(fmt.Errorf("AliasVariation already called"))
+			}
+		}
 	}
 
 	for _, variant := range mctx.newVariations {
-		if variant.variant[mctx.name] == variationName {
-			mctx.module.aliasTarget = variant
+		if variant.moduleOrAliasVariant().variations[mctx.name] == variationName {
+			alias := &moduleAlias{
+				variant: mctx.module.variant,
+				target:  variant.moduleOrAliasTarget(),
+			}
+			// Prepend the alias so that the subset matching in AddFarVariationDependencies
+			// finds the alias before matching the first variation.
+			mctx.module.splitModules = append(modulesOrAliases{alias}, mctx.module.splitModules...)
 			return
 		}
 	}
 
 	var foundVariations []string
 	for _, variant := range mctx.newVariations {
-		foundVariations = append(foundVariations, variant.variant[mctx.name])
+		foundVariations = append(foundVariations, variant.moduleOrAliasVariant().variations[mctx.name])
 	}
 	panic(fmt.Errorf("no %q variation in module variations %q", variationName, foundVariations))
 }
 
+func (mctx *mutatorContext) CreateAliasVariation(aliasVariationName, targetVariationName string) {
+	newVariant := newVariant(mctx.module, mctx.name, aliasVariationName, false)
+
+	for _, moduleOrAlias := range mctx.module.splitModules {
+		if moduleOrAlias.moduleOrAliasVariant().variations.equal(newVariant.variations) {
+			if alias := moduleOrAlias.alias(); alias != nil {
+				panic(fmt.Errorf("can't alias %q to %q, already aliased to %q", aliasVariationName, targetVariationName, alias.target.variant.name))
+			} else {
+				panic(fmt.Errorf("can't alias %q to %q, there is already a variant with that name", aliasVariationName, targetVariationName))
+			}
+		}
+	}
+
+	for _, variant := range mctx.newVariations {
+		if variant.moduleOrAliasVariant().variations[mctx.name] == targetVariationName {
+			// Append the alias here so that it comes after any aliases created by AliasVariation.
+			mctx.module.splitModules = append(mctx.module.splitModules, &moduleAlias{
+				variant: newVariant,
+				target:  variant.moduleOrAliasTarget(),
+			})
+			return
+		}
+	}
+
+	var foundVariations []string
+	for _, variant := range mctx.newVariations {
+		foundVariations = append(foundVariations, variant.moduleOrAliasVariant().variations[mctx.name])
+	}
+	panic(fmt.Errorf("no %q variation in module variations %q", targetVariationName, foundVariations))
+}
+
 func (mctx *mutatorContext) SetDependencyVariation(variationName string) {
 	mctx.context.convertDepsToVariation(mctx.module, mctx.name, variationName, nil)
 }
@@ -921,14 +1110,21 @@
 	return mctx.module.logicModule
 }
 
-func (mctx *mutatorContext) AddDependency(module Module, tag DependencyTag, deps ...string) {
+func (mctx *mutatorContext) AddDependency(module Module, tag DependencyTag, deps ...string) []Module {
+	depInfos := make([]Module, 0, len(deps))
 	for _, dep := range deps {
 		modInfo := mctx.context.moduleInfo[module]
-		errs := mctx.context.addDependency(modInfo, tag, dep)
+		depInfo, errs := mctx.context.addDependency(modInfo, tag, dep)
 		if len(errs) > 0 {
 			mctx.errs = append(mctx.errs, errs...)
 		}
+		if !mctx.pause(depInfo) {
+			// Pausing is not supported by this mutator, so new dependencies can't be returned.
+			depInfo = nil
+		}
+		depInfos = append(depInfos, maybeLogicModule(depInfo))
 	}
+	return depInfos
 }
 
 func (mctx *mutatorContext) AddReverseDependency(module Module, tag DependencyTag, destName string) {
@@ -949,25 +1145,39 @@
 }
 
 func (mctx *mutatorContext) AddVariationDependencies(variations []Variation, tag DependencyTag,
-	deps ...string) {
+	deps ...string) []Module {
 
+	depInfos := make([]Module, 0, len(deps))
 	for _, dep := range deps {
-		errs := mctx.context.addVariationDependency(mctx.module, variations, tag, dep, false)
+		depInfo, errs := mctx.context.addVariationDependency(mctx.module, variations, tag, dep, false)
 		if len(errs) > 0 {
 			mctx.errs = append(mctx.errs, errs...)
 		}
+		if !mctx.pause(depInfo) {
+			// Pausing is not supported by this mutator, so new dependencies can't be returned.
+			depInfo = nil
+		}
+		depInfos = append(depInfos, maybeLogicModule(depInfo))
 	}
+	return depInfos
 }
 
 func (mctx *mutatorContext) AddFarVariationDependencies(variations []Variation, tag DependencyTag,
-	deps ...string) {
+	deps ...string) []Module {
 
+	depInfos := make([]Module, 0, len(deps))
 	for _, dep := range deps {
-		errs := mctx.context.addVariationDependency(mctx.module, variations, tag, dep, true)
+		depInfo, errs := mctx.context.addVariationDependency(mctx.module, variations, tag, dep, true)
 		if len(errs) > 0 {
 			mctx.errs = append(mctx.errs, errs...)
 		}
+		if !mctx.pause(depInfo) {
+			// Pausing is not supported by this mutator, so new dependencies can't be returned.
+			depInfo = nil
+		}
+		depInfos = append(depInfos, maybeLogicModule(depInfo))
 	}
+	return depInfos
 }
 
 func (mctx *mutatorContext) AddInterVariantDependency(tag DependencyTag, from, to Module) {
@@ -975,14 +1185,23 @@
 }
 
 func (mctx *mutatorContext) ReplaceDependencies(name string) {
+	mctx.ReplaceDependenciesIf(name, nil)
+}
+
+type ReplaceDependencyPredicate func(from Module, tag DependencyTag, to Module) bool
+
+func (mctx *mutatorContext) ReplaceDependenciesIf(name string, predicate ReplaceDependencyPredicate) {
 	target := mctx.context.moduleMatchingVariant(mctx.module, name)
 
 	if target == nil {
-		panic(fmt.Errorf("ReplaceDependencies could not find identical variant %q for module %q",
-			mctx.module.variantName, name))
+		panic(fmt.Errorf("ReplaceDependencies could not find identical variant {%s} for module %s\n"+
+			"available variants:\n  %s",
+			mctx.context.prettyPrintVariant(mctx.module.variant.variations),
+			name,
+			mctx.context.prettyPrintGroupVariants(mctx.context.moduleGroupFromName(name, mctx.module.namespace()))))
 	}
 
-	mctx.replace = append(mctx.replace, replace{target, mctx.module})
+	mctx.replace = append(mctx.replace, replace{target, mctx.module, predicate})
 }
 
 func (mctx *mutatorContext) Rename(name string) {
@@ -1009,6 +1228,26 @@
 	return module.logicModule
 }
 
+// pause waits until the given dependency has been visited by the mutator's parallelVisit call.
+// It returns true if the pause was supported, false if the pause was not supported and did not
+// occur, which will happen when the mutator is not parallelizable.  If the dependency is nil
+// it returns true if pausing is supported or false if it is not.
+func (mctx *mutatorContext) pause(dep *moduleInfo) bool {
+	if mctx.pauseCh != nil {
+		if dep != nil {
+			unpause := make(unpause)
+			mctx.pauseCh <- pauseSpec{
+				paused:  mctx.module,
+				until:   dep,
+				unpause: unpause,
+			}
+			<-unpause
+		}
+		return true
+	}
+	return false
+}
+
 // SimpleName is an embeddable object to implement the ModuleContext.Name method using a property
 // called "name".  Modules that embed it must also add SimpleName.Properties to their property
 // structure list.
@@ -1100,7 +1339,7 @@
 }
 
 func runAndRemoveLoadHooks(ctx *Context, config interface{}, module *moduleInfo,
-	scopedModuleFactories *map[string]ModuleFactory) (newModules []*moduleInfo, errs []error) {
+	scopedModuleFactories *map[string]ModuleFactory) (newModules []*moduleInfo, deps []string, errs []error) {
 
 	if v, exists := pendingHooks.Load(module.logicModule); exists {
 		hooks := v.(*[]LoadHook)
@@ -1116,21 +1355,21 @@
 		for _, hook := range *hooks {
 			hook(mctx)
 			newModules = append(newModules, mctx.newModules...)
+			deps = append(deps, mctx.ninjaFileDeps...)
 			errs = append(errs, mctx.errs...)
 		}
 		pendingHooks.Delete(module.logicModule)
 
-		return newModules, errs
+		return newModules, deps, errs
 	}
 
-	return nil, nil
+	return nil, nil, nil
 }
 
 // Check the syntax of a generated blueprint file.
 //
-// This is intended to perform a quick sanity check for generated blueprint
-// code to ensure that it is syntactically correct, where syntactically correct
-// means:
+// This is intended to perform a quick syntactic check for generated blueprint
+// code, where syntactically correct means:
 // * No variable definitions.
 // * Valid module types.
 // * Valid property names.
@@ -1166,3 +1405,11 @@
 
 	return errs
 }
+
+func maybeLogicModule(module *moduleInfo) Module {
+	if module != nil {
+		return module.logicModule
+	} else {
+		return nil
+	}
+}
diff --git a/module_ctx_test.go b/module_ctx_test.go
index e98ae82..d57982e 100644
--- a/module_ctx_test.go
+++ b/module_ctx_test.go
@@ -32,7 +32,7 @@
 func (f *moduleCtxTestModule) GenerateBuildActions(ModuleContext) {
 }
 
-func noCreateAliasMutator(name string) func(ctx BottomUpMutatorContext) {
+func noAliasMutator(name string) func(ctx BottomUpMutatorContext) {
 	return func(ctx BottomUpMutatorContext) {
 		if ctx.ModuleName() == name {
 			ctx.CreateVariations("a", "b")
@@ -40,11 +40,22 @@
 	}
 }
 
+func aliasMutator(name string) func(ctx BottomUpMutatorContext) {
+	return func(ctx BottomUpMutatorContext) {
+		if ctx.ModuleName() == name {
+			ctx.CreateVariations("a", "b")
+			ctx.AliasVariation("b")
+		}
+	}
+}
+
 func createAliasMutator(name string) func(ctx BottomUpMutatorContext) {
 	return func(ctx BottomUpMutatorContext) {
 		if ctx.ModuleName() == name {
 			ctx.CreateVariations("a", "b")
-			ctx.AliasVariation("b")
+			ctx.CreateAliasVariation("c", "a")
+			ctx.CreateAliasVariation("d", "b")
+			ctx.CreateAliasVariation("e", "a")
 		}
 	}
 }
@@ -57,7 +68,16 @@
 	}
 }
 
-func TestAliases(t *testing.T) {
+func addVariantDepsResultMutator(variants []Variation, tag DependencyTag, from, to string, results map[string][]Module) func(ctx BottomUpMutatorContext) {
+	return func(ctx BottomUpMutatorContext) {
+		if ctx.ModuleName() == from {
+			ret := ctx.AddVariationDependencies(variants, tag, to)
+			results[ctx.ModuleName()] = ret
+		}
+	}
+}
+
+func TestAliasVariation(t *testing.T) {
 	runWithFailures := func(ctx *Context, expectedErr string) {
 		t.Helper()
 		bp := `
@@ -115,17 +135,13 @@
 		// Tests a dependency from "foo" to "bar" variant "b" through alias "".
 		ctx := NewContext()
 		ctx.RegisterModuleType("test", newModuleCtxTestModule)
-		ctx.RegisterBottomUpMutator("1", createAliasMutator("bar"))
+		ctx.RegisterBottomUpMutator("1", aliasMutator("bar"))
 		ctx.RegisterBottomUpMutator("2", addVariantDepsMutator(nil, nil, "foo", "bar"))
 
 		run(ctx)
 
-		foo := ctx.moduleGroupFromName("foo", nil).modules[0]
-		barB := ctx.moduleGroupFromName("bar", nil).modules[1]
-
-		if g, w := barB.variantName, "b"; g != w {
-			t.Fatalf("expected bar.modules[1] variant to be %q, got %q", w, g)
-		}
+		foo := ctx.moduleGroupFromName("foo", nil).moduleByVariantName("")
+		barB := ctx.moduleGroupFromName("bar", nil).moduleByVariantName("b")
 
 		if g, w := foo.forwardDeps, []*moduleInfo{barB}; !reflect.DeepEqual(g, w) {
 			t.Fatalf("expected foo deps to be %q, got %q", w, g)
@@ -138,18 +154,14 @@
 		// Tests a dependency from "foo" to "bar" variant "b_b" through alias "".
 		ctx := NewContext()
 		ctx.RegisterModuleType("test", newModuleCtxTestModule)
-		ctx.RegisterBottomUpMutator("1", createAliasMutator("bar"))
-		ctx.RegisterBottomUpMutator("2", createAliasMutator("bar"))
+		ctx.RegisterBottomUpMutator("1", aliasMutator("bar"))
+		ctx.RegisterBottomUpMutator("2", aliasMutator("bar"))
 		ctx.RegisterBottomUpMutator("3", addVariantDepsMutator(nil, nil, "foo", "bar"))
 
 		run(ctx)
 
-		foo := ctx.moduleGroupFromName("foo", nil).modules[0]
-		barBB := ctx.moduleGroupFromName("bar", nil).modules[3]
-
-		if g, w := barBB.variantName, "b_b"; g != w {
-			t.Fatalf("expected bar.modules[3] variant to be %q, got %q", w, g)
-		}
+		foo := ctx.moduleGroupFromName("foo", nil).moduleByVariantName("")
+		barBB := ctx.moduleGroupFromName("bar", nil).moduleByVariantName("b_b")
 
 		if g, w := foo.forwardDeps, []*moduleInfo{barBB}; !reflect.DeepEqual(g, w) {
 			t.Fatalf("expected foo deps to be %q, got %q", w, g)
@@ -162,18 +174,14 @@
 		// Tests a dependency from "foo" to "bar" variant "a_b" through alias "a".
 		ctx := NewContext()
 		ctx.RegisterModuleType("test", newModuleCtxTestModule)
-		ctx.RegisterBottomUpMutator("1", createAliasMutator("bar"))
-		ctx.RegisterBottomUpMutator("2", createAliasMutator("bar"))
+		ctx.RegisterBottomUpMutator("1", aliasMutator("bar"))
+		ctx.RegisterBottomUpMutator("2", aliasMutator("bar"))
 		ctx.RegisterBottomUpMutator("3", addVariantDepsMutator([]Variation{{"1", "a"}}, nil, "foo", "bar"))
 
 		run(ctx)
 
-		foo := ctx.moduleGroupFromName("foo", nil).modules[0]
-		barAB := ctx.moduleGroupFromName("bar", nil).modules[1]
-
-		if g, w := barAB.variantName, "a_b"; g != w {
-			t.Fatalf("expected bar.modules[1] variant to be %q, got %q", w, g)
-		}
+		foo := ctx.moduleGroupFromName("foo", nil).moduleByVariantName("")
+		barAB := ctx.moduleGroupFromName("bar", nil).moduleByVariantName("a_b")
 
 		if g, w := foo.forwardDeps, []*moduleInfo{barAB}; !reflect.DeepEqual(g, w) {
 			t.Fatalf("expected foo deps to be %q, got %q", w, g)
@@ -186,13 +194,120 @@
 		// Tests a dependency from "foo" to removed "bar" alias "" fails.
 		ctx := NewContext()
 		ctx.RegisterModuleType("test", newModuleCtxTestModule)
-		ctx.RegisterBottomUpMutator("1", createAliasMutator("bar"))
-		ctx.RegisterBottomUpMutator("2", noCreateAliasMutator("bar"))
+		ctx.RegisterBottomUpMutator("1", aliasMutator("bar"))
+		ctx.RegisterBottomUpMutator("2", noAliasMutator("bar"))
 		ctx.RegisterBottomUpMutator("3", addVariantDepsMutator(nil, nil, "foo", "bar"))
 
 		runWithFailures(ctx, `dependency "bar" of "foo" missing variant:`+"\n  \n"+
 			"available variants:"+
-			"\n  1:a, 2:a\n  1:a, 2:b\n  1:b, 2:a\n  1:b, 2:b")
+			"\n  1:a,2:a\n  1:a,2:b\n  1:b,2:a\n  1:b,2:b")
+	})
+}
+
+func TestCreateAliasVariations(t *testing.T) {
+	runWithFailures := func(ctx *Context, expectedErr string) {
+		t.Helper()
+		bp := `
+			test {
+				name: "foo",
+			}
+
+			test {
+				name: "bar",
+			}
+		`
+
+		mockFS := map[string][]byte{
+			"Blueprints": []byte(bp),
+		}
+
+		ctx.MockFileSystem(mockFS)
+
+		_, errs := ctx.ParseFileList(".", []string{"Blueprints"}, nil)
+		if len(errs) > 0 {
+			t.Errorf("unexpected parse errors:")
+			for _, err := range errs {
+				t.Errorf("  %s", err)
+			}
+		}
+
+		_, errs = ctx.ResolveDependencies(nil)
+		if len(errs) > 0 {
+			if expectedErr == "" {
+				t.Errorf("unexpected dep errors:")
+				for _, err := range errs {
+					t.Errorf("  %s", err)
+				}
+			} else {
+				for _, err := range errs {
+					if strings.Contains(err.Error(), expectedErr) {
+						continue
+					} else {
+						t.Errorf("unexpected dep error: %s", err)
+					}
+				}
+			}
+		} else if expectedErr != "" {
+			t.Errorf("missing dep error: %s", expectedErr)
+		}
+	}
+
+	run := func(ctx *Context) {
+		t.Helper()
+		runWithFailures(ctx, "")
+	}
+
+	t.Run("simple", func(t *testing.T) {
+		// Creates a module "bar" with variants "a" and "b" and aliases "c" -> "a", "d" -> "b", and "e" -> "a".
+		// Tests a dependency from "foo" to "bar" variant "b" through alias "d".
+		ctx := NewContext()
+		ctx.RegisterModuleType("test", newModuleCtxTestModule)
+		ctx.RegisterBottomUpMutator("1", createAliasMutator("bar"))
+		ctx.RegisterBottomUpMutator("2", addVariantDepsMutator([]Variation{{"1", "d"}}, nil, "foo", "bar"))
+
+		run(ctx)
+
+		foo := ctx.moduleGroupFromName("foo", nil).moduleByVariantName("")
+		barB := ctx.moduleGroupFromName("bar", nil).moduleByVariantName("b")
+
+		if g, w := foo.forwardDeps, []*moduleInfo{barB}; !reflect.DeepEqual(g, w) {
+			t.Fatalf("expected foo deps to be %q, got %q", w, g)
+		}
+	})
+
+	t.Run("chained", func(t *testing.T) {
+		// Creates a module "bar" with variants "a_a", "a_b", "b_a" and "b_b" and aliases "c" -> "a_b",
+		// "d" -> "b_b", and "d" -> "a_b".
+		// Tests a dependency from "foo" to "bar" variant "b_b" through alias "d".
+		ctx := NewContext()
+		ctx.RegisterModuleType("test", newModuleCtxTestModule)
+		ctx.RegisterBottomUpMutator("1", createAliasMutator("bar"))
+		ctx.RegisterBottomUpMutator("2", aliasMutator("bar"))
+		ctx.RegisterBottomUpMutator("3", addVariantDepsMutator([]Variation{{"1", "d"}}, nil, "foo", "bar"))
+
+		run(ctx)
+
+		foo := ctx.moduleGroupFromName("foo", nil).moduleByVariantName("")
+		barBB := ctx.moduleGroupFromName("bar", nil).moduleByVariantName("b_b")
+
+		if g, w := foo.forwardDeps, []*moduleInfo{barBB}; !reflect.DeepEqual(g, w) {
+			t.Fatalf("expected foo deps to be %q, got %q", w, g)
+		}
+	})
+
+	t.Run("removed dangling alias", func(t *testing.T) {
+	// Creates a module "bar" with variants "a" and "b" and aliases "c" -> "a", "d" -> "b", and "e" -> "a",
+		// then splits the variants into "a_a", "a_b", "b_a" and "b_b" without creating new aliases.
+		// Tests a dependency from "foo" to removed "bar" alias "d" fails.
+		ctx := NewContext()
+		ctx.RegisterModuleType("test", newModuleCtxTestModule)
+		ctx.RegisterBottomUpMutator("1", createAliasMutator("bar"))
+		ctx.RegisterBottomUpMutator("2", noAliasMutator("bar"))
+		ctx.RegisterBottomUpMutator("3", addVariantDepsMutator([]Variation{{"1", "d"}}, nil, "foo", "bar"))
+
+		runWithFailures(ctx, `dependency "bar" of "foo" missing variant:`+"\n  1:d\n"+
+			"available variants:"+
+			"\n  1:a,2:a\n  1:a,2:b\n  1:b,2:a\n  1:b,2:b")
 	})
 }
 
@@ -210,6 +325,141 @@
 	}
 }
 
+func TestAddVariationDependencies(t *testing.T) {
+	runWithFailures := func(ctx *Context, expectedErr string) {
+		t.Helper()
+		bp := `
+			test {
+				name: "foo",
+			}
+
+			test {
+				name: "bar",
+			}
+		`
+
+		mockFS := map[string][]byte{
+			"Blueprints": []byte(bp),
+		}
+
+		ctx.MockFileSystem(mockFS)
+
+		_, errs := ctx.ParseFileList(".", []string{"Blueprints"}, nil)
+		if len(errs) > 0 {
+			t.Errorf("unexpected parse errors:")
+			for _, err := range errs {
+				t.Errorf("  %s", err)
+			}
+		}
+
+		_, errs = ctx.ResolveDependencies(nil)
+		if len(errs) > 0 {
+			if expectedErr == "" {
+				t.Errorf("unexpected dep errors:")
+				for _, err := range errs {
+					t.Errorf("  %s", err)
+				}
+			} else {
+				for _, err := range errs {
+					if strings.Contains(err.Error(), expectedErr) {
+						continue
+					} else {
+						t.Errorf("unexpected dep error: %s", err)
+					}
+				}
+			}
+		} else if expectedErr != "" {
+			t.Errorf("missing dep error: %s", expectedErr)
+		}
+	}
+
+	run := func(ctx *Context) {
+		t.Helper()
+		runWithFailures(ctx, "")
+	}
+
+	t.Run("parallel", func(t *testing.T) {
+		ctx := NewContext()
+		ctx.RegisterModuleType("test", newModuleCtxTestModule)
+		results := make(map[string][]Module)
+		depsMutator := addVariantDepsResultMutator(nil, nil, "foo", "bar", results)
+		ctx.RegisterBottomUpMutator("deps", depsMutator).Parallel()
+
+		run(ctx)
+
+		foo := ctx.moduleGroupFromName("foo", nil).moduleByVariantName("")
+		bar := ctx.moduleGroupFromName("bar", nil).moduleByVariantName("")
+
+		if g, w := foo.forwardDeps, []*moduleInfo{bar}; !reflect.DeepEqual(g, w) {
+			t.Fatalf("expected foo deps to be %q, got %q", w, g)
+		}
+
+		if g, w := results["foo"], []Module{bar.logicModule}; !reflect.DeepEqual(g, w) {
+			t.Fatalf("expected AddVariationDependencies return value to be %q, got %q", w, g)
+		}
+	})
+
+	t.Run("non-parallel", func(t *testing.T) {
+		ctx := NewContext()
+		ctx.RegisterModuleType("test", newModuleCtxTestModule)
+		results := make(map[string][]Module)
+		depsMutator := addVariantDepsResultMutator(nil, nil, "foo", "bar", results)
+		ctx.RegisterBottomUpMutator("deps", depsMutator)
+		run(ctx)
+
+		foo := ctx.moduleGroupFromName("foo", nil).moduleByVariantName("")
+		bar := ctx.moduleGroupFromName("bar", nil).moduleByVariantName("")
+
+		if g, w := foo.forwardDeps, []*moduleInfo{bar}; !reflect.DeepEqual(g, w) {
+			t.Fatalf("expected foo deps to be %q, got %q", w, g)
+		}
+
+		if g, w := results["foo"], []Module{nil}; !reflect.DeepEqual(g, w) {
+			t.Fatalf("expected AddVariationDependencies return value to be %q, got %q", w, g)
+		}
+	})
+
+	t.Run("missing", func(t *testing.T) {
+		ctx := NewContext()
+		ctx.RegisterModuleType("test", newModuleCtxTestModule)
+		results := make(map[string][]Module)
+		depsMutator := addVariantDepsResultMutator(nil, nil, "foo", "baz", results)
+		ctx.RegisterBottomUpMutator("deps", depsMutator).Parallel()
+		runWithFailures(ctx, `"foo" depends on undefined module "baz"`)
+
+		foo := ctx.moduleGroupFromName("foo", nil).moduleByVariantName("")
+
+		if g, w := foo.forwardDeps, []*moduleInfo(nil); !reflect.DeepEqual(g, w) {
+			t.Fatalf("expected foo deps to be %q, got %q", w, g)
+		}
+
+		if g, w := results["foo"], []Module{nil}; !reflect.DeepEqual(g, w) {
+			t.Fatalf("expected AddVariationDependencies return value to be %q, got %q", w, g)
+		}
+	})
+
+	t.Run("allow missing", func(t *testing.T) {
+		ctx := NewContext()
+		ctx.SetAllowMissingDependencies(true)
+		ctx.RegisterModuleType("test", newModuleCtxTestModule)
+		results := make(map[string][]Module)
+		depsMutator := addVariantDepsResultMutator(nil, nil, "foo", "baz", results)
+		ctx.RegisterBottomUpMutator("deps", depsMutator).Parallel()
+		run(ctx)
+
+		foo := ctx.moduleGroupFromName("foo", nil).moduleByVariantName("")
+
+		if g, w := foo.forwardDeps, []*moduleInfo(nil); !reflect.DeepEqual(g, w) {
+			t.Fatalf("expected foo deps to be %q, got %q", w, g)
+		}
+
+		if g, w := results["foo"], []Module{nil}; !reflect.DeepEqual(g, w) {
+			t.Fatalf("expected AddVariationDependencies return value to be %q, got %q", w, g)
+		}
+	})
+
+}
+
 func TestCheckBlueprintSyntax(t *testing.T) {
 	factories := map[string]ModuleFactory{
 		"test": newModuleCtxTestModule,
diff --git a/name_interface.go b/name_interface.go
index 1849e9d..5e7e16e 100644
--- a/name_interface.go
+++ b/name_interface.go
@@ -109,7 +109,7 @@
 		return nil, []error{
 			// seven characters at the start of the second line to align with the string "error: "
 			fmt.Errorf("module %q already defined\n"+
-				"       %s <-- previous definition here", name, group.modules[0].pos),
+				"       %s <-- previous definition here", name, group.modules.firstModule().pos),
 		}
 	}
 
@@ -130,7 +130,7 @@
 			// seven characters at the start of the second line to align with the string "error: "
 			fmt.Errorf("renaming module %q to %q conflicts with existing module\n"+
 				"       %s <-- existing module defined here",
-				oldName, newName, existingGroup.modules[0].pos),
+				oldName, newName, existingGroup.modules.firstModule().pos),
 		}
 	}
 
diff --git a/ninja_defs.go b/ninja_defs.go
index c5d0e4b..69233c2 100644
--- a/ninja_defs.go
+++ b/ninja_defs.go
@@ -56,15 +56,16 @@
 // definition.
 type RuleParams struct {
 	// These fields correspond to a Ninja variable of the same name.
-	Command        string // The command that Ninja will run for the rule.
-	Depfile        string // The dependency file name.
-	Deps           Deps   // The format of the dependency file.
-	Description    string // The description that Ninja will print for the rule.
-	Generator      bool   // Whether the rule generates the Ninja manifest file.
-	Pool           Pool   // The Ninja pool to which the rule belongs.
-	Restat         bool   // Whether Ninja should re-stat the rule's outputs.
-	Rspfile        string // The response file.
-	RspfileContent string // The response file content.
+	Command        string   // The command that Ninja will run for the rule.
+	Depfile        string   // The dependency file name.
+	Deps           Deps     // The format of the dependency file.
+	Description    string   // The description that Ninja will print for the rule.
+	Generator      bool     // Whether the rule generates the Ninja manifest file.
+	Pool           Pool     // The Ninja pool to which the rule belongs.
+	Restat         bool     // Whether Ninja should re-stat the rule's outputs.
+	Rspfile        string   // The response file.
+	RspfileContent string   // The response file content.
+	SymlinkOutputs []string // The list of Outputs or ImplicitOutputs that are symlinks.
 
 	// These fields are used internally in Blueprint
 	CommandDeps      []string // Command-specific implicit dependencies to prepend to builds
@@ -84,9 +85,11 @@
 	Rule            Rule              // The rule to invoke.
 	Outputs         []string          // The list of explicit output targets.
 	ImplicitOutputs []string          // The list of implicit output targets.
+	SymlinkOutputs  []string          // The list of Outputs or ImplicitOutputs that are symlinks.
 	Inputs          []string          // The list of explicit input dependencies.
 	Implicits       []string          // The list of implicit input dependencies.
 	OrderOnly       []string          // The list of order-only dependencies.
+	Validations     []string          // The list of validations to run when this rule runs.
 	Args            map[string]string // The variable/value pairs to set.
 	Optional        bool              // Skip outputting a default statement
 }
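// An illustrative sketch (not part of this change) of a build statement that fills in the new
// SymlinkOutputs and Validations fields.  The package path, rule, command and file names are
// hypothetical; the rule is assumed to produce both the binary and the symlink next to it.
package example

import "github.com/google/blueprint"

var (
	pctx = blueprint.NewPackageContext("example/build")

	linkRule = pctx.StaticRule("exampleLink", blueprint.RuleParams{
		Command:     "linker -o $out $in",
		Description: "link $out",
	})
)

func exampleBuildActions(ctx blueprint.ModuleContext) {
	ctx.Build(pctx, blueprint.BuildParams{
		Rule:           linkRule,
		Outputs:        []string{"out/example", "out/example.symlink"},
		SymlinkOutputs: []string{"out/example.symlink"},
		Inputs:         []string{"src/example.o"},
		// Validations run alongside this build without ordering its outputs after them.
		Validations: []string{"out/example.lint"},
	})
}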
@@ -204,6 +207,15 @@
 		r.Variables["rspfile_content"] = value
 	}
 
+	if len(params.SymlinkOutputs) > 0 {
+		value, err = parseNinjaString(scope, strings.Join(params.SymlinkOutputs, " "))
+		if err != nil {
+			return nil, fmt.Errorf("error parsing SymlinkOutputs param: %s",
+				err)
+		}
+		r.Variables["symlink_outputs"] = value
+	}
+
 	r.CommandDeps, err = parseNinjaStrings(scope, params.CommandDeps)
 	if err != nil {
 		return nil, fmt.Errorf("error parsing CommandDeps param: %s", err)
@@ -257,6 +269,7 @@
 	Inputs          []ninjaString
 	Implicits       []ninjaString
 	OrderOnly       []ninjaString
+	Validations     []ninjaString
 	Args            map[Variable]ninjaString
 	Variables       map[string]ninjaString
 	Optional        bool
@@ -314,6 +327,11 @@
 		return nil, fmt.Errorf("error parsing OrderOnly param: %s", err)
 	}
 
+	b.Validations, err = parseNinjaStrings(scope, params.Validations)
+	if err != nil {
+		return nil, fmt.Errorf("error parsing Validations param: %s", err)
+	}
+
 	b.Optional = params.Optional
 
 	if params.Depfile != "" {
@@ -336,6 +354,12 @@
 		setVariable("description", value)
 	}
 
+	if len(params.SymlinkOutputs) > 0 {
+		setVariable(
+			"symlink_outputs",
+			simpleNinjaString(strings.Join(params.SymlinkOutputs, " ")))
+	}
+
 	argNameScope := rule.scope()
 
 	if len(params.Args) > 0 {
@@ -368,49 +392,50 @@
 	var (
 		comment       = b.Comment
 		rule          = b.Rule.fullName(pkgNames)
-		outputs       = valueList(b.Outputs, pkgNames, outputEscaper)
-		implicitOuts  = valueList(b.ImplicitOutputs, pkgNames, outputEscaper)
-		explicitDeps  = valueList(b.Inputs, pkgNames, inputEscaper)
-		implicitDeps  = valueList(b.Implicits, pkgNames, inputEscaper)
-		orderOnlyDeps = valueList(b.OrderOnly, pkgNames, inputEscaper)
+		outputs       = b.Outputs
+		implicitOuts  = b.ImplicitOutputs
+		explicitDeps  = b.Inputs
+		implicitDeps  = b.Implicits
+		orderOnlyDeps = b.OrderOnly
+		validations   = b.Validations
 	)
 
 	if b.RuleDef != nil {
-		implicitDeps = append(valueList(b.RuleDef.CommandDeps, pkgNames, inputEscaper), implicitDeps...)
-		orderOnlyDeps = append(valueList(b.RuleDef.CommandOrderOnly, pkgNames, inputEscaper), orderOnlyDeps...)
+		implicitDeps = append(b.RuleDef.CommandDeps, implicitDeps...)
+		orderOnlyDeps = append(b.RuleDef.CommandOrderOnly, orderOnlyDeps...)
 	}
 
-	err := nw.Build(comment, rule, outputs, implicitOuts, explicitDeps, implicitDeps, orderOnlyDeps)
+	err := nw.Build(comment, rule, outputs, implicitOuts, explicitDeps, implicitDeps, orderOnlyDeps, validations, pkgNames)
 	if err != nil {
 		return err
 	}
 
-	args := make(map[string]string)
-
-	for argVar, value := range b.Args {
-		args[argVar.fullName(pkgNames)] = value.Value(pkgNames)
-	}
-
 	err = writeVariables(nw, b.Variables, pkgNames)
 	if err != nil {
 		return err
 	}
 
-	var keys []string
-	for k := range args {
-		keys = append(keys, k)
+	type nameValuePair struct {
+		name, value string
 	}
-	sort.Strings(keys)
 
-	for _, name := range keys {
-		err = nw.ScopedAssign(name, args[name])
+	args := make([]nameValuePair, 0, len(b.Args))
+
+	for argVar, value := range b.Args {
+		fullName := argVar.fullName(pkgNames)
+		args = append(args, nameValuePair{fullName, value.Value(pkgNames)})
+	}
+	sort.Slice(args, func(i, j int) bool { return args[i].name < args[j].name })
+
+	for _, pair := range args {
+		err = nw.ScopedAssign(pair.name, pair.value)
 		if err != nil {
 			return err
 		}
 	}
 
 	if !b.Optional {
-		err = nw.Default(outputs...)
+		err = nw.Default(pkgNames, outputs...)
 		if err != nil {
 			return err
 		}
@@ -419,16 +444,6 @@
 	return nw.BlankLine()
 }
 
-func valueList(list []ninjaString, pkgNames map[*packageContext]string,
-	escaper *strings.Replacer) []string {
-
-	result := make([]string, len(list))
-	for i, ninjaStr := range list {
-		result[i] = ninjaStr.ValueWithEscaper(pkgNames, escaper)
-	}
-	return result
-}
-
 func writeVariables(nw *ninjaWriter, variables map[string]ninjaString,
 	pkgNames map[*packageContext]string) error {
 	var keys []string
diff --git a/ninja_strings.go b/ninja_strings.go
index 190cae9..51a167d 100644
--- a/ninja_strings.go
+++ b/ninja_strings.go
@@ -17,6 +17,7 @@
 import (
 	"bytes"
 	"fmt"
+	"io"
 	"strings"
 )
 
@@ -36,7 +37,7 @@
 
 type ninjaString interface {
 	Value(pkgNames map[*packageContext]string) string
-	ValueWithEscaper(pkgNames map[*packageContext]string, escaper *strings.Replacer) string
+	ValueWithEscaper(w io.StringWriter, pkgNames map[*packageContext]string, escaper *strings.Replacer)
 	Eval(variables map[Variable]ninjaString) (string, error)
 	Variables() []Variable
 }
@@ -284,26 +285,24 @@
 }
 
 func (n varNinjaString) Value(pkgNames map[*packageContext]string) string {
-	return n.ValueWithEscaper(pkgNames, defaultEscaper)
+	if len(n.strings) == 1 {
+		return defaultEscaper.Replace(n.strings[0])
+	}
+	str := &strings.Builder{}
+	n.ValueWithEscaper(str, pkgNames, defaultEscaper)
+	return str.String()
 }
 
-func (n varNinjaString) ValueWithEscaper(pkgNames map[*packageContext]string,
-	escaper *strings.Replacer) string {
+func (n varNinjaString) ValueWithEscaper(w io.StringWriter, pkgNames map[*packageContext]string,
+	escaper *strings.Replacer) {
 
-	if len(n.strings) == 1 {
-		return escaper.Replace(n.strings[0])
-	}
-
-	str := strings.Builder{}
-	str.WriteString(escaper.Replace(n.strings[0]))
+	w.WriteString(escaper.Replace(n.strings[0]))
 	for i, v := range n.variables {
-		str.WriteString("${")
-		str.WriteString(v.fullName(pkgNames))
-		str.WriteString("}")
-		str.WriteString(escaper.Replace(n.strings[i+1]))
+		w.WriteString("${")
+		w.WriteString(v.fullName(pkgNames))
+		w.WriteString("}")
+		w.WriteString(escaper.Replace(n.strings[i+1]))
 	}
-
-	return str.String()
 }
 
 func (n varNinjaString) Eval(variables map[Variable]ninjaString) (string, error) {
@@ -327,12 +326,12 @@
 }
 
 func (l literalNinjaString) Value(pkgNames map[*packageContext]string) string {
-	return l.ValueWithEscaper(pkgNames, defaultEscaper)
+	return defaultEscaper.Replace(string(l))
 }
 
-func (l literalNinjaString) ValueWithEscaper(pkgNames map[*packageContext]string,
-	escaper *strings.Replacer) string {
-	return escaper.Replace(string(l))
+func (l literalNinjaString) ValueWithEscaper(w io.StringWriter, pkgNames map[*packageContext]string,
+	escaper *strings.Replacer) {
+	w.WriteString(escaper.Replace(string(l)))
 }
 
 func (l literalNinjaString) Eval(variables map[Variable]ninjaString) (string, error) {
diff --git a/ninja_writer.go b/ninja_writer.go
index 5366f3f..f9951b4 100644
--- a/ninja_writer.go
+++ b/ninja_writer.go
@@ -15,7 +15,6 @@
 package blueprint
 
 import (
-	"fmt"
 	"io"
 	"strings"
 	"unicode"
@@ -29,13 +28,18 @@
 
 var indentString = strings.Repeat(" ", indentWidth*maxIndentDepth)
 
+type StringWriterWriter interface {
+	io.StringWriter
+	io.Writer
+}
+
 type ninjaWriter struct {
-	writer io.Writer
+	writer io.StringWriter
 
 	justDidBlankLine bool // true if the last operation was a BlankLine
 }
 
-func newNinjaWriter(writer io.Writer) *ninjaWriter {
+func newNinjaWriter(writer io.StringWriter) *ninjaWriter {
 	return &ninjaWriter{
 		writer: writer,
 	}
@@ -72,7 +76,7 @@
 
 		if writeLine {
 			line = strings.TrimSpace("# "+line) + "\n"
-			_, err := io.WriteString(n.writer, line)
+			_, err := n.writer.WriteString(line)
 			if err != nil {
 				return err
 			}
@@ -82,7 +86,15 @@
 
 	if lineStart != len(comment) {
 		line := strings.TrimSpace(comment[lineStart:])
-		_, err := fmt.Fprintf(n.writer, "# %s\n", line)
+		_, err := n.writer.WriteString("# ")
+		if err != nil {
+			return err
+		}
+		_, err = n.writer.WriteString(line)
+		if err != nil {
+			return err
+		}
+		_, err = n.writer.WriteString("\n")
 		if err != nil {
 			return err
 		}
@@ -93,25 +105,24 @@
 
 func (n *ninjaWriter) Pool(name string) error {
 	n.justDidBlankLine = false
-	_, err := fmt.Fprintf(n.writer, "pool %s\n", name)
-	return err
+	return n.writeStatement("pool", name)
 }
 
 func (n *ninjaWriter) Rule(name string) error {
 	n.justDidBlankLine = false
-	_, err := fmt.Fprintf(n.writer, "rule %s\n", name)
-	return err
+	return n.writeStatement("rule", name)
 }
 
 func (n *ninjaWriter) Build(comment string, rule string, outputs, implicitOuts,
-	explicitDeps, implicitDeps, orderOnlyDeps []string) error {
+	explicitDeps, implicitDeps, orderOnlyDeps, validations []ninjaString,
+	pkgNames map[*packageContext]string) error {
 
 	n.justDidBlankLine = false
 
 	const lineWrapLen = len(" $")
 	const maxLineLen = lineWidth - lineWrapLen
 
-	wrapper := ninjaWriterWithWrap{
+	wrapper := &ninjaWriterWithWrap{
 		ninjaWriter: n,
 		maxLineLen:  maxLineLen,
 	}
@@ -126,14 +137,16 @@
 	wrapper.WriteString("build")
 
 	for _, output := range outputs {
-		wrapper.WriteStringWithSpace(output)
+		wrapper.Space()
+		output.ValueWithEscaper(wrapper, pkgNames, outputEscaper)
 	}
 
 	if len(implicitOuts) > 0 {
 		wrapper.WriteStringWithSpace("|")
 
 		for _, out := range implicitOuts {
-			wrapper.WriteStringWithSpace(out)
+			wrapper.Space()
+			out.ValueWithEscaper(wrapper, pkgNames, outputEscaper)
 		}
 	}
 
@@ -142,14 +155,16 @@
 	wrapper.WriteStringWithSpace(rule)
 
 	for _, dep := range explicitDeps {
-		wrapper.WriteStringWithSpace(dep)
+		wrapper.Space()
+		dep.ValueWithEscaper(wrapper, pkgNames, inputEscaper)
 	}
 
 	if len(implicitDeps) > 0 {
 		wrapper.WriteStringWithSpace("|")
 
 		for _, dep := range implicitDeps {
-			wrapper.WriteStringWithSpace(dep)
+			wrapper.Space()
+			dep.ValueWithEscaper(wrapper, pkgNames, inputEscaper)
 		}
 	}
 
@@ -157,7 +172,17 @@
 		wrapper.WriteStringWithSpace("||")
 
 		for _, dep := range orderOnlyDeps {
-			wrapper.WriteStringWithSpace(dep)
+			wrapper.Space()
+			dep.ValueWithEscaper(wrapper, pkgNames, inputEscaper)
+		}
+	}
+
+	if len(validations) > 0 {
+		wrapper.WriteStringWithSpace("|@")
+
+		for _, dep := range validations {
+			wrapper.Space()
+			dep.ValueWithEscaper(wrapper, pkgNames, inputEscaper)
 		}
 	}
 
@@ -166,23 +191,57 @@
 
 func (n *ninjaWriter) Assign(name, value string) error {
 	n.justDidBlankLine = false
-	_, err := fmt.Fprintf(n.writer, "%s = %s\n", name, value)
-	return err
+	_, err := n.writer.WriteString(name)
+	if err != nil {
+		return err
+	}
+	_, err = n.writer.WriteString(" = ")
+	if err != nil {
+		return err
+	}
+	_, err = n.writer.WriteString(value)
+	if err != nil {
+		return err
+	}
+	_, err = n.writer.WriteString("\n")
+	if err != nil {
+		return err
+	}
+	return nil
 }
 
 func (n *ninjaWriter) ScopedAssign(name, value string) error {
 	n.justDidBlankLine = false
-	_, err := fmt.Fprintf(n.writer, "%s%s = %s\n", indentString[:indentWidth], name, value)
-	return err
+	_, err := n.writer.WriteString(indentString[:indentWidth])
+	if err != nil {
+		return err
+	}
+	_, err = n.writer.WriteString(name)
+	if err != nil {
+		return err
+	}
+	_, err = n.writer.WriteString(" = ")
+	if err != nil {
+		return err
+	}
+	_, err = n.writer.WriteString(value)
+	if err != nil {
+		return err
+	}
+	_, err = n.writer.WriteString("\n")
+	if err != nil {
+		return err
+	}
+	return nil
 }
 
-func (n *ninjaWriter) Default(targets ...string) error {
+func (n *ninjaWriter) Default(pkgNames map[*packageContext]string, targets ...ninjaString) error {
 	n.justDidBlankLine = false
 
 	const lineWrapLen = len(" $")
 	const maxLineLen = lineWidth - lineWrapLen
 
-	wrapper := ninjaWriterWithWrap{
+	wrapper := &ninjaWriterWithWrap{
 		ninjaWriter: n,
 		maxLineLen:  maxLineLen,
 	}
@@ -190,7 +249,8 @@
 	wrapper.WriteString("default")
 
 	for _, target := range targets {
-		wrapper.WriteString(" " + target)
+		wrapper.Space()
+		target.ValueWithEscaper(wrapper, pkgNames, outputEscaper)
 	}
 
 	return wrapper.Flush()
@@ -198,71 +258,157 @@
 
 func (n *ninjaWriter) Subninja(file string) error {
 	n.justDidBlankLine = false
-	_, err := fmt.Fprintf(n.writer, "subninja %s\n", file)
-	return err
+	return n.writeStatement("subninja", file)
 }
 
 func (n *ninjaWriter) BlankLine() (err error) {
 	// We don't output multiple blank lines in a row.
 	if !n.justDidBlankLine {
 		n.justDidBlankLine = true
-		_, err = io.WriteString(n.writer, "\n")
+		_, err = n.writer.WriteString("\n")
 	}
 	return err
 }
 
-type ninjaWriterWithWrap struct {
-	*ninjaWriter
-	maxLineLen int
-	writtenLen int
-	err        error
+func (n *ninjaWriter) writeStatement(directive, name string) error {
+	_, err := n.writer.WriteString(directive + " ")
+	if err != nil {
+		return err
+	}
+	_, err = n.writer.WriteString(name)
+	if err != nil {
+		return err
+	}
+	_, err = n.writer.WriteString("\n")
+	if err != nil {
+		return err
+	}
+	return nil
 }
 
-func (n *ninjaWriterWithWrap) writeString(s string, space bool) {
+// ninjaWriterWithWrap is an io.StringWriter that writes through to a ninjaWriter, but supports
+// user-readable line wrapping on boundaries when ninjaWriterWithWrap.Space is called.
+// It collects incoming calls to WriteString until either the line length is exceeded, in which case
+// it inserts a wrap before the pending strings and then writes them, or the next call to Space, in
+// which case it writes out the pending strings.
+//
+// WriteString never returns an error; all errors are held until Flush is called.  Once an error has
+// occurred all writes become noops.
+type ninjaWriterWithWrap struct {
+	*ninjaWriter
+	// pending lists the strings that have been written since the last call to Space.
+	pending []string
+
+	// pendingLen accumulates the lengths of the strings in pending.
+	pendingLen int
+
+	// lineLen accumulates the number of bytes on the current line.
+	lineLen int
+
+	// maxLineLen is the length of the line before wrapping.
+	maxLineLen int
+
+	// space is true if the strings in pending should be preceded by a space.
+	space bool
+
+	// err holds any error that has occurred to return in Flush.
+	err error
+}
+
+// WriteString writes the string to buffer, wrapping on a previous Space call if necessary.
+// It never returns an error; all errors are held until Flush is called.
+func (n *ninjaWriterWithWrap) WriteString(s string) (written int, noError error) {
+	// Always return the full length of the string and a nil error.
+	// ninjaWriterWithWrap doesn't return errors to the caller, it saves them until Flush()
+	written = len(s)
+
 	if n.err != nil {
 		return
 	}
 
-	spaceLen := 0
-	if space {
-		spaceLen = 1
-	}
-
-	if n.writtenLen+len(s)+spaceLen > n.maxLineLen {
-		_, n.err = io.WriteString(n.writer, " $\n")
+	const spaceLen = 1
+	if !n.space {
+		// No space is pending, so a line wrap can't be inserted before this, so just write
+		// the string.
+		n.lineLen += len(s)
+		_, n.err = n.writer.WriteString(s)
+	} else if n.lineLen+len(s)+spaceLen > n.maxLineLen {
+		// A space is pending, and the pending strings plus the current string would exceed the
+		// maximum line length.  Wrap and indent before the pending space and strings, then write
+		// the pending and current strings.
+		_, n.err = n.writer.WriteString(" $\n")
 		if n.err != nil {
 			return
 		}
-		_, n.err = io.WriteString(n.writer, indentString[:indentWidth*2])
+		_, n.err = n.writer.WriteString(indentString[:indentWidth*2])
 		if n.err != nil {
 			return
 		}
-		n.writtenLen = indentWidth * 2
+		n.lineLen = indentWidth*2 + n.pendingLen
 		s = strings.TrimLeftFunc(s, unicode.IsSpace)
-	} else if space {
-		_, n.err = io.WriteString(n.writer, " ")
+		n.pending = append(n.pending, s)
+		n.writePending()
+
+		n.space = false
+	} else {
+		// A space is pending but the current string would not reach the maximum line length,
+		// add it to the pending list.
+		n.pending = append(n.pending, s)
+		n.pendingLen += len(s)
+		n.lineLen += len(s)
+	}
+
+	return
+}
+
+// Space inserts a space that is also a possible wrapping point into the string.
+func (n *ninjaWriterWithWrap) Space() {
+	if n.err != nil {
+		return
+	}
+	if n.space {
+		// A space was already pending, and the space plus any strings written after the space did
+		// not reach the maximum line length, so write out the old space and pending strings.
+		_, n.err = n.writer.WriteString(" ")
+		n.lineLen++
+		n.writePending()
+	}
+	n.space = true
+}
+
+// writePending writes out all the strings stored in pending and resets it.
+func (n *ninjaWriterWithWrap) writePending() {
+	if n.err != nil {
+		return
+	}
+	for _, pending := range n.pending {
+		_, n.err = n.writer.WriteString(pending)
 		if n.err != nil {
 			return
 		}
-		n.writtenLen++
 	}
-
-	_, n.err = io.WriteString(n.writer, s)
-	n.writtenLen += len(s)
+	// Reset the length of pending back to 0 without reducing its capacity to avoid reallocating
+	// the backing array.
+	n.pending = n.pending[:0]
+	n.pendingLen = 0
 }
 
-func (n *ninjaWriterWithWrap) WriteString(s string) {
-	n.writeString(s, false)
-}
-
+// WriteStringWithSpace is a helper that calls Space and WriteString.
 func (n *ninjaWriterWithWrap) WriteStringWithSpace(s string) {
-	n.writeString(s, true)
+	n.Space()
+	_, _ = n.WriteString(s)
 }
 
+// Flush writes out any pending space or strings and then a newline.  It also returns any errors
+// that have previously occurred.
 func (n *ninjaWriterWithWrap) Flush() error {
+	if n.space {
+		_, n.err = n.writer.WriteString(" ")
+	}
+	n.writePending()
 	if n.err != nil {
 		return n.err
 	}
-	_, err := io.WriteString(n.writer, "\n")
+	_, err := n.writer.WriteString("\n")
 	return err
 }
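// An illustrative, package-internal sketch (not part of this change) of the rewritten
// wrapper's calling pattern, as used by the Build and Default methods above: Space marks a
// possible wrap point before each token, WriteString (or ninjaString.ValueWithEscaper writing
// straight into the wrapper) streams the token, and Flush emits anything still pending plus
// the trailing newline and any held error.  The exampleWrappedLine helper is hypothetical.
func exampleWrappedLine(n *ninjaWriter, tokens []string) error {
	const lineWrapLen = len(" $")
	wrapper := &ninjaWriterWithWrap{
		ninjaWriter: n,
		maxLineLen:  lineWidth - lineWrapLen,
	}
	wrapper.WriteString("build")
	for _, tok := range tokens {
		wrapper.Space()          // wrap here if the next token would overflow the line
		wrapper.WriteString(tok) // never returns an error; errors are held until Flush
	}
	return wrapper.Flush()
}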
diff --git a/ninja_writer_test.go b/ninja_writer_test.go
index cc880e5..82eeee5 100644
--- a/ninja_writer_test.go
+++ b/ninja_writer_test.go
@@ -16,6 +16,7 @@
 
 import (
 	"bytes"
+	"strings"
 	"testing"
 )
 
@@ -49,14 +50,26 @@
 	},
 	{
 		input: func(w *ninjaWriter) {
-			ck(w.Build("foo comment", "foo", []string{"o1", "o2"}, []string{"io1", "io2"},
-				[]string{"e1", "e2"}, []string{"i1", "i2"}, []string{"oo1", "oo2"}))
+			ck(w.Build("foo comment", "foo", testNinjaStrings("o1", "o2"),
+				testNinjaStrings("io1", "io2"), testNinjaStrings("e1", "e2"),
+				testNinjaStrings("i1", "i2"), testNinjaStrings("oo1", "oo2"),
+				testNinjaStrings("v1", "v2"), nil))
 		},
-		output: "# foo comment\nbuild o1 o2 | io1 io2: foo e1 e2 | i1 i2 || oo1 oo2\n",
+		output: "# foo comment\nbuild o1 o2 | io1 io2: foo e1 e2 | i1 i2 || oo1 oo2 |@ v1 v2\n",
 	},
 	{
 		input: func(w *ninjaWriter) {
-			ck(w.Default("foo"))
+			ck(w.Build("foo comment", "foo",
+				testNinjaStrings(strings.Repeat("o", lineWidth)),
+				nil,
+				testNinjaStrings(strings.Repeat("i", lineWidth)),
+				nil, nil, nil, nil))
+		},
+		output: "# foo comment\nbuild $\n        " + strings.Repeat("o", lineWidth) + ": foo $\n        " + strings.Repeat("i", lineWidth) + "\n",
+	},
+	{
+		input: func(w *ninjaWriter) {
+			ck(w.Default(nil, testNinjaStrings("foo")...))
 		},
 		output: "default foo\n",
 	},
@@ -94,7 +107,8 @@
 			ck(w.ScopedAssign("command", "echo out: $out in: $in _arg: $_arg"))
 			ck(w.ScopedAssign("pool", "p"))
 			ck(w.BlankLine())
-			ck(w.Build("r comment", "r", []string{"foo.o"}, nil, []string{"foo.in"}, nil, nil))
+			ck(w.Build("r comment", "r", testNinjaStrings("foo.o"),
+				nil, testNinjaStrings("foo.in"), nil, nil, nil, nil))
 			ck(w.ScopedAssign("_arg", "arg value"))
 		},
 		output: `pool p
@@ -124,3 +138,8 @@
 		}
 	}
 }
+
+func testNinjaStrings(s ...string) []ninjaString {
+	ret, _ := parseNinjaStrings(nil, s)
+	return ret
+}
diff --git a/package_ctx.go b/package_ctx.go
index 088239e..af78772 100644
--- a/package_ctx.go
+++ b/package_ctx.go
@@ -250,9 +250,10 @@
 }
 
 type staticVariable struct {
-	pctx   *packageContext
-	name_  string
-	value_ string
+	pctx      *packageContext
+	name_     string
+	value_    string
+	fullName_ string
 }
 
 // StaticVariable returns a Variable whose value does not depend on any
@@ -271,7 +272,11 @@
 		panic(err)
 	}
 
-	v := &staticVariable{p, name, value}
+	v := &staticVariable{
+		pctx:   p,
+		name_:  name,
+		value_: value,
+	}
 	err = p.scope.AddVariable(v)
 	if err != nil {
 		panic(err)
@@ -289,9 +294,16 @@
 }
 
 func (v *staticVariable) fullName(pkgNames map[*packageContext]string) string {
+	if v.fullName_ != "" {
+		return v.fullName_
+	}
 	return packageNamespacePrefix(pkgNames[v.pctx]) + v.name_
 }
 
+func (v *staticVariable) memoizeFullName(pkgNames map[*packageContext]string) {
+	v.fullName_ = v.fullName(pkgNames)
+}
+
 func (v *staticVariable) value(interface{}) (ninjaString, error) {
 	ninjaStr, err := parseNinjaString(v.pctx.scope, v.value_)
 	if err != nil {
@@ -306,9 +318,10 @@
 }
 
 type variableFunc struct {
-	pctx   *packageContext
-	name_  string
-	value_ func(interface{}) (string, error)
+	pctx      *packageContext
+	name_     string
+	value_    func(interface{}) (string, error)
+	fullName_ string
 }
 
 // VariableFunc returns a Variable whose value is determined by a function that
@@ -332,7 +345,11 @@
 		panic(err)
 	}
 
-	v := &variableFunc{p, name, f}
+	v := &variableFunc{
+		pctx:   p,
+		name_:  name,
+		value_: f,
+	}
 	err = p.scope.AddVariable(v)
 	if err != nil {
 		panic(err)
@@ -371,7 +388,11 @@
 		return resultStr, nil
 	}
 
-	v := &variableFunc{p, name, fun}
+	v := &variableFunc{
+		pctx:   p,
+		name_:  name,
+		value_: fun,
+	}
 	err = p.scope.AddVariable(v)
 	if err != nil {
 		panic(err)
@@ -389,9 +410,16 @@
 }
 
 func (v *variableFunc) fullName(pkgNames map[*packageContext]string) string {
+	if v.fullName_ != "" {
+		return v.fullName_
+	}
 	return packageNamespacePrefix(pkgNames[v.pctx]) + v.name_
 }
 
+func (v *variableFunc) memoizeFullName(pkgNames map[*packageContext]string) {
+	v.fullName_ = v.fullName(pkgNames)
+}
+
 func (v *variableFunc) value(config interface{}) (ninjaString, error) {
 	value, err := v.value_(config)
 	if err != nil {
@@ -452,6 +480,10 @@
 	return v.name_
 }
 
+func (v *argVariable) memoizeFullName(pkgNames map[*packageContext]string) {
+	// Nothing to do, full name is known at initialization.
+}
+
 func (v *argVariable) value(config interface{}) (ninjaString, error) {
 	return nil, errVariableIsArg
 }
@@ -461,9 +493,10 @@
 }
 
 type staticPool struct {
-	pctx   *packageContext
-	name_  string
-	params PoolParams
+	pctx      *packageContext
+	name_     string
+	params    PoolParams
+	fullName_ string
 }
 
 // StaticPool returns a Pool whose value does not depend on any configuration
@@ -483,7 +516,11 @@
 		panic(err)
 	}
 
-	pool := &staticPool{p, name, params}
+	pool := &staticPool{
+		pctx:   p,
+		name_:  name,
+		params: params,
+	}
 	err = p.scope.AddPool(pool)
 	if err != nil {
 		panic(err)
@@ -501,9 +538,16 @@
 }
 
 func (p *staticPool) fullName(pkgNames map[*packageContext]string) string {
+	if p.fullName_ != "" {
+		return p.fullName_
+	}
 	return packageNamespacePrefix(pkgNames[p.pctx]) + p.name_
 }
 
+func (p *staticPool) memoizeFullName(pkgNames map[*packageContext]string) {
+	p.fullName_ = p.fullName(pkgNames)
+}
+
 func (p *staticPool) def(config interface{}) (*poolDef, error) {
 	def, err := parsePoolParams(p.pctx.scope, &p.params)
 	if err != nil {
@@ -520,6 +564,7 @@
 	pctx       *packageContext
 	name_      string
 	paramsFunc func(interface{}) (PoolParams, error)
+	fullName_  string
 }
 
 // PoolFunc returns a Pool whose value is determined by a function that takes a
@@ -542,7 +587,11 @@
 		panic(err)
 	}
 
-	pool := &poolFunc{p, name, f}
+	pool := &poolFunc{
+		pctx:       p,
+		name_:      name,
+		paramsFunc: f,
+	}
 	err = p.scope.AddPool(pool)
 	if err != nil {
 		panic(err)
@@ -560,9 +609,16 @@
 }
 
 func (p *poolFunc) fullName(pkgNames map[*packageContext]string) string {
+	if p.fullName_ != "" {
+		return p.fullName_
+	}
 	return packageNamespacePrefix(pkgNames[p.pctx]) + p.name_
 }
 
+func (p *poolFunc) memoizeFullName(pkgNames map[*packageContext]string) {
+	p.fullName_ = p.fullName(pkgNames)
+}
+
 func (p *poolFunc) def(config interface{}) (*poolDef, error) {
 	params, err := p.paramsFunc(config)
 	if err != nil {
@@ -595,6 +651,10 @@
 	return p.name_
 }
 
+func (p *builtinPool) memoizeFullName(pkgNames map[*packageContext]string) {
+	// Nothing to do, full name is known at initialization.
+}
+
 func (p *builtinPool) def(config interface{}) (*poolDef, error) {
 	return nil, errPoolIsBuiltin
 }
@@ -616,6 +676,7 @@
 	params     RuleParams
 	argNames   map[string]bool
 	scope_     *basicScope
+	fullName_  string
 	sync.Mutex // protects scope_ during lazy creation
 }
 
@@ -683,9 +744,16 @@
 }
 
 func (r *staticRule) fullName(pkgNames map[*packageContext]string) string {
+	if r.fullName_ != "" {
+		return r.fullName_
+	}
 	return packageNamespacePrefix(pkgNames[r.pctx]) + r.name_
 }
 
+func (r *staticRule) memoizeFullName(pkgNames map[*packageContext]string) {
+	r.fullName_ = r.fullName(pkgNames)
+}
+
 func (r *staticRule) def(interface{}) (*ruleDef, error) {
 	def, err := parseRuleParams(r.scope(), &r.params)
 	if err != nil {
@@ -721,6 +789,7 @@
 	paramsFunc func(interface{}) (RuleParams, error)
 	argNames   map[string]bool
 	scope_     *basicScope
+	fullName_  string
 	sync.Mutex // protects scope_ during lazy creation
 }
 
@@ -789,9 +858,16 @@
 }
 
 func (r *ruleFunc) fullName(pkgNames map[*packageContext]string) string {
+	if r.fullName_ != "" {
+		return r.fullName_
+	}
 	return packageNamespacePrefix(pkgNames[r.pctx]) + r.name_
 }
 
+func (r *ruleFunc) memoizeFullName(pkgNames map[*packageContext]string) {
+	r.fullName_ = r.fullName(pkgNames)
+}
+
 func (r *ruleFunc) def(config interface{}) (*ruleDef, error) {
 	params, err := r.paramsFunc(config)
 	if err != nil {
@@ -843,6 +919,10 @@
 	return r.name_
 }
 
+func (r *builtinRule) memoizeFullName(pkgNames map[*packageContext]string) {
+	// Nothing to do, full name is known at initialization.
+}
+
 func (r *builtinRule) def(config interface{}) (*ruleDef, error) {
 	return nil, errRuleIsBuiltin
 }
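
The memoizeFullName methods added above let the writer compute each package-qualified name once, after package names are assigned, and reuse the cached string on every later reference. A minimal sketch of that compute-once pattern, with illustrative names rather than the real packageContext machinery:

package main

import "fmt"

// namedEntity caches an expensive-to-compute full name, mirroring the
// fullName_/memoizeFullName pattern: fullName falls back to computing the
// value until memoizeFullName stores it.
type namedEntity struct {
	pkgPrefix string
	name      string
	fullName_ string
}

func (e *namedEntity) fullName() string {
	if e.fullName_ != "" {
		return e.fullName_
	}
	return e.pkgPrefix + "." + e.name
}

func (e *namedEntity) memoizeFullName() {
	e.fullName_ = e.fullName()
}

func main() {
	e := &namedEntity{pkgPrefix: "g.example", name: "cFlags"}
	fmt.Println(e.fullName()) // computed on the fly
	e.memoizeFullName()
	fmt.Println(e.fullName()) // returned from the cached field
}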
diff --git a/parser/sort.go b/parser/sort.go
index da594db..0379d45 100644
--- a/parser/sort.go
+++ b/parser/sort.go
@@ -15,10 +15,105 @@
 package parser
 
 import (
+	"fmt"
 	"sort"
+	"strconv"
+	"strings"
 	"text/scanner"
 )
 
+// numericStringLess compares two strings, returning a lexicographical comparison unless the first
+// difference occurs in a sequence of 1 or more numeric characters, in which case it returns the
+// numerical comparison of the two numbers.
+func numericStringLess(a, b string) bool {
+	isNumeric := func(r rune) bool { return r >= '0' && r <= '9' }
+	isNotNumeric := func(r rune) bool { return !isNumeric(r) }
+
+	minLength := len(a)
+	if len(b) < minLength {
+		minLength = len(b)
+	}
+
+	byteIndex := 0
+	numberStartIndex := -1
+
+	var aByte, bByte byte
+
+	// Start with a byte comparison to find where the strings differ.
+	for ; byteIndex < minLength; byteIndex++ {
+		aByte, bByte = a[byteIndex], b[byteIndex]
+		if aByte != bByte {
+			break
+		}
+		byteIsNumeric := isNumeric(rune(aByte))
+		if numberStartIndex != -1 && !byteIsNumeric {
+			numberStartIndex = -1
+		} else if numberStartIndex == -1 && byteIsNumeric {
+			numberStartIndex = byteIndex
+		}
+	}
+
+	// Handle the case where we reached the end of one or both strings without finding a difference.
+	if byteIndex == minLength {
+		if len(a) < len(b) {
+			// Reached the end of a.  a is a prefix of b.
+			return true
+		} else {
+			// Reached the end of b.  b is a prefix of a or b is equal to a.
+			return false
+		}
+	}
+
+	aByteNumeric := isNumeric(rune(aByte))
+	bByteNumeric := isNumeric(rune(bByte))
+
+	if (aByteNumeric || bByteNumeric) && !(aByteNumeric && bByteNumeric) && numberStartIndex != -1 {
+		// Only one of aByte and bByte is a number, but the previous byte was a number.  That means
+		// one is a longer number with the same prefix, which must be numerically larger.  If bByte
+		// is a number then the number in b is numerically larger than the number in a.
+		return bByteNumeric
+	}
+
+	// If the bytes are both numbers do a numeric comparison.
+	if aByteNumeric && bByteNumeric {
+		// Extract the numbers from each string, starting from the first number after the last
+		// non-number.  This won't be invalid utf8 because we are only looking for the bytes
+		// '0'-'9', which can only occur as single-byte runes in utf8.
+		if numberStartIndex == -1 {
+			numberStartIndex = byteIndex
+		}
+		aNumberString := a[numberStartIndex:]
+		bNumberString := b[numberStartIndex:]
+
+		// Find the first non-number in each, using the full length if there isn't one.
+		endANumbers := strings.IndexFunc(aNumberString, isNotNumeric)
+		endBNumbers := strings.IndexFunc(bNumberString, isNotNumeric)
+		if endANumbers == -1 {
+			endANumbers = len(aNumberString)
+		}
+		if endBNumbers == -1 {
+			endBNumbers = len(bNumberString)
+		}
+
+		// Convert each to an int.
+		aNumber, err := strconv.Atoi(aNumberString[:endANumbers])
+		if err != nil {
+			panic(fmt.Errorf("failed to convert %q from %q to number: %w",
+				aNumberString[:endANumbers], a, err))
+		}
+		bNumber, err := strconv.Atoi(bNumberString[:endBNumbers])
+		if err != nil {
+			panic(fmt.Errorf("failed to convert %q from %q to number: %w",
+				bNumberString[:endBNumbers], b, err))
+		}
+		// Do a numeric comparison.
+		return aNumber < bNumber
+	}
+
+	// At least one is not a number, do a byte comparison.
+	return aByte < bByte
+}
+
 func SortLists(file *File) {
 	for _, def := range file.Defs {
 		if assignment, ok := def.(*Assignment); ok {
@@ -97,7 +192,7 @@
 	if !isListOfPrimitives(values) {
 		return
 	}
-	l := make(elemList, len(values))
+	l := make([]elem, len(values))
 	for i, v := range values {
 		s, ok := v.(*String)
 		if !ok {
@@ -110,7 +205,9 @@
 		l[i] = elem{s.Value, i, v.Pos(), n}
 	}
 
-	sort.Sort(l)
+	sort.SliceStable(l, func(i, j int) bool {
+		return numericStringLess(l[i].s, l[j].s)
+	})
 
 	copyValues := append([]Expression{}, values...)
 	copyComments := make([]*CommentGroup, len(file.Comments))
@@ -150,7 +247,7 @@
 		if !ok {
 			panic("list contains non-string element")
 		}
-		if prev > s.Value {
+		if prev != "" && numericStringLess(s.Value, prev) {
 			return false
 		}
 		prev = s.Value
@@ -166,20 +263,6 @@
 	nextPos scanner.Position
 }
 
-type elemList []elem
-
-func (l elemList) Len() int {
-	return len(l)
-}
-
-func (l elemList) Swap(i, j int) {
-	l[i], l[j] = l[j], l[i]
-}
-
-func (l elemList) Less(i, j int) bool {
-	return l[i].s < l[j].s
-}
-
 type commentsByOffset []*CommentGroup
 
 func (l commentsByOffset) Len() int {
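
numericStringLess orders strings the way SortLists now wants them: runs of digits compare numerically, everything else byte-by-byte. A small sketch of how it plugs into sort.SliceStable, placed in the parser package so it can reach the unexported function; the input slice is made up.

package parser

import "sort"

// sortNumerically is a sketch showing the ordering numericStringLess
// produces: embedded numbers compare numerically, so "lib2" sorts before
// "lib10".
func sortNumerically(names []string) []string {
	sort.SliceStable(names, func(i, j int) bool {
		return numericStringLess(names[i], names[j])
	})
	return names
}

// sortNumerically([]string{"lib10", "lib2", "lib1", "liba"})
// => []string{"lib1", "lib2", "lib10", "liba"}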
diff --git a/parser/sort_test.go b/parser/sort_test.go
new file mode 100644
index 0000000..0a9e7fc
--- /dev/null
+++ b/parser/sort_test.go
@@ -0,0 +1,96 @@
+// Copyright 2021 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+import "testing"
+
+func Test_numericStringLess(t *testing.T) {
+	type args struct {
+		a string
+		b string
+	}
+	tests := []struct {
+		a, b string
+	}{
+		{"a", "b"},
+		{"aa", "ab"},
+		{"aaa", "aba"},
+
+		{"1", "2"},
+		{"1", "11"},
+		{"2", "11"},
+		{"1", "12"},
+
+		{"12", "101"},
+		{"11", "102"},
+
+		{"0", "1"},
+		{"0", "01"},
+		{"1", "02"},
+		{"01", "002"},
+		{"001", "02"},
+	}
+
+	oneTest := func(a, b string, want bool) {
+		t.Helper()
+		if got := numericStringLess(a, b); got != want {
+			t.Errorf("want numericStringLess(%v, %v) = %v, got %v", a, b, want, got)
+		}
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.a+"<"+tt.b, func(t *testing.T) {
+			// a should be less than b
+			oneTest(tt.a, tt.b, true)
+			// b should not be less than a
+			oneTest(tt.b, tt.a, false)
+			// a should not be less than a
+			oneTest(tt.a, tt.a, false)
+			// b should not be less than b
+			oneTest(tt.b, tt.b, false)
+
+			// The same should be true both strings are prefixed with an "a"
+			oneTest("a"+tt.a, "a"+tt.b, true)
+			oneTest("a"+tt.b, "a"+tt.a, false)
+			oneTest("a"+tt.a, "a"+tt.a, false)
+			oneTest("a"+tt.b, "a"+tt.b, false)
+
+			// The same should be true both strings are suffixed with an "a"
+			oneTest(tt.a+"a", tt.b+"a", true)
+			oneTest(tt.b+"a", tt.a+"a", false)
+			oneTest(tt.a+"a", tt.a+"a", false)
+			oneTest(tt.b+"a", tt.b+"a", false)
+
+			// The same should be true both strings are suffixed with a "1"
+			oneTest(tt.a+"1", tt.b+"1", true)
+			oneTest(tt.b+"1", tt.a+"1", false)
+			oneTest(tt.a+"1", tt.a+"1", false)
+			oneTest(tt.b+"1", tt.b+"1", false)
+
+			// The same should be true both strings are prefixed with a "0"
+			oneTest("0"+tt.a, "0"+tt.b, true)
+			oneTest("0"+tt.b, "0"+tt.a, false)
+			oneTest("0"+tt.a, "0"+tt.a, false)
+			oneTest("0"+tt.b, "0"+tt.b, false)
+
+			// The same should be true both strings are suffixed with a "0"
+			oneTest(tt.a+"0", tt.b+"0", true)
+			oneTest(tt.b+"0", tt.a+"0", false)
+			oneTest(tt.a+"0", tt.a+"0", false)
+			oneTest(tt.b+"0", tt.b+"0", false)
+
+		})
+	}
+}
diff --git a/pathtools/fs.go b/pathtools/fs.go
index 21754d0..806f466 100644
--- a/pathtools/fs.go
+++ b/pathtools/fs.go
@@ -95,7 +95,7 @@
 	// Exists returns whether the file exists and whether it is a directory.  Follows symlinks.
 	Exists(name string) (bool, bool, error)
 
-	Glob(pattern string, excludes []string, follow ShouldFollowSymlinks) (matches, dirs []string, err error)
+	Glob(pattern string, excludes []string, follow ShouldFollowSymlinks) (GlobResult, error)
 	glob(pattern string) (matches []string, err error)
 
 	// IsDir returns true if the path points to a directory, false if it points to a file.  Follows symlinks.
@@ -194,7 +194,7 @@
 	}
 }
 
-func (fs *osFs) Glob(pattern string, excludes []string, follow ShouldFollowSymlinks) (matches, dirs []string, err error) {
+func (fs *osFs) Glob(pattern string, excludes []string, follow ShouldFollowSymlinks) (GlobResult, error) {
 	return startGlob(fs, pattern, excludes, follow)
 }
 
@@ -346,7 +346,7 @@
 	return false, os.ErrNotExist
 }
 
-func (m *mockFs) Glob(pattern string, excludes []string, follow ShouldFollowSymlinks) (matches, dirs []string, err error) {
+func (m *mockFs) Glob(pattern string, excludes []string, follow ShouldFollowSymlinks) (GlobResult, error) {
 	return startGlob(m, pattern, excludes, follow)
 }
 
@@ -538,10 +538,22 @@
 			continue
 		}
 		f = filepath.Join(name, f)
-		if isSymlink, _ := fs.IsSymlink(f); isSymlink && follow == DontFollowSymlinks {
-			continue
+		var info os.FileInfo
+		if follow == DontFollowSymlinks {
+			info, err = fs.Lstat(f)
+			if err != nil {
+				continue
+			}
+			if info.Mode()&os.ModeSymlink != 0 {
+				continue
+			}
+		} else {
+			info, err = fs.Stat(f)
+			if err != nil {
+				continue
+			}
 		}
-		if isDir, _ := fs.IsDir(f); isDir {
+		if info.IsDir() {
 			dirs = append(dirs, f)
 			subDirs, err := listDirsRecursiveRelative(fs, f, follow, depth)
 			if err != nil {
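
The listing change above swaps the IsSymlink/IsDir pair for a single Lstat or Stat call, which both classifies the entry and respects the follow-symlinks policy in one step. A rough sketch of the distinction using only the standard os package; the paths and helper name are illustrative.

package main

import (
	"fmt"
	"os"
)

// describe shows why the listing code picks Lstat for DontFollowSymlinks and
// Stat otherwise: Lstat reports on the link itself, Stat resolves it.
func describe(path string, followSymlinks bool) {
	var info os.FileInfo
	var err error
	if followSymlinks {
		info, err = os.Stat(path) // resolves symlinks; errors on dangling links
	} else {
		info, err = os.Lstat(path) // reports on the link itself
	}
	if err != nil {
		fmt.Println(path, "error:", err)
		return
	}
	switch {
	case info.Mode()&os.ModeSymlink != 0:
		fmt.Println(path, "is a symlink (skipped when DontFollowSymlinks)")
	case info.IsDir():
		fmt.Println(path, "is a directory")
	default:
		fmt.Println(path, "is a regular file")
	}
}

func main() {
	describe(os.TempDir(), true)
	describe(os.TempDir(), false)
}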
diff --git a/pathtools/glob.go b/pathtools/glob.go
index 727b725..14cdacf 100644
--- a/pathtools/glob.go
+++ b/pathtools/glob.go
@@ -15,20 +15,69 @@
 package pathtools
 
 import (
+	"encoding/json"
 	"errors"
 	"fmt"
 	"io/ioutil"
 	"os"
 	"path/filepath"
 	"strings"
-
-	"github.com/google/blueprint/deptools"
 )
 
+// BPGlobArgumentVersion is used to abort argument parsing early when the bpglob argument format
+// has changed but soong_build hasn't had a chance to rerun yet to update build-globs.ninja.
+// Increment it manually when changing the bpglob argument format.  It is located here because
+// pathtools is the only package that is shared between bpglob and bootstrap.
+const BPGlobArgumentVersion = 2
+
 var GlobMultipleRecursiveErr = errors.New("pattern contains multiple '**'")
 var GlobLastRecursiveErr = errors.New("pattern has '**' as last path element")
 var GlobInvalidRecursiveErr = errors.New("pattern contains other characters between '**' and path separator")
 
+// GlobResult is a container holding the results of a call to Glob.
+type GlobResult struct {
+	// Pattern is the pattern that was passed to Glob.
+	Pattern string
+	// Excludes is the list of excludes that were passed to Glob.
+	Excludes []string
+
+	// Matches is the list of files or directories that matched the pattern but not the excludes.
+	Matches []string
+
+	// Deps is the list of files or directories that must be depended on to regenerate the glob.
+	Deps []string
+}
+
+// FileList returns the list of files matched by a glob for writing to an output file.
+func (result GlobResult) FileList() []byte {
+	return []byte(strings.Join(result.Matches, "\n") + "\n")
+}
+
+// MultipleGlobResults is a list of GlobResult structs.
+type MultipleGlobResults []GlobResult
+
+// FileList returns the list of files matched by a list of multiple globs for writing to an output file.
+func (results MultipleGlobResults) FileList() []byte {
+	multipleMatches := make([][]string, len(results))
+	for i, result := range results {
+		multipleMatches[i] = result.Matches
+	}
+	buf, err := json.Marshal(multipleMatches)
+	if err != nil {
+		panic(fmt.Errorf("failed to marshal glob results to json: %w", err))
+	}
+	return buf
+}
+
+// Deps returns the deps from all of the GlobResults.
+func (results MultipleGlobResults) Deps() []string {
+	var deps []string
+	for _, result := range results {
+		deps = append(deps, result.Deps...)
+	}
+	return deps
+}
+
 // Glob returns the list of files and directories that match the given pattern
 // but do not match the given exclude patterns, along with the list of
 // directories and other dependencies that were searched to construct the file
@@ -40,26 +89,26 @@
 // In general ModuleContext.GlobWithDeps or SingletonContext.GlobWithDeps
 // should be used instead, as they will automatically set up dependencies
 // to rerun the primary builder when the list of matching files changes.
-func Glob(pattern string, excludes []string, follow ShouldFollowSymlinks) (matches, deps []string, err error) {
+func Glob(pattern string, excludes []string, follow ShouldFollowSymlinks) (GlobResult, error) {
 	return startGlob(OsFs, pattern, excludes, follow)
 }
 
 func startGlob(fs FileSystem, pattern string, excludes []string,
-	follow ShouldFollowSymlinks) (matches, deps []string, err error) {
+	follow ShouldFollowSymlinks) (GlobResult, error) {
 
 	if filepath.Base(pattern) == "**" {
-		return nil, nil, GlobLastRecursiveErr
-	} else {
-		matches, deps, err = glob(fs, pattern, false, follow)
+		return GlobResult{}, GlobLastRecursiveErr
 	}
 
+	matches, deps, err := glob(fs, pattern, false, follow)
+
 	if err != nil {
-		return nil, nil, err
+		return GlobResult{}, err
 	}
 
 	matches, err = filterExcludes(matches, excludes)
 	if err != nil {
-		return nil, nil, err
+		return GlobResult{}, err
 	}
 
 	// If the pattern has wildcards, we added dependencies on the
@@ -76,28 +125,27 @@
 	}
 
 	for i, match := range matches {
-		isSymlink, err := fs.IsSymlink(match)
-		if err != nil {
-			return nil, nil, err
+		var info os.FileInfo
+		if follow == DontFollowSymlinks {
+			info, err = fs.Lstat(match)
+		} else {
+			info, err = fs.Stat(match)
 		}
-		if !(isSymlink && follow == DontFollowSymlinks) {
-			isDir, err := fs.IsDir(match)
-			if os.IsNotExist(err) {
-				if isSymlink {
-					return nil, nil, fmt.Errorf("%s: dangling symlink", match)
-				}
-			}
-			if err != nil {
-				return nil, nil, fmt.Errorf("%s: %s", match, err.Error())
-			}
+		if err != nil {
+			return GlobResult{}, err
+		}
 
-			if isDir {
-				matches[i] = match + "/"
-			}
+		if info.IsDir() {
+			matches[i] = match + "/"
 		}
 	}
 
-	return matches, deps, nil
+	return GlobResult{
+		Pattern:  pattern,
+		Excludes: excludes,
+		Matches:  matches,
+		Deps:     deps,
+	}, nil
 }
 
 // glob is a recursive helper function to handle globbing each level of the pattern individually,
@@ -304,30 +352,6 @@
 	}
 }
 
-func GlobPatternList(patterns []string, prefix string) (globedList []string, depDirs []string, err error) {
-	var (
-		matches []string
-		deps    []string
-	)
-
-	globedList = make([]string, 0)
-	depDirs = make([]string, 0)
-
-	for _, pattern := range patterns {
-		if isWild(pattern) {
-			matches, deps, err = Glob(filepath.Join(prefix, pattern), nil, FollowSymlinks)
-			if err != nil {
-				return nil, nil, err
-			}
-			globedList = append(globedList, matches...)
-			depDirs = append(depDirs, deps...)
-		} else {
-			globedList = append(globedList, filepath.Join(prefix, pattern))
-		}
-	}
-	return globedList, depDirs, nil
-}
-
 // IsGlob returns true if the pattern contains any glob characters (*, ?, or [).
 func IsGlob(pattern string) bool {
 	return strings.IndexAny(pattern, "*?[") >= 0
@@ -344,34 +368,6 @@
 	return false
 }
 
-// GlobWithDepFile finds all files and directories that match glob.  Directories
-// will have a trailing '/'.  It compares the list of matches against the
-// contents of fileListFile, and rewrites fileListFile if it has changed.  It
-// also writes all of the the directories it traversed as dependencies on
-// fileListFile to depFile.
-//
-// The format of glob is either path/*.ext for a single directory glob, or
-// path/**/*.ext for a recursive glob.
-//
-// Returns a list of file paths, and an error.
-//
-// In general ModuleContext.GlobWithDeps or SingletonContext.GlobWithDeps
-// should be used instead, as they will automatically set up dependencies
-// to rerun the primary builder when the list of matching files changes.
-func GlobWithDepFile(glob, fileListFile, depFile string, excludes []string) (files []string, err error) {
-	files, deps, err := Glob(glob, excludes, FollowSymlinks)
-	if err != nil {
-		return nil, err
-	}
-
-	fileList := strings.Join(files, "\n") + "\n"
-
-	WriteFileIfChanged(fileListFile, []byte(fileList), 0666)
-	deptools.WriteDepFile(depFile, fileListFile, deps)
-
-	return
-}
-
 // WriteFileIfChanged wraps ioutil.WriteFile, but only writes the file if
 // the file does not already exist with identical contents.  This can be used
 // along with ninja restat rules to skip rebuilding downstream rules if no
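
Glob now returns everything about one glob call in a single GlobResult, and MultipleGlobResults aggregates several of them for writing a combined file list and depfile. A hedged usage sketch against the API added above; the patterns themselves are invented.

package main

import (
	"fmt"

	"github.com/google/blueprint/pathtools"
)

func main() {
	// Single glob: matches and deps travel together in one GlobResult.
	result, err := pathtools.Glob("src/*.go", []string{"src/*_test.go"}, pathtools.FollowSymlinks)
	if err != nil {
		panic(err)
	}
	fmt.Printf("pattern %q matched %d files, %d deps\n",
		result.Pattern, len(result.Matches), len(result.Deps))

	// Multiple globs: FileList serializes all match lists as JSON, and Deps
	// flattens the dependency lists for a single depfile.
	results := pathtools.MultipleGlobResults{result}
	_ = results.FileList()
	_ = results.Deps()
}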
diff --git a/pathtools/glob_test.go b/pathtools/glob_test.go
index a3a36ff..d847bad 100644
--- a/pathtools/glob_test.go
+++ b/pathtools/glob_test.go
@@ -723,7 +723,7 @@
 
 func testGlob(t *testing.T, fs FileSystem, testCase globTestCase, follow ShouldFollowSymlinks) {
 	t.Helper()
-	matches, deps, err := fs.Glob(testCase.pattern, testCase.excludes, follow)
+	result, err := fs.Glob(testCase.pattern, testCase.excludes, follow)
 	if err != testCase.err {
 		if err == nil {
 			t.Fatalf("missing error: %s", testCase.err)
@@ -733,22 +733,22 @@
 		return
 	}
 
-	if !reflect.DeepEqual(matches, testCase.matches) {
+	if !reflect.DeepEqual(result.Matches, testCase.matches) {
 		t.Errorf("incorrect matches list:")
 		t.Errorf(" pattern: %q", testCase.pattern)
 		if testCase.excludes != nil {
 			t.Errorf("excludes: %q", testCase.excludes)
 		}
-		t.Errorf("     got: %#v", matches)
+		t.Errorf("     got: %#v", result.Matches)
 		t.Errorf("expected: %#v", testCase.matches)
 	}
-	if !reflect.DeepEqual(deps, testCase.deps) {
+	if !reflect.DeepEqual(result.Deps, testCase.deps) {
 		t.Errorf("incorrect deps list:")
 		t.Errorf(" pattern: %q", testCase.pattern)
 		if testCase.excludes != nil {
 			t.Errorf("excludes: %q", testCase.excludes)
 		}
-		t.Errorf("     got: %#v", deps)
+		t.Errorf("     got: %#v", result.Deps)
 		t.Errorf("expected: %#v", testCase.deps)
 	}
 }
@@ -904,14 +904,14 @@
 
 			mock := MockFs(mockFiles)
 
-			matches, _, err := mock.Glob(test.pattern, nil, DontFollowSymlinks)
-			t.Log(test.name, test.pattern, matches)
+			result, err := mock.Glob(test.pattern, nil, DontFollowSymlinks)
+			t.Log(test.name, test.pattern, result.Matches)
 			if err != nil {
 				t.Fatal(err)
 			}
 
 			match := false
-			for _, x := range matches {
+			for _, x := range result.Matches {
 				if x == test.name {
 					match = true
 				}
diff --git a/pathtools/lists.go b/pathtools/lists.go
index fbde88a..e1838b3 100644
--- a/pathtools/lists.go
+++ b/pathtools/lists.go
@@ -38,10 +38,12 @@
 	return result
 }
 
+// ReplaceExtension changes the file extension. If the file does not have an
+// extension, the new extension is appended.
 func ReplaceExtension(path string, extension string) string {
-	dot := strings.LastIndex(path, ".")
-	if dot == -1 {
-		return path
+	oldExt := filepath.Ext(path)
+	if oldExt != "" {
+		path = strings.TrimSuffix(path, oldExt)
 	}
-	return path[:dot+1] + extension
+	return path + "." + extension
 }
diff --git a/pathtools/lists_test.go b/pathtools/lists_test.go
new file mode 100644
index 0000000..cce8786
--- /dev/null
+++ b/pathtools/lists_test.go
@@ -0,0 +1,41 @@
+// Copyright 2021 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package pathtools
+
+import (
+	"testing"
+)
+
+func TestLists_ReplaceExtension(t *testing.T) {
+
+	testCases := []struct {
+		from, ext, to string
+	}{
+		{"1.jpg", "png", "1.png"},
+		{"1", "png", "1.png"},
+		{"1.", "png", "1.png"},
+		{"2.so", "so.1", "2.so.1"},
+		{"/out/.test/1.png", "jpg", "/out/.test/1.jpg"},
+		{"/out/.test/1", "jpg", "/out/.test/1.jpg"},
+	}
+
+	for _, test := range testCases {
+		t.Run(test.from, func(t *testing.T) {
+			got := ReplaceExtension(test.from, test.ext)
+			if got != test.to {
+				t.Errorf("ReplaceExtension(%v, %v) = %v; want: %v", test.from, test.ext, got, test.to)
+			}
+		})
+	}
+}
diff --git a/proptools/escape.go b/proptools/escape.go
index e7f0456..b8790b5 100644
--- a/proptools/escape.go
+++ b/proptools/escape.go
@@ -56,30 +56,44 @@
 
 }
 
-// ShellEscapeList takes string that may contain characters that are meaningful to bash and
+func shellUnsafeChar(r rune) bool {
+	switch {
+	case 'A' <= r && r <= 'Z',
+		'a' <= r && r <= 'z',
+		'0' <= r && r <= '9',
+		r == '_',
+		r == '+',
+		r == '-',
+		r == '=',
+		r == '.',
+		r == ',',
+		r == '/':
+		return false
+	default:
+		return true
+	}
+}
+
+// ShellEscape takes a string that may contain characters that are meaningful to bash and
 // escapes it if necessary by wrapping it in single quotes, and replacing internal single quotes with
 // '\'' (one single quote to end the quoting, a shell-escaped single quote to insert a real single
 // quote, and then a single quote to restart quoting).
 func ShellEscape(s string) string {
-	shellUnsafeChar := func(r rune) bool {
-		switch {
-		case 'A' <= r && r <= 'Z',
-			'a' <= r && r <= 'z',
-			'0' <= r && r <= '9',
-			r == '_',
-			r == '+',
-			r == '-',
-			r == '=',
-			r == '.',
-			r == ',',
-			r == '/',
-			r == ' ':
-			return false
-		default:
-			return true
-		}
+	shellUnsafeCharNotSpace := func(r rune) bool {
+		return r != ' ' && shellUnsafeChar(r)
 	}
 
+	if strings.IndexFunc(s, shellUnsafeCharNotSpace) == -1 {
+		// No escaping necessary
+		return s
+	}
+
+	return `'` + singleQuoteReplacer.Replace(s) + `'`
+}
+
+// ShellEscapeIncludingSpaces escapes the input `s` in a similar way to ShellEscape, except that
+// it also treats spaces as meaningful characters.
+func ShellEscapeIncludingSpaces(s string) string {
 	if strings.IndexFunc(s, shellUnsafeChar) == -1 {
 		// No escaping necessary
 		return s
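
After this change ShellEscape still leaves bare spaces unquoted (so a whole command line keeps its word boundaries), while the new ShellEscapeIncludingSpaces quotes them, treating the string as a single shell word. A short sketch, with expected outputs taken from the test cases below:

package main

import (
	"fmt"

	"github.com/google/blueprint/proptools"
)

func main() {
	// Spaces alone do not trigger quoting in ShellEscape...
	fmt.Println(proptools.ShellEscape(`arg1 arg2`)) // arg1 arg2

	// ...but they do in ShellEscapeIncludingSpaces, which treats the whole
	// string as one shell word.
	fmt.Println(proptools.ShellEscapeIncludingSpaces(`arg1 arg2`)) // 'arg1 arg2'

	// Single quotes are escaped the same way by both functions.
	fmt.Println(proptools.ShellEscapeIncludingSpaces(`it's`)) // 'it'\''s'
}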
diff --git a/proptools/escape_test.go b/proptools/escape_test.go
index 633d711..5823a05 100644
--- a/proptools/escape_test.go
+++ b/proptools/escape_test.go
@@ -91,6 +91,24 @@
 	},
 }
 
+var shellEscapeIncludingSpacesTestCase = []escapeTestCase{
+	{
+		name: "no escaping",
+		in:   `test`,
+		out:  `test`,
+	},
+	{
+		name: "spacing",
+		in:   `arg1 arg2`,
+		out:  `'arg1 arg2'`,
+	},
+	{
+		name: "single quote",
+		in:   `'arg'`,
+		out:  `''\''arg'\'''`,
+	},
+}
+
 func TestNinjaEscaping(t *testing.T) {
 	for _, testCase := range ninjaEscapeTestCase {
 		got := NinjaEscape(testCase.in)
@@ -109,6 +127,15 @@
 	}
 }
 
+func TestShellEscapeIncludingSpaces(t *testing.T) {
+	for _, testCase := range shellEscapeIncludingSpacesTestCase {
+		got := ShellEscapeIncludingSpaces(testCase.in)
+		if got != testCase.out {
+			t.Errorf("%s: expected `%s` got `%s`", testCase.name, testCase.out, got)
+		}
+	}
+}
+
 func TestExternalShellEscaping(t *testing.T) {
 	if testing.Short() {
 		return
@@ -124,3 +151,19 @@
 		}
 	}
 }
+
+func TestExternalShellEscapeIncludingSpaces(t *testing.T) {
+	if testing.Short() {
+		return
+	}
+	for _, testCase := range shellEscapeIncludingSpacesTestCase {
+		cmd := "echo -n " + ShellEscapeIncludingSpaces(testCase.in)
+		got, err := exec.Command("/bin/sh", "-c", cmd).Output()
+		if err != nil {
+			t.Error(err)
+		}
+		if string(got) != testCase.in {
+			t.Errorf("%s: expected `%s` got `%s`", testCase.name, testCase.in, got)
+		}
+	}
+}
diff --git a/proptools/filter.go b/proptools/filter.go
index e6b3336..54a20d5 100644
--- a/proptools/filter.go
+++ b/proptools/filter.go
@@ -173,7 +173,13 @@
 		return nil, true
 	}
 
-	if !filtered {
+	// If the predicate selected all fields in the structure then it is generally better to reuse the
+	// original type, as that avoids the footprint of creating another type. Also, if the original
+	// type is a named type then it will reduce the size of any structs the caller may create that
+	// include fields of this type. However, the original type should only be reused if its name does
+	// not exceed maxNameSize. Exceeding the limit is, of course, more likely for an anonymous type
+	// than a named one, but this treats them the same.
+	if !filtered && (maxNameSize < 0 || len(prop.String()) < maxNameSize) {
 		if ptr {
 			return []reflect.Type{reflect.PtrTo(prop)}, false
 		}
diff --git a/proptools/filter_test.go b/proptools/filter_test.go
index 0ea04bb..1c27cb2 100644
--- a/proptools/filter_test.go
+++ b/proptools/filter_test.go
@@ -240,6 +240,12 @@
 }
 
 func TestFilterPropertyStructSharded(t *testing.T) {
+	type KeepAllWithAReallyLongNameThatExceedsTheMaxNameSize struct {
+		A *string `keep:"true"`
+		B *string `keep:"true"`
+		C *string `keep:"true"`
+	}
+
 	tests := []struct {
 		name        string
 		maxNameSize int
@@ -266,6 +272,44 @@
 			},
 			filtered: true,
 		},
+		{
+			name:        "anonymous where all match but still needs sharding",
+			maxNameSize: 20,
+			in: &struct {
+				A *string `keep:"true"`
+				B *string `keep:"true"`
+				C *string `keep:"true"`
+			}{},
+			out: []interface{}{
+				&struct {
+					A *string
+				}{},
+				&struct {
+					B *string
+				}{},
+				&struct {
+					C *string
+				}{},
+			},
+			filtered: true,
+		},
+		{
+			name:        "named where all match but still needs sharding",
+			maxNameSize: 20,
+			in:          &KeepAllWithAReallyLongNameThatExceedsTheMaxNameSize{},
+			out: []interface{}{
+				&struct {
+					A *string
+				}{},
+				&struct {
+					B *string
+				}{},
+				&struct {
+					C *string
+				}{},
+			},
+			filtered: true,
+		},
 	}
 
 	for _, test := range tests {
diff --git a/proptools/proptools.go b/proptools/proptools.go
index 2aa6e32..1da3ba4 100644
--- a/proptools/proptools.go
+++ b/proptools/proptools.go
@@ -125,3 +125,7 @@
 func isSlice(t reflect.Type) bool {
 	return t.Kind() == reflect.Slice
 }
+
+func isSliceOfStruct(t reflect.Type) bool {
+	return isSlice(t) && isStruct(t.Elem())
+}
diff --git a/proptools/tag.go b/proptools/tag.go
index d69853a..b078894 100644
--- a/proptools/tag.go
+++ b/proptools/tag.go
@@ -23,10 +23,17 @@
 // HasTag returns true if a StructField has a tag in the form `name:"foo,value"`.
 func HasTag(field reflect.StructField, name, value string) bool {
 	tag := field.Tag.Get(name)
-	for _, entry := range strings.Split(tag, ",") {
-		if entry == value {
+	for len(tag) > 0 {
+		idx := strings.Index(tag, ",")
+
+		if idx < 0 {
+			return tag == value
+		}
+		if tag[:idx] == value {
 			return true
 		}
+
+		tag = tag[idx+1:]
 	}
 
 	return false
@@ -49,8 +56,8 @@
 	for i := 0; i < t.NumField(); i++ {
 		field := t.Field(i)
 		ft := field.Type
-		if isStruct(ft) || isStructPtr(ft) {
-			if ft.Kind() == reflect.Ptr {
+		if isStruct(ft) || isStructPtr(ft) || isSliceOfStruct(ft) {
+			if ft.Kind() == reflect.Ptr || ft.Kind() == reflect.Slice {
 				ft = ft.Elem()
 			}
 			subIndexes := propertyIndexesWithTag(ft, key, value)
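
HasTag now walks the comma-separated tag value with strings.Index instead of allocating a slice via strings.Split; the benchmark added below measures that difference. A small usage sketch; the props type and its tag values are made up.

package main

import (
	"fmt"
	"reflect"

	"github.com/google/blueprint/proptools"
)

type props struct {
	Name string `blueprint:"mutated,other"`
}

func main() {
	field, _ := reflect.TypeOf(props{}).FieldByName("Name")
	// True: "mutated" is one of the comma-separated values of the
	// `blueprint` tag.
	fmt.Println(proptools.HasTag(field, "blueprint", "mutated"))
	// False: "missing" is not present in the tag.
	fmt.Println(proptools.HasTag(field, "blueprint", "missing"))
}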
diff --git a/proptools/tag_test.go b/proptools/tag_test.go
index 0041c54..d466859 100644
--- a/proptools/tag_test.go
+++ b/proptools/tag_test.go
@@ -19,16 +19,16 @@
 	"testing"
 )
 
-func TestHasTag(t *testing.T) {
-	type testType struct {
-		NoTag       string
-		EmptyTag    string ``
-		OtherTag    string `foo:"bar"`
-		MatchingTag string `name:"value"`
-		ExtraValues string `name:"foo,value,bar"`
-		ExtraTags   string `foo:"bar" name:"value"`
-	}
+type testType struct {
+	NoTag       string
+	EmptyTag    string ``
+	OtherTag    string `foo:"bar"`
+	MatchingTag string `name:"value"`
+	ExtraValues string `name:"foo,value,bar"`
+	ExtraTags   string `foo:"bar" name:"value"`
+}
 
+func TestHasTag(t *testing.T) {
 	tests := []struct {
 		field string
 		want  bool
@@ -68,6 +68,39 @@
 	}
 }
 
+func BenchmarkHasTag(b *testing.B) {
+	tests := []struct {
+		field string
+	}{
+		{
+			field: "NoTag",
+		},
+		{
+			field: "EmptyTag",
+		},
+		{
+			field: "OtherTag",
+		},
+		{
+			field: "MatchingTag",
+		},
+		{
+			field: "ExtraValues",
+		},
+		{
+			field: "ExtraTags",
+		},
+	}
+	for _, test := range tests {
+		b.Run(test.field, func(b *testing.B) {
+			field, _ := reflect.TypeOf(testType{}).FieldByName(test.field)
+			for i := 0; i < b.N; i++ {
+				HasTag(field, "name", "value")
+			}
+		})
+	}
+}
+
 func TestPropertyIndexesWithTag(t *testing.T) {
 	tests := []struct {
 		name string
@@ -123,6 +156,31 @@
 			want: [][]int{{0, 0}},
 		},
 		{
+			name: "slice of struct",
+			ps: &struct {
+				Other int
+				Foo   []struct {
+					Other int
+					Bar   string `name:"value"`
+				}
+			}{},
+			want: [][]int{{1, 1}},
+		},
+		{
+			name: "slice^2 of struct",
+			ps: &struct {
+				Other int
+				Foo   []struct {
+					Other int
+					Bar   []struct {
+						Other int
+						Baz   string `name:"value"`
+					}
+				}
+			}{},
+			want: [][]int{{1, 1, 1}},
+		},
+		{
 			name: "nil",
 			ps: (*struct {
 				Foo string `name:"value"`
diff --git a/proptools/unpack.go b/proptools/unpack.go
index 4a0858c..f6d9e95 100644
--- a/proptools/unpack.go
+++ b/proptools/unpack.go
@@ -280,6 +280,14 @@
 		}
 
 		if isStruct(fieldValue.Type()) {
+			if property.Value.Eval().Type() != parser.MapType {
+				ctx.addError(&UnpackError{
+					fmt.Errorf("can't assign %s value to map property %q",
+						property.Value.Type(), property.Name),
+					property.Value.Pos(),
+				})
+				continue
+			}
 			ctx.unpackToStruct(propertyName, fieldValue)
 			if len(ctx.errs) >= maxUnpackErrors {
 				return
@@ -307,8 +315,11 @@
 	sliceName string, property *parser.Property, sliceType reflect.Type) (reflect.Value, bool) {
 	propValueAsList, ok := property.Value.Eval().(*parser.List)
 	if !ok {
-		ctx.addError(fmt.Errorf("%s: can't assign %s value to list property %q",
-			property.Value.Pos(), property.Value.Type(), property.Name))
+		ctx.addError(&UnpackError{
+			fmt.Errorf("can't assign %s value to list property %q",
+				property.Value.Type(), property.Name),
+			property.Value.Pos(),
+		})
 		return reflect.MakeSlice(sliceType, 0, 0), false
 	}
 	exprs := propValueAsList.Values
@@ -387,24 +398,33 @@
 	case reflect.Bool:
 		b, ok := property.Value.Eval().(*parser.Bool)
 		if !ok {
-			return value, fmt.Errorf("%s: can't assign %s value to bool property %q",
-				property.Value.Pos(), property.Value.Type(), property.Name)
+			return value, &UnpackError{
+				fmt.Errorf("can't assign %s value to bool property %q",
+					property.Value.Type(), property.Name),
+				property.Value.Pos(),
+			}
 		}
 		value = reflect.ValueOf(b.Value)
 
 	case reflect.Int64:
 		b, ok := property.Value.Eval().(*parser.Int64)
 		if !ok {
-			return value, fmt.Errorf("%s: can't assign %s value to int64 property %q",
-				property.Value.Pos(), property.Value.Type(), property.Name)
+			return value, &UnpackError{
+				fmt.Errorf("can't assign %s value to int64 property %q",
+					property.Value.Type(), property.Name),
+				property.Value.Pos(),
+			}
 		}
 		value = reflect.ValueOf(b.Value)
 
 	case reflect.String:
 		s, ok := property.Value.Eval().(*parser.String)
 		if !ok {
-			return value, fmt.Errorf("%s: can't assign %s value to string property %q",
-				property.Value.Pos(), property.Value.Type(), property.Name)
+			return value, &UnpackError{
+				fmt.Errorf("can't assign %s value to string property %q",
+					property.Value.Type(), property.Name),
+				property.Value.Pos(),
+			}
 		}
 		value = reflect.ValueOf(s.Value)
 
diff --git a/proptools/unpack_test.go b/proptools/unpack_test.go
index 6a4d7b4..7e2751d 100644
--- a/proptools/unpack_test.go
+++ b/proptools/unpack_test.go
@@ -24,12 +24,14 @@
 )
 
 var validUnpackTestCases = []struct {
+	name   string
 	input  string
 	output []interface{}
 	empty  []interface{}
 	errs   []error
 }{
 	{
+		name: "blank and unset",
 		input: `
 			m {
 				s: "abc",
@@ -50,6 +52,7 @@
 	},
 
 	{
+		name: "string",
 		input: `
 			m {
 				s: "abc",
@@ -65,6 +68,7 @@
 	},
 
 	{
+		name: "bool",
 		input: `
 			m {
 				isGood: true,
@@ -80,6 +84,7 @@
 	},
 
 	{
+		name: "boolptr",
 		input: `
 			m {
 				isGood: true,
@@ -100,6 +105,7 @@
 	},
 
 	{
+		name: "slice",
 		input: `
 			m {
 				stuff: ["asdf", "jkl;", "qwert",
@@ -123,6 +129,35 @@
 	},
 
 	{
+		name: "double nested",
+		input: `
+			m {
+				nested: {
+					nested: {
+						s: "abc",
+					},
+				},
+			}
+		`,
+		output: []interface{}{
+			&struct {
+				Nested struct {
+					Nested struct {
+						S string
+					}
+				}
+			}{
+				Nested: struct{ Nested struct{ S string } }{
+					Nested: struct{ S string }{
+						S: "abc",
+					},
+				},
+			},
+		},
+	},
+
+	{
+		name: "nested",
 		input: `
 			m {
 				nested: {
@@ -144,6 +179,7 @@
 	},
 
 	{
+		name: "nested interface",
 		input: `
 			m {
 				nested: {
@@ -163,6 +199,7 @@
 	},
 
 	{
+		name: "mixed",
 		input: `
 			m {
 				nested: {
@@ -190,6 +227,7 @@
 	},
 
 	{
+		name: "filter",
 		input: `
 			m {
 				nested: {
@@ -220,6 +258,7 @@
 
 	// List of maps
 	{
+		name: "list of structs",
 		input: `
 			m {
 				mapslist: [
@@ -254,6 +293,7 @@
 
 	// List of pointers to structs
 	{
+		name: "list of pointers to structs",
 		input: `
 			m {
 				mapslist: [
@@ -288,6 +328,7 @@
 
 	// List of lists
 	{
+		name: "list of lists",
 		input: `
 			m {
 				listoflists: [
@@ -310,6 +351,7 @@
 
 	// Multilevel
 	{
+		name: "multilevel",
 		input: `
 			m {
 				name: "mymodule",
@@ -358,6 +400,7 @@
 	},
 	// Anonymous struct
 	{
+		name: "embedded struct",
 		input: `
 			m {
 				s: "abc",
@@ -389,6 +432,7 @@
 
 	// Anonymous interface
 	{
+		name: "embedded interface",
 		input: `
 			m {
 				s: "abc",
@@ -420,6 +464,7 @@
 
 	// Anonymous struct with name collision
 	{
+		name: "embedded name collision",
 		input: `
 			m {
 				s: "abc",
@@ -456,6 +501,7 @@
 
 	// Anonymous interface with name collision
 	{
+		name: "embeded interface name collision",
 		input: `
 			m {
 				s: "abc",
@@ -492,6 +538,7 @@
 
 	// Variables
 	{
+		name: "variables",
 		input: `
 			list = ["abc"]
 			string = "def"
@@ -527,6 +574,7 @@
 
 	// Multiple property structs
 	{
+		name: "multiple",
 		input: `
 			m {
 				nested: {
@@ -560,6 +608,7 @@
 
 	// Nil pointer to struct
 	{
+		name: "nil struct pointer",
 		input: `
 			m {
 				nested: {
@@ -589,6 +638,7 @@
 
 	// Interface containing nil pointer to struct
 	{
+		name: "interface nil struct pointer",
 		input: `
 			m {
 				nested: {
@@ -616,6 +666,7 @@
 
 	// Factory set properties
 	{
+		name: "factory properties",
 		input: `
 			m {
 				string: "abc",
@@ -675,61 +726,291 @@
 
 func TestUnpackProperties(t *testing.T) {
 	for _, testCase := range validUnpackTestCases {
-		r := bytes.NewBufferString(testCase.input)
-		file, errs := parser.ParseAndEval("", r, parser.NewScope(nil))
-		if len(errs) != 0 {
-			t.Errorf("test case: %s", testCase.input)
-			t.Errorf("unexpected parse errors:")
-			for _, err := range errs {
-				t.Errorf("  %s", err)
-			}
-			t.FailNow()
-		}
-
-		for _, def := range file.Defs {
-			module, ok := def.(*parser.Module)
-			if !ok {
-				continue
-			}
-
-			var output []interface{}
-			if len(testCase.empty) > 0 {
-				output = testCase.empty
-			} else {
-				for _, p := range testCase.output {
-					output = append(output, CloneEmptyProperties(reflect.ValueOf(p)).Interface())
-				}
-			}
-			_, errs = UnpackProperties(module.Properties, output...)
-			if len(errs) != 0 && len(testCase.errs) == 0 {
+		t.Run(testCase.name, func(t *testing.T) {
+			r := bytes.NewBufferString(testCase.input)
+			file, errs := parser.ParseAndEval("", r, parser.NewScope(nil))
+			if len(errs) != 0 {
 				t.Errorf("test case: %s", testCase.input)
-				t.Errorf("unexpected unpack errors:")
+				t.Errorf("unexpected parse errors:")
 				for _, err := range errs {
 					t.Errorf("  %s", err)
 				}
 				t.FailNow()
-			} else if !reflect.DeepEqual(errs, testCase.errs) {
-				t.Errorf("test case: %s", testCase.input)
-				t.Errorf("incorrect errors:")
-				t.Errorf("  expected: %+v", testCase.errs)
-				t.Errorf("       got: %+v", errs)
 			}
 
-			if len(output) != len(testCase.output) {
-				t.Fatalf("incorrect number of property structs, expected %d got %d",
-					len(testCase.output), len(output))
-			}
+			for _, def := range file.Defs {
+				module, ok := def.(*parser.Module)
+				if !ok {
+					continue
+				}
 
-			for i := range output {
-				got := reflect.ValueOf(output[i]).Interface()
-				if !reflect.DeepEqual(got, testCase.output[i]) {
+				var output []interface{}
+				if len(testCase.empty) > 0 {
+					for _, p := range testCase.empty {
+						output = append(output, CloneProperties(reflect.ValueOf(p)).Interface())
+					}
+				} else {
+					for _, p := range testCase.output {
+						output = append(output, CloneEmptyProperties(reflect.ValueOf(p)).Interface())
+					}
+				}
+
+				_, errs = UnpackProperties(module.Properties, output...)
+				if len(errs) != 0 && len(testCase.errs) == 0 {
 					t.Errorf("test case: %s", testCase.input)
-					t.Errorf("incorrect output:")
-					t.Errorf("  expected: %+v", testCase.output[i])
-					t.Errorf("       got: %+v", got)
+					t.Errorf("unexpected unpack errors:")
+					for _, err := range errs {
+						t.Errorf("  %s", err)
+					}
+					t.FailNow()
+				} else if !reflect.DeepEqual(errs, testCase.errs) {
+					t.Errorf("test case: %s", testCase.input)
+					t.Errorf("incorrect errors:")
+					t.Errorf("  expected: %+v", testCase.errs)
+					t.Errorf("       got: %+v", errs)
+				}
+
+				if len(output) != len(testCase.output) {
+					t.Fatalf("incorrect number of property structs, expected %d got %d",
+						len(testCase.output), len(output))
+				}
+
+				for i := range output {
+					got := reflect.ValueOf(output[i]).Interface()
+					if !reflect.DeepEqual(got, testCase.output[i]) {
+						t.Errorf("test case: %s", testCase.input)
+						t.Errorf("incorrect output:")
+						t.Errorf("  expected: %+v", testCase.output[i])
+						t.Errorf("       got: %+v", got)
+					}
 				}
 			}
-		}
+		})
+	}
+}
+
+func TestUnpackErrors(t *testing.T) {
+	testCases := []struct {
+		name   string
+		input  string
+		output []interface{}
+		errors []string
+	}{
+		{
+			name: "missing",
+			input: `
+				m {
+					missing: true,
+				}
+			`,
+			output: []interface{}{},
+			errors: []string{`<input>:3:13: unrecognized property "missing"`},
+		},
+		{
+			name: "missing nested",
+			input: `
+				m {
+					nested: {
+						missing: true,
+					},
+				}
+			`,
+			output: []interface{}{
+				&struct {
+					Nested struct{}
+				}{},
+			},
+			errors: []string{`<input>:4:14: unrecognized property "nested.missing"`},
+		},
+		{
+			name: "mutated",
+			input: `
+				m {
+					mutated: true,
+				}
+			`,
+			output: []interface{}{
+				&struct {
+					Mutated bool `blueprint:"mutated"`
+				}{},
+			},
+			errors: []string{`<input>:3:13: mutated field mutated cannot be set in a Blueprint file`},
+		},
+		{
+			name: "nested mutated",
+			input: `
+				m {
+					nested: {
+						mutated: true,
+					},
+				}
+			`,
+			output: []interface{}{
+				&struct {
+					Nested struct {
+						Mutated bool `blueprint:"mutated"`
+					}
+				}{},
+			},
+			errors: []string{`<input>:4:14: mutated field nested.mutated cannot be set in a Blueprint file`},
+		},
+		{
+			name: "duplicate",
+			input: `
+				m {
+					exists: true,
+					exists: true,
+				}
+			`,
+			output: []interface{}{
+				&struct {
+					Exists bool
+				}{},
+			},
+			errors: []string{
+				`<input>:4:12: property "exists" already defined`,
+				`<input>:3:12: <-- previous definition here`,
+			},
+		},
+		{
+			name: "nested duplicate",
+			input: `
+				m {
+					nested: {
+						exists: true,
+						exists: true,
+					},
+				}
+			`,
+			output: []interface{}{
+				&struct {
+					Nested struct {
+						Exists bool
+					}
+				}{},
+			},
+			errors: []string{
+				`<input>:5:13: property "nested.exists" already defined`,
+				`<input>:4:13: <-- previous definition here`,
+			},
+		},
+		{
+			name: "wrong type",
+			input: `
+				m {
+					int: "foo",
+				}
+			`,
+			output: []interface{}{
+				&struct {
+					Int *int64
+				}{},
+			},
+			errors: []string{
+				`<input>:3:11: can't assign string value to int64 property "int"`,
+			},
+		},
+		{
+			name: "wrong type for map",
+			input: `
+				m {
+					map: "foo",
+				}
+			`,
+			output: []interface{}{
+				&struct {
+					Map struct {
+						S string
+					}
+				}{},
+			},
+			errors: []string{
+				`<input>:3:11: can't assign string value to map property "map"`,
+			},
+		},
+		{
+			name: "wrong type for list",
+			input: `
+				m {
+					list: "foo",
+				}
+			`,
+			output: []interface{}{
+				&struct {
+					List []string
+				}{},
+			},
+			errors: []string{
+				`<input>:3:12: can't assign string value to list property "list"`,
+			},
+		},
+		{
+			name: "wrong type for list of maps",
+			input: `
+				m {
+					map_list: "foo",
+				}
+			`,
+			output: []interface{}{
+				&struct {
+					Map_list []struct {
+						S string
+					}
+				}{},
+			},
+			errors: []string{
+				`<input>:3:16: can't assign string value to list property "map_list"`,
+			},
+		},
+	}
+
+	for _, testCase := range testCases {
+		t.Run(testCase.name, func(t *testing.T) {
+			r := bytes.NewBufferString(testCase.input)
+			file, errs := parser.ParseAndEval("", r, parser.NewScope(nil))
+			if len(errs) != 0 {
+				t.Errorf("test case: %s", testCase.input)
+				t.Errorf("unexpected parse errors:")
+				for _, err := range errs {
+					t.Errorf("  %s", err)
+				}
+				t.FailNow()
+			}
+
+			for _, def := range file.Defs {
+				module, ok := def.(*parser.Module)
+				if !ok {
+					continue
+				}
+
+				var output []interface{}
+				for _, p := range testCase.output {
+					output = append(output, CloneEmptyProperties(reflect.ValueOf(p)).Interface())
+				}
+
+				_, errs = UnpackProperties(module.Properties, output...)
+
+				printErrors := false
+				for _, expectedErr := range testCase.errors {
+					foundError := false
+					for _, err := range errs {
+						if err.Error() == expectedErr {
+							foundError = true
+						}
+					}
+					if !foundError {
+						t.Errorf("expected error %s", expectedErr)
+						printErrors = true
+					}
+				}
+				if printErrors {
+					t.Errorf("got errors:")
+					for _, err := range errs {
+						t.Errorf("   %s", err.Error())
+					}
+				}
+			}
+		})
 	}
 }
 
diff --git a/provider.go b/provider.go
new file mode 100644
index 0000000..b83e1d4
--- /dev/null
+++ b/provider.go
@@ -0,0 +1,216 @@
+// Copyright 2020 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package blueprint
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// This file implements Providers, modelled after Bazel
+// (https://docs.bazel.build/versions/master/skylark/rules.html#providers).
+// Each provider can be associated with a mutator, in which case the value for the provider for a
+// module can only be set during the mutator call for the module, and the value can only be
+// retrieved after the mutator call for the module. For providers not associated with a mutator, the
+// value for the provider for a module can only be set during GenerateBuildActions for the
+// module, and the value can only be retrieved after GenerateBuildActions for the module.
+//
+// Providers are globally registered during init() and given a unique ID.  The value of a provider
+// for a module is stored in an []interface{} indexed by the ID.  If the value of a provider has
+// not been set, the value in the []interface{} will be nil.
+//
+// If the storage used by the provider value arrays becomes too large:
+//  sizeof([]interface) * number of providers * number of modules that have a provider value set
+// then the storage can be replaced with something like a bitwise trie.
+//
+// The purpose of providers is to provide a serializable checkpoint between modules to enable
+// Blueprint to skip parts of the analysis phase when inputs haven't changed.  To that end,
+// values passed to providers should be treated as immutable by callers to both the getters and
+// setters.  Go doesn't provide any way to enforce immutability on arbitrary types, so it may be
+// necessary for the getters and setters to make deep copies of the values, likely extending
+// proptools.CloneProperties to do so.
+
+type provider struct {
+	id      int
+	typ     reflect.Type
+	zero    interface{}
+	mutator string
+}
+
+type ProviderKey *provider
+
+var providerRegistry []ProviderKey
+
+// NewProvider returns a ProviderKey for the type of the given example value.  The example value
+// is otherwise unused.
+//
+// The returned ProviderKey can be used to set a value of the ProviderKey's type for a module
+// inside GenerateBuildActions for the module, and to get the value from GenerateBuildActions from
+// any module later in the build graph.
+//
+// Once Go has generics the exampleValue parameter will not be necessary:
+// NewProvider(type T)() ProviderKey(T)
+func NewProvider(exampleValue interface{}) ProviderKey {
+	return NewMutatorProvider(exampleValue, "")
+}
+
+// NewMutatorProvider returns a ProviderKey for the type of the given example value.  The example
+// value is otherwise unused.
+//
+// The returned ProviderKey can be used to set a value of the ProviderKey's type for a module inside
+// the given mutator for the module, and to get the value from any module later in the build graph
+// in the same mutator, or from any module in a later mutator or during
+// GenerateBuildActions.
+//
+// Once Go has generics the exampleValue parameter will not be necessary:
+// NewMutatorProvider(type T)(mutator string) ProviderKey(T)
+func NewMutatorProvider(exampleValue interface{}, mutator string) ProviderKey {
+	checkCalledFromInit()
+
+	typ := reflect.TypeOf(exampleValue)
+	zero := reflect.Zero(typ).Interface()
+
+	provider := &provider{
+		id:      len(providerRegistry),
+		typ:     typ,
+		zero:    zero,
+		mutator: mutator,
+	}
+
+	providerRegistry = append(providerRegistry, provider)
+
+	return provider
+}
+
+// initProviders fills c.providerMutators with the *mutatorInfo associated with each provider ID,
+// if any.
+func (c *Context) initProviders() {
+	c.providerMutators = make([]*mutatorInfo, len(providerRegistry))
+	for _, provider := range providerRegistry {
+		for _, mutator := range c.mutatorInfo {
+			if mutator.name == provider.mutator {
+				c.providerMutators[provider.id] = mutator
+			}
+		}
+	}
+}
+
+// setProvider sets the value for a provider on a moduleInfo.  Verifies that it is called during the
+// appropriate mutator or GenerateBuildActions pass for the provider, and that the value is of the
+// appropriate type.  The value should not be modified after being passed to setProvider.
+//
+// Once Go has generics the value parameter can be typed:
+// setProvider(type T)(m *moduleInfo, provider ProviderKey(T), value T)
+func (c *Context) setProvider(m *moduleInfo, provider ProviderKey, value interface{}) {
+	if provider.mutator == "" {
+		if !m.startedGenerateBuildActions {
+			panic(fmt.Sprintf("Can't set value of provider %s before GenerateBuildActions started",
+				provider.typ))
+		} else if m.finishedGenerateBuildActions {
+			panic(fmt.Sprintf("Can't set value of provider %s after GenerateBuildActions finished",
+				provider.typ))
+		}
+	} else {
+		expectedMutator := c.providerMutators[provider.id]
+		if expectedMutator == nil {
+			panic(fmt.Sprintf("Can't set value of provider %s associated with unregistered mutator %s",
+				provider.typ, provider.mutator))
+		} else if c.mutatorFinishedForModule(expectedMutator, m) {
+			panic(fmt.Sprintf("Can't set value of provider %s after mutator %s finished",
+				provider.typ, provider.mutator))
+		} else if !c.mutatorStartedForModule(expectedMutator, m) {
+			panic(fmt.Sprintf("Can't set value of provider %s before mutator %s started",
+				provider.typ, provider.mutator))
+		}
+	}
+
+	if typ := reflect.TypeOf(value); typ != provider.typ {
+		panic(fmt.Sprintf("Value for provider has incorrect type, wanted %s, got %s",
+			provider.typ, typ))
+	}
+
+	if m.providers == nil {
+		m.providers = make([]interface{}, len(providerRegistry))
+	}
+
+	if m.providers[provider.id] != nil {
+		panic(fmt.Sprintf("Value of provider %s is already set", provider.typ))
+	}
+
+	m.providers[provider.id] = value
+}
+
+// provider returns the value, if any, for a given provider for a module.  Verifies that it is
+// called after the appropriate mutator or GenerateBuildActions pass for the provider on the module.
+// If the value for the provider was not set it returns the zero value of the type of the provider,
+// which means the return value can always be type-asserted to the type of the provider.  The return
+// value should always be considered read-only.
+//
+// Once Go has generics the return value can be typed and the type assert by callers can be dropped:
+// provider(type T)(m *moduleInfo, provider ProviderKey(T)) T
+func (c *Context) provider(m *moduleInfo, provider ProviderKey) (interface{}, bool) {
+	if provider.mutator == "" {
+		if !m.finishedGenerateBuildActions {
+			panic(fmt.Sprintf("Can't get value of provider %s before GenerateBuildActions finished",
+				provider.typ))
+		}
+	} else {
+		expectedMutator := c.providerMutators[provider.id]
+		if expectedMutator != nil && !c.mutatorFinishedForModule(expectedMutator, m) {
+			panic(fmt.Sprintf("Can't get value of provider %s before mutator %s finished",
+				provider.typ, provider.mutator))
+		}
+	}
+
+	if len(m.providers) > provider.id {
+		if p := m.providers[provider.id]; p != nil {
+			return p, true
+		}
+	}
+
+	return provider.zero, false
+}
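+
+// Illustrative only, not part of this change: callers read providers through the public context
+// methods and type-assert the result.  Because an unset provider yields the registered zero
+// value, the assertion always succeeds (using the hypothetical fooInfoProvider/FooInfo names
+// from the sketch above):
+//
+//	ctx.VisitDirectDeps(func(dep Module) {
+//		if info := ctx.OtherModuleProvider(dep, fooInfoProvider).(*FooInfo); info != nil {
+//			// use info, treating it as read-only
+//		}
+//	})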
+
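+// mutatorFinishedForModule returns true if the given mutator has finished running on the given
+// module, either because the mutator pass has finished for all modules or because the pass is
+// currently running and has already finished for this module.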
+func (c *Context) mutatorFinishedForModule(mutator *mutatorInfo, m *moduleInfo) bool {
+	if c.finishedMutators[mutator] {
+		// mutator pass finished for all modules
+		return true
+	}
+
+	if c.startedMutator == mutator {
+		// mutator pass started, check if it is finished for this module
+		return m.finishedMutator == mutator
+	}
+
+	// mutator pass hasn't started
+	return false
+}
+
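+// mutatorStartedForModule returns true if the given mutator has started running on the given
+// module, either because the mutator pass has already finished for all modules or because the
+// pass is currently running and has started for this module.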
+func (c *Context) mutatorStartedForModule(mutator *mutatorInfo, m *moduleInfo) bool {
+	if c.finishedMutators[mutator] {
+		// mutator pass finished for all modules
+		return true
+	}
+
+	if c.startedMutator == mutator {
+		// mutator pass is currently running
+		if m.startedMutator == mutator {
+			// mutator has started for this module
+			return true
+		}
+	}
+
+	return false
+}
diff --git a/provider_test.go b/provider_test.go
new file mode 100644
index 0000000..8f8def4
--- /dev/null
+++ b/provider_test.go
@@ -0,0 +1,420 @@
+// Copyright 2020 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package blueprint
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"testing"
+)
+
+type providerTestModule struct {
+	SimpleName
+	properties struct {
+		Deps []string
+	}
+
+	mutatorProviderValues              []string
+	generateBuildActionsProviderValues []string
+}
+
+func newProviderTestModule() (Module, []interface{}) {
+	m := &providerTestModule{}
+	return m, []interface{}{&m.properties, &m.SimpleName.Properties}
+}
+
+type providerTestMutatorInfo struct {
+	Values []string
+}
+
+type providerTestGenerateBuildActionsInfo struct {
+	Value string
+}
+
+type providerTestUnsetInfo string
+
+var providerTestMutatorInfoProvider = NewMutatorProvider(&providerTestMutatorInfo{}, "provider_mutator")
+var providerTestGenerateBuildActionsInfoProvider = NewProvider(&providerTestGenerateBuildActionsInfo{})
+var providerTestUnsetInfoProvider = NewMutatorProvider((providerTestUnsetInfo)(""), "provider_mutator")
+var providerTestUnusedMutatorProvider = NewMutatorProvider(&struct{ unused string }{}, "nonexistent_mutator")
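+
+// The provider keys above are declared as package-level variables because provider key creation
+// checks that it happens during package initialization (see checkCalledFromInit in provider.go).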
+
+func (p *providerTestModule) GenerateBuildActions(ctx ModuleContext) {
+	unset := ctx.Provider(providerTestUnsetInfoProvider).(providerTestUnsetInfo)
+	if unset != "" {
+		panic(fmt.Sprintf("expected zero value for providerTestUnsetInfoProvider before it was set, got %q",
+			unset))
+	}
+
+	_ = ctx.Provider(providerTestUnusedMutatorProvider)
+
+	ctx.SetProvider(providerTestGenerateBuildActionsInfoProvider, &providerTestGenerateBuildActionsInfo{
+		Value: ctx.ModuleName(),
+	})
+
+	mp := ctx.Provider(providerTestMutatorInfoProvider).(*providerTestMutatorInfo)
+	if mp != nil {
+		p.mutatorProviderValues = mp.Values
+	}
+
+	ctx.VisitDirectDeps(func(module Module) {
+		gbap := ctx.OtherModuleProvider(module, providerTestGenerateBuildActionsInfoProvider).(*providerTestGenerateBuildActionsInfo)
+		if gbap != nil {
+			p.generateBuildActionsProviderValues = append(p.generateBuildActionsProviderValues, gbap.Value)
+		}
+	})
+}
+
+func providerTestDepsMutator(ctx BottomUpMutatorContext) {
+	if p, ok := ctx.Module().(*providerTestModule); ok {
+		ctx.AddDependency(ctx.Module(), nil, p.properties.Deps...)
+	}
+}
+
+func providerTestMutator(ctx BottomUpMutatorContext) {
+	values := []string{strings.ToLower(ctx.ModuleName())}
+
+	ctx.VisitDirectDeps(func(module Module) {
+		mp := ctx.OtherModuleProvider(module, providerTestMutatorInfoProvider).(*providerTestMutatorInfo)
+		if mp != nil {
+			values = append(values, mp.Values...)
+		}
+	})
+
+	ctx.SetProvider(providerTestMutatorInfoProvider, &providerTestMutatorInfo{
+		Values: values,
+	})
+}
+
+func providerTestAfterMutator(ctx BottomUpMutatorContext) {
+	_ = ctx.Provider(providerTestMutatorInfoProvider)
+}
+
+func TestProviders(t *testing.T) {
+	ctx := NewContext()
+	ctx.RegisterModuleType("provider_module", newProviderTestModule)
+	ctx.RegisterBottomUpMutator("provider_deps_mutator", providerTestDepsMutator)
+	ctx.RegisterBottomUpMutator("provider_mutator", providerTestMutator)
+	ctx.RegisterBottomUpMutator("provider_after_mutator", providerTestAfterMutator)
+
+	ctx.MockFileSystem(map[string][]byte{
+		"Blueprints": []byte(`
+			provider_module {
+				name: "A",
+				deps: ["B"],
+			}
+	
+			provider_module {
+				name: "B",
+				deps: ["C", "D"],
+			}
+	
+			provider_module {
+				name: "C",
+				deps: ["D"],
+			}
+	
+			provider_module {
+				name: "D",
+			}
+		`),
+	})
+
+	_, errs := ctx.ParseBlueprintsFiles("Blueprints", nil)
+	if len(errs) == 0 {
+		_, errs = ctx.ResolveDependencies(nil)
+	}
+	if len(errs) == 0 {
+		_, errs = ctx.PrepareBuildActions(nil)
+	}
+	if len(errs) > 0 {
+		t.Errorf("unexpected errors:")
+		for _, err := range errs {
+			t.Errorf("  %s", err)
+		}
+		t.FailNow()
+	}
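+	// Dependency graph: A -> B, B -> {C, D}, C -> D.  D is reachable from B both directly and
+	// through C, which is why "d" appears twice in the expected mutator provider values below.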
+
+	aModule := ctx.moduleGroupFromName("A", nil).moduleByVariantName("").logicModule.(*providerTestModule)
+	if g, w := aModule.generateBuildActionsProviderValues, []string{"B"}; !reflect.DeepEqual(g, w) {
+		t.Errorf("expected A.generateBuildActionsProviderValues %q, got %q", w, g)
+	}
+	if g, w := aModule.mutatorProviderValues, []string{"a", "b", "c", "d", "d"}; !reflect.DeepEqual(g, w) {
+		t.Errorf("expected A.mutatorProviderValues %q, got %q", w, g)
+	}
+
+	bModule := ctx.moduleGroupFromName("B", nil).moduleByVariantName("").logicModule.(*providerTestModule)
+	if g, w := bModule.generateBuildActionsProviderValues, []string{"C", "D"}; !reflect.DeepEqual(g, w) {
+		t.Errorf("expected B.generateBuildActionsProviderValues %q, got %q", w, g)
+	}
+	if g, w := bModule.mutatorProviderValues, []string{"b", "c", "d", "d"}; !reflect.DeepEqual(g, w) {
+		t.Errorf("expected B.mutatorProviderValues %q, got %q", w, g)
+	}
+}
+
+type invalidProviderUsageMutatorInfo string
+type invalidProviderUsageGenerateBuildActionsInfo string
+
+var invalidProviderUsageMutatorInfoProvider = NewMutatorProvider(invalidProviderUsageMutatorInfo(""), "mutator_under_test")
+var invalidProviderUsageGenerateBuildActionsInfoProvider = NewProvider(invalidProviderUsageGenerateBuildActionsInfo(""))
+
+type invalidProviderUsageTestModule struct {
+	parent *invalidProviderUsageTestModule
+
+	SimpleName
+	properties struct {
+		Deps []string
+
+		Early_mutator_set_of_mutator_provider       bool
+		Late_mutator_set_of_mutator_provider        bool
+		Late_build_actions_set_of_mutator_provider  bool
+		Early_mutator_set_of_build_actions_provider bool
+
+		Early_mutator_get_of_mutator_provider       bool
+		Early_module_get_of_mutator_provider        bool
+		Early_mutator_get_of_build_actions_provider bool
+		Early_module_get_of_build_actions_provider  bool
+
+		Duplicate_set bool
+	}
+}
+
+func invalidProviderUsageDepsMutator(ctx BottomUpMutatorContext) {
+	if i, ok := ctx.Module().(*invalidProviderUsageTestModule); ok {
+		ctx.AddDependency(ctx.Module(), nil, i.properties.Deps...)
+	}
+}
+
+func invalidProviderUsageParentMutator(ctx TopDownMutatorContext) {
+	if i, ok := ctx.Module().(*invalidProviderUsageTestModule); ok {
+		ctx.VisitDirectDeps(func(module Module) {
+			module.(*invalidProviderUsageTestModule).parent = i
+		})
+	}
+}
+
+func invalidProviderUsageBeforeMutator(ctx BottomUpMutatorContext) {
+	if i, ok := ctx.Module().(*invalidProviderUsageTestModule); ok {
+		if i.properties.Early_mutator_set_of_mutator_provider {
+			// A mutator attempting to set the value of a provider associated with a later mutator.
+			ctx.SetProvider(invalidProviderUsageMutatorInfoProvider, invalidProviderUsageMutatorInfo(""))
+		}
+		if i.properties.Early_mutator_get_of_mutator_provider {
+			// A mutator attempting to get the value of a provider associated with a later mutator.
+			_ = ctx.Provider(invalidProviderUsageMutatorInfoProvider)
+		}
+	}
+}
+
+func invalidProviderUsageMutatorUnderTest(ctx TopDownMutatorContext) {
+	if i, ok := ctx.Module().(*invalidProviderUsageTestModule); ok {
+		if i.properties.Early_mutator_set_of_build_actions_provider {
+			// A mutator attempting to set the value of a non-mutator provider.
+			ctx.SetProvider(invalidProviderUsageGenerateBuildActionsInfoProvider, invalidProviderUsageGenerateBuildActionsInfo(""))
+		}
+		if i.properties.Early_mutator_get_of_build_actions_provider {
+			// A mutator attempting to get the value of a non-mutator provider.
+			_ = ctx.Provider(invalidProviderUsageGenerateBuildActionsInfoProvider)
+		}
+		if i.properties.Early_module_get_of_mutator_provider {
+			// A mutator attempting to get the value of a provider associated with this mutator on
+			// a module for which this mutator hasn't run.  This is a top down mutator so
+			// dependencies haven't run yet.
+			ctx.VisitDirectDeps(func(module Module) {
+				_ = ctx.OtherModuleProvider(module, invalidProviderUsageMutatorInfoProvider)
+			})
+		}
+	}
+}
+
+func invalidProviderUsageAfterMutator(ctx BottomUpMutatorContext) {
+	if i, ok := ctx.Module().(*invalidProviderUsageTestModule); ok {
+		if i.properties.Late_mutator_set_of_mutator_provider {
+			// A mutator trying to set the value of a provider associated with an earlier mutator.
+			ctx.SetProvider(invalidProviderUsageMutatorInfoProvider, invalidProviderUsageMutatorInfo(""))
+		}
+	}
+}
+
+func (i *invalidProviderUsageTestModule) GenerateBuildActions(ctx ModuleContext) {
+	if i.properties.Late_build_actions_set_of_mutator_provider {
+		// A GenerateBuildActions trying to set the value of a provider associated with a mutator.
+		ctx.SetProvider(invalidProviderUsageMutatorInfoProvider, invalidProviderUsageMutatorInfo(""))
+	}
+	if i.properties.Early_module_get_of_build_actions_provider {
+		// A GenerateBuildActions trying to get the value of a provider on a module for which
+		// GenerateBuildActions hasn't run.
+		_ = ctx.OtherModuleProvider(i.parent, invalidProviderUsageGenerateBuildActionsInfoProvider)
+	}
+	if i.properties.Duplicate_set {
+		ctx.SetProvider(invalidProviderUsageGenerateBuildActionsInfoProvider, invalidProviderUsageGenerateBuildActionsInfo(""))
+		ctx.SetProvider(invalidProviderUsageGenerateBuildActionsInfoProvider, invalidProviderUsageGenerateBuildActionsInfo(""))
+	}
+}
+
+func TestInvalidProvidersUsage(t *testing.T) {
+	run := func(t *testing.T, module string, prop string, panicMsg string) {
+		t.Helper()
+		ctx := NewContext()
+		ctx.RegisterModuleType("invalid_provider_usage_test_module", func() (Module, []interface{}) {
+			m := &invalidProviderUsageTestModule{}
+			return m, []interface{}{&m.properties, &m.SimpleName.Properties}
+		})
+		ctx.RegisterBottomUpMutator("deps", invalidProviderUsageDepsMutator)
+		ctx.RegisterBottomUpMutator("before", invalidProviderUsageBeforeMutator)
+		ctx.RegisterTopDownMutator("mutator_under_test", invalidProviderUsageMutatorUnderTest)
+		ctx.RegisterBottomUpMutator("after", invalidProviderUsageAfterMutator)
+		ctx.RegisterTopDownMutator("parent", invalidProviderUsageParentMutator)
+
+		// Skip cloning modules after the mutators run so that the parent pointer set by the
+		// "parent" mutator stays valid through GenerateBuildActions.
+		ctx.skipCloneModulesAfterMutators = true
+
+		var parentBP, moduleUnderTestBP, childBP string
+
+		prop += ": true,"
+
+		switch module {
+		case "parent":
+			parentBP = prop
+		case "module_under_test":
+			moduleUnderTestBP = prop
+		case "child":
+			childBP = prop
+		}
+
+		bp := fmt.Sprintf(`
+			invalid_provider_usage_test_module {
+				name: "parent",
+				deps: ["module_under_test"],
+				%s
+			}
+	
+			invalid_provider_usage_test_module {
+				name: "module_under_test",
+				deps: ["child"],
+				%s
+			}
+	
+			invalid_provider_usage_test_module {
+				name: "child",
+				%s
+			}
+
+		`,
+			parentBP,
+			moduleUnderTestBP,
+			childBP)
+
+		ctx.MockFileSystem(map[string][]byte{
+			"Blueprints": []byte(bp),
+		})
+
+		_, errs := ctx.ParseBlueprintsFiles("Blueprints", nil)
+
+		if len(errs) == 0 {
+			_, errs = ctx.ResolveDependencies(nil)
+		}
+
+		if len(errs) == 0 {
+			_, errs = ctx.PrepareBuildActions(nil)
+		}
+
+		if len(errs) == 0 {
+			t.Fatal("expected an error")
+		}
+
+		if len(errs) > 1 {
+			t.Errorf("expected a single error, got %d:", len(errs))
+			for i, err := range errs {
+				t.Errorf("%d:  %s", i, err)
+			}
+			t.FailNow()
+		}
+
+		if panicErr, ok := errs[0].(panicError); ok {
+			if panicErr.panic != panicMsg {
+				t.Fatalf("expected panic %q, got %q", panicMsg, panicErr.panic)
+			}
+		} else {
+			t.Fatalf("expected a panicError, got %T: %s", errs[0], errs[0].Error())
+		}
+
+	}
+
+	tests := []struct {
+		prop   string
+		module string
+
+		panicMsg string
+		skip     string
+	}{
+		{
+			prop:     "early_mutator_set_of_mutator_provider",
+			module:   "module_under_test",
+			panicMsg: "Can't set value of provider blueprint.invalidProviderUsageMutatorInfo before mutator mutator_under_test started",
+		},
+		{
+			prop:     "late_mutator_set_of_mutator_provider",
+			module:   "module_under_test",
+			panicMsg: "Can't set value of provider blueprint.invalidProviderUsageMutatorInfo after mutator mutator_under_test finished",
+		},
+		{
+			prop:     "late_build_actions_set_of_mutator_provider",
+			module:   "module_under_test",
+			panicMsg: "Can't set value of provider blueprint.invalidProviderUsageMutatorInfo after mutator mutator_under_test finished",
+		},
+		{
+			prop:     "early_mutator_set_of_build_actions_provider",
+			module:   "module_under_test",
+			panicMsg: "Can't set value of provider blueprint.invalidProviderUsageGenerateBuildActionsInfo before GenerateBuildActions started",
+		},
+
+		{
+			prop:     "early_mutator_get_of_mutator_provider",
+			module:   "module_under_test",
+			panicMsg: "Can't get value of provider blueprint.invalidProviderUsageMutatorInfo before mutator mutator_under_test finished",
+		},
+		{
+			prop:     "early_module_get_of_mutator_provider",
+			module:   "module_under_test",
+			panicMsg: "Can't get value of provider blueprint.invalidProviderUsageMutatorInfo before mutator mutator_under_test finished",
+		},
+		{
+			prop:     "early_mutator_get_of_build_actions_provider",
+			module:   "module_under_test",
+			panicMsg: "Can't get value of provider blueprint.invalidProviderUsageGenerateBuildActionsInfo before GenerateBuildActions finished",
+		},
+		{
+			prop:     "early_module_get_of_build_actions_provider",
+			module:   "module_under_test",
+			panicMsg: "Can't get value of provider blueprint.invalidProviderUsageGenerateBuildActionsInfo before GenerateBuildActions finished",
+		},
+		{
+			prop:     "duplicate_set",
+			module:   "module_under_test",
+			panicMsg: "Value of provider blueprint.invalidProviderUsageGenerateBuildActionsInfo is already set",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.prop, func(t *testing.T) {
+			run(t, tt.module, tt.prop, tt.panicMsg)
+		})
+	}
+}
diff --git a/scope.go b/scope.go
index 0a520d9..3f39eb7 100644
--- a/scope.go
+++ b/scope.go
@@ -28,6 +28,7 @@
 	packageContext() *packageContext
 	name() string                                        // "foo"
 	fullName(pkgNames map[*packageContext]string) string // "pkg.foo" or "path.to.pkg.foo"
+	memoizeFullName(pkgNames map[*packageContext]string) // precompute fullName if desired
 	value(config interface{}) (ninjaString, error)
 	String() string
 }
@@ -38,6 +39,7 @@
 	packageContext() *packageContext
 	name() string                                        // "foo"
 	fullName(pkgNames map[*packageContext]string) string // "pkg.foo" or "path.to.pkg.foo"
+	memoizeFullName(pkgNames map[*packageContext]string) // precompute fullName if desired
 	def(config interface{}) (*poolDef, error)
 	String() string
 }
@@ -48,6 +50,7 @@
 	packageContext() *packageContext
 	name() string                                        // "foo"
 	fullName(pkgNames map[*packageContext]string) string // "pkg.foo" or "path.to.pkg.foo"
+	memoizeFullName(pkgNames map[*packageContext]string) // precompute fullName if desired
 	def(config interface{}) (*ruleDef, error)
 	scope() *basicScope
 	isArg(argName string) bool
@@ -294,9 +297,9 @@
 	}
 
 	v := &localVariable{
-		namePrefix: s.namePrefix,
-		name_:      name,
-		value_:     ninjaValue,
+		fullName_: s.namePrefix + name,
+		name_:     name,
+		value_:    ninjaValue,
 	}
 
 	err = s.scope.AddVariable(v)
@@ -333,11 +336,11 @@
 	}
 
 	r := &localRule{
-		namePrefix: s.namePrefix,
-		name_:      name,
-		def_:       def,
-		argNames:   argNamesSet,
-		scope_:     ruleScope,
+		fullName_: s.namePrefix + name,
+		name_:     name,
+		def_:      def,
+		argNames:  argNamesSet,
+		scope_:    ruleScope,
 	}
 
 	err = s.scope.AddRule(r)
@@ -349,9 +352,9 @@
 }
 
 type localVariable struct {
-	namePrefix string
-	name_      string
-	value_     ninjaString
+	fullName_ string
+	name_     string
+	value_    ninjaString
 }
 
 func (l *localVariable) packageContext() *packageContext {
@@ -363,7 +366,11 @@
 }
 
 func (l *localVariable) fullName(pkgNames map[*packageContext]string) string {
-	return l.namePrefix + l.name_
+	return l.fullName_
+}
+
+func (l *localVariable) memoizeFullName(pkgNames map[*packageContext]string) {
+	// Nothing to do, full name is known at initialization.
 }
 
 func (l *localVariable) value(interface{}) (ninjaString, error) {
@@ -371,15 +378,15 @@
 }
 
 func (l *localVariable) String() string {
-	return "<local var>:" + l.namePrefix + l.name_
+	return "<local var>:" + l.fullName_
 }
 
 type localRule struct {
-	namePrefix string
-	name_      string
-	def_       *ruleDef
-	argNames   map[string]bool
-	scope_     *basicScope
+	fullName_ string
+	name_     string
+	def_      *ruleDef
+	argNames  map[string]bool
+	scope_    *basicScope
 }
 
 func (l *localRule) packageContext() *packageContext {
@@ -391,7 +398,11 @@
 }
 
 func (l *localRule) fullName(pkgNames map[*packageContext]string) string {
-	return l.namePrefix + l.name_
+	return l.fullName_
+}
+
+func (l *localRule) memoizeFullName(pkgNames map[*packageContext]string) {
+	// Nothing to do, full name is known at initialization.
 }
 
 func (l *localRule) def(interface{}) (*ruleDef, error) {
@@ -407,5 +418,5 @@
 }
 
 func (r *localRule) String() string {
-	return "<local rule>:" + r.namePrefix + r.name_
+	return "<local rule>:" + r.fullName_
 }
diff --git a/singleton_ctx.go b/singleton_ctx.go
index 3c0a24c..a4e7153 100644
--- a/singleton_ctx.go
+++ b/singleton_ctx.go
@@ -47,6 +47,16 @@
 	// BlueprintFile returns the path of the Blueprint file that defined the given module.
 	BlueprintFile(module Module) string
 
+	// ModuleProvider returns the value, if any, for the provider for a module.  If the value for the
+	// provider was not set it returns the zero value of the type of the provider, which means the
+	// return value can always be type-asserted to the type of the provider.  The return value should
+	// always be considered read-only.  It panics if called before the appropriate mutator or
+	// GenerateBuildActions pass for the provider on the module.
+	ModuleProvider(module Module, provider ProviderKey) interface{}
+
+	// ModuleHasProvider returns true if the provider for the given module has been set.
+	ModuleHasProvider(m Module, provider ProviderKey) bool
+
 	// ModuleErrorf reports an error at the line number of the module type in the module definition.
 	ModuleErrorf(module Module, format string, args ...interface{})
 
@@ -188,6 +198,15 @@
 	return s.context.ModuleType(logicModule)
 }
 
+func (s *singletonContext) ModuleProvider(logicModule Module, provider ProviderKey) interface{} {
+	return s.context.ModuleProvider(logicModule, provider)
+}
+
+// ModuleHasProvider returns true if the provider for the given module has been set.
+func (s *singletonContext) ModuleHasProvider(logicModule Module, provider ProviderKey) bool {
+	return s.context.ModuleHasProvider(logicModule, provider)
+}
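+
+// Illustrative only, not part of this change: a singleton can read module providers while
+// visiting modules (hypothetical fooSingleton/fooInfoProvider/FooInfo names):
+//
+//	func (s *fooSingleton) GenerateBuildActions(ctx SingletonContext) {
+//		ctx.VisitAllModules(func(module Module) {
+//			if ctx.ModuleHasProvider(module, fooInfoProvider) {
+//				info := ctx.ModuleProvider(module, fooInfoProvider).(*FooInfo)
+//				_ = info // use info, treating it as read-only
+//			}
+//		})
+//	}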
+
 func (s *singletonContext) BlueprintFile(logicModule Module) string {
 	return s.context.BlueprintFile(logicModule)
 }
diff --git a/splice_modules_test.go b/splice_modules_test.go
index a67aeb1..473999a 100644
--- a/splice_modules_test.go
+++ b/splice_modules_test.go
@@ -20,91 +20,91 @@
 )
 
 var (
-	testModuleA = &moduleInfo{variantName: "testModuleA"}
-	testModuleB = &moduleInfo{variantName: "testModuleB"}
-	testModuleC = &moduleInfo{variantName: "testModuleC"}
-	testModuleD = &moduleInfo{variantName: "testModuleD"}
-	testModuleE = &moduleInfo{variantName: "testModuleE"}
-	testModuleF = &moduleInfo{variantName: "testModuleF"}
+	testModuleA = &moduleInfo{variant: variant{name: "testModuleA"}}
+	testModuleB = &moduleInfo{variant: variant{name: "testModuleB"}}
+	testModuleC = &moduleInfo{variant: variant{name: "testModuleC"}}
+	testModuleD = &moduleInfo{variant: variant{name: "testModuleD"}}
+	testModuleE = &moduleInfo{variant: variant{name: "testModuleE"}}
+	testModuleF = &moduleInfo{variant: variant{name: "testModuleF"}}
 )
 
 var spliceModulesTestCases = []struct {
-	in         []*moduleInfo
+	in         modulesOrAliases
 	at         int
-	with       []*moduleInfo
-	out        []*moduleInfo
+	with       modulesOrAliases
+	out        modulesOrAliases
 	outAt      int
 	reallocate bool
 }{
 	{
 		// Insert at the beginning
-		in:         []*moduleInfo{testModuleA, testModuleB, testModuleC},
+		in:         modulesOrAliases{testModuleA, testModuleB, testModuleC},
 		at:         0,
-		with:       []*moduleInfo{testModuleD, testModuleE},
-		out:        []*moduleInfo{testModuleD, testModuleE, testModuleB, testModuleC},
+		with:       modulesOrAliases{testModuleD, testModuleE},
+		out:        modulesOrAliases{testModuleD, testModuleE, testModuleB, testModuleC},
 		outAt:      1,
 		reallocate: true,
 	},
 	{
 		// Insert in the middle
-		in:         []*moduleInfo{testModuleA, testModuleB, testModuleC},
+		in:         modulesOrAliases{testModuleA, testModuleB, testModuleC},
 		at:         1,
-		with:       []*moduleInfo{testModuleD, testModuleE},
-		out:        []*moduleInfo{testModuleA, testModuleD, testModuleE, testModuleC},
+		with:       modulesOrAliases{testModuleD, testModuleE},
+		out:        modulesOrAliases{testModuleA, testModuleD, testModuleE, testModuleC},
 		outAt:      2,
 		reallocate: true,
 	},
 	{
 		// Insert at the end
-		in:         []*moduleInfo{testModuleA, testModuleB, testModuleC},
+		in:         modulesOrAliases{testModuleA, testModuleB, testModuleC},
 		at:         2,
-		with:       []*moduleInfo{testModuleD, testModuleE},
-		out:        []*moduleInfo{testModuleA, testModuleB, testModuleD, testModuleE},
+		with:       modulesOrAliases{testModuleD, testModuleE},
+		out:        modulesOrAliases{testModuleA, testModuleB, testModuleD, testModuleE},
 		outAt:      3,
 		reallocate: true,
 	},
 	{
 		// Insert over a single element
-		in:         []*moduleInfo{testModuleA},
+		in:         modulesOrAliases{testModuleA},
 		at:         0,
-		with:       []*moduleInfo{testModuleD, testModuleE},
-		out:        []*moduleInfo{testModuleD, testModuleE},
+		with:       modulesOrAliases{testModuleD, testModuleE},
+		out:        modulesOrAliases{testModuleD, testModuleE},
 		outAt:      1,
 		reallocate: true,
 	},
 	{
 		// Insert at the beginning without reallocating
-		in:         []*moduleInfo{testModuleA, testModuleB, testModuleC, nil}[0:3],
+		in:         modulesOrAliases{testModuleA, testModuleB, testModuleC, nil}[0:3],
 		at:         0,
-		with:       []*moduleInfo{testModuleD, testModuleE},
-		out:        []*moduleInfo{testModuleD, testModuleE, testModuleB, testModuleC},
+		with:       modulesOrAliases{testModuleD, testModuleE},
+		out:        modulesOrAliases{testModuleD, testModuleE, testModuleB, testModuleC},
 		outAt:      1,
 		reallocate: false,
 	},
 	{
 		// Insert in the middle without reallocating
-		in:         []*moduleInfo{testModuleA, testModuleB, testModuleC, nil}[0:3],
+		in:         modulesOrAliases{testModuleA, testModuleB, testModuleC, nil}[0:3],
 		at:         1,
-		with:       []*moduleInfo{testModuleD, testModuleE},
-		out:        []*moduleInfo{testModuleA, testModuleD, testModuleE, testModuleC},
+		with:       modulesOrAliases{testModuleD, testModuleE},
+		out:        modulesOrAliases{testModuleA, testModuleD, testModuleE, testModuleC},
 		outAt:      2,
 		reallocate: false,
 	},
 	{
 		// Insert at the end without reallocating
-		in:         []*moduleInfo{testModuleA, testModuleB, testModuleC, nil}[0:3],
+		in:         modulesOrAliases{testModuleA, testModuleB, testModuleC, nil}[0:3],
 		at:         2,
-		with:       []*moduleInfo{testModuleD, testModuleE},
-		out:        []*moduleInfo{testModuleA, testModuleB, testModuleD, testModuleE},
+		with:       modulesOrAliases{testModuleD, testModuleE},
+		out:        modulesOrAliases{testModuleA, testModuleB, testModuleD, testModuleE},
 		outAt:      3,
 		reallocate: false,
 	},
 	{
 		// Insert over a single element without reallocating
-		in:         []*moduleInfo{testModuleA, nil}[0:1],
+		in:         modulesOrAliases{testModuleA, nil}[0:1],
 		at:         0,
-		with:       []*moduleInfo{testModuleD, testModuleE},
-		out:        []*moduleInfo{testModuleD, testModuleE},
+		with:       modulesOrAliases{testModuleD, testModuleE},
+		out:        modulesOrAliases{testModuleD, testModuleE},
 		outAt:      1,
 		reallocate: false,
 	},
@@ -112,7 +112,7 @@
 
 func TestSpliceModules(t *testing.T) {
 	for _, testCase := range spliceModulesTestCases {
-		in := make([]*moduleInfo, len(testCase.in), cap(testCase.in))
+		in := make(modulesOrAliases, len(testCase.in), cap(testCase.in))
 		copy(in, testCase.in)
 		origIn := in
 		got, gotAt := spliceModules(in, testCase.at, testCase.with)
@@ -139,6 +139,6 @@
 	}
 }
 
-func sameArray(a, b []*moduleInfo) bool {
+func sameArray(a, b modulesOrAliases) bool {
 	return &a[0:cap(a)][cap(a)-1] == &b[0:cap(b)][cap(b)-1]
 }
diff --git a/visit_test.go b/visit_test.go
index efaadba..1c74b93 100644
--- a/visit_test.go
+++ b/visit_test.go
@@ -149,13 +149,13 @@
 func TestVisit(t *testing.T) {
 	ctx := setupVisitTest(t)
 
-	topModule := ctx.moduleGroupFromName("A", nil).modules[0].logicModule.(*visitModule)
+	topModule := ctx.moduleGroupFromName("A", nil).modules.firstModule().logicModule.(*visitModule)
 	assertString(t, topModule.properties.VisitDepsDepthFirst, "FEDCB")
 	assertString(t, topModule.properties.VisitDepsDepthFirstIf, "FEDC")
 	assertString(t, topModule.properties.VisitDirectDeps, "B")
 	assertString(t, topModule.properties.VisitDirectDepsIf, "")
 
-	eModule := ctx.moduleGroupFromName("E", nil).modules[0].logicModule.(*visitModule)
+	eModule := ctx.moduleGroupFromName("E", nil).modules.firstModule().logicModule.(*visitModule)
 	assertString(t, eModule.properties.VisitDepsDepthFirst, "F")
 	assertString(t, eModule.properties.VisitDepsDepthFirstIf, "F")
 	assertString(t, eModule.properties.VisitDirectDeps, "FF")