Merge branch 'aosp/upstream-master'

Includes android-required NOTICE/MODULE_LICENSE*/METADATA files.

Test: treehugger
Change-Id: Ie31d99425f7c8d907c0284a68e385bdde009464c
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..b060ee8
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,10 @@
+# Ignore backup files.
+*~
+# Ignore Vim swap files.
+.*.sw*
+# Ignore bazel directories
+/bazel-bin
+/bazel-genfiles
+/bazel-out
+/bazel-remote-apis
+/bazel-testlogs
diff --git a/AUTHORS b/AUTHORS
new file mode 100644
index 0000000..8f95963
--- /dev/null
+++ b/AUTHORS
@@ -0,0 +1,9 @@
+# This is the official list of Bazel authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as:
+# Name or Organization <email address>
+# The email address is not required for organizations.
+
+Google Inc.
diff --git a/CODEOWNERS b/CODEOWNERS
new file mode 100644
index 0000000..c0ed4b4
--- /dev/null
+++ b/CODEOWNERS
@@ -0,0 +1 @@
+* @buchgr @ulfjack @bergsieker @ola-rozenfeld @agoulti
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..b977853
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,18 @@
+# Contributing to the Remote Execution API
+
+Contributions to this project are welcome. Feel free to make a pull request, or
+open an issue for further discussion. If you are making substantive changes to
+the proto files, please make sure that they build by running `bazel build
+build/bazel/remote/execution/v2:remote_execution_proto`.
+
+## Contributor License Agreement
+
+Contributions to this project must be accompanied by a Contributor License
+Agreement. You (or your employer) retain the copyright to your contribution,
+this simply gives us permission to use and redistribute your contributions as
+part of the project. Head over to <https://cla.developers.google.com/> to see
+your current agreements on file or to sign a new one.
+
+You generally only need to submit a CLA once, so if you've already submitted one
+(even if it was for a different project), you probably don't need to do it
+again.
diff --git a/CONTRIBUTORS b/CONTRIBUTORS
new file mode 100644
index 0000000..680658c
--- /dev/null
+++ b/CONTRIBUTORS
@@ -0,0 +1,14 @@
+# People who have agreed to one of the CLAs and can contribute patches.
+# The AUTHORS file lists the copyright holders; this file
+# lists people.  For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# https://developers.google.com/open-source/cla/individual
+# https://developers.google.com/open-source/cla/corporate
+#
+# Names should be added to this file as:
+#     Name <email address>
+
+Alexis Hunt <lexer@google.com>
+Ola Rozenfield <olaola@google.com>
+Adrian Ludwin <aludwin@google.com>
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/METADATA b/METADATA
new file mode 100644
index 0000000..21373cc
--- /dev/null
+++ b/METADATA
@@ -0,0 +1,13 @@
+name: "remote-apis"
+description:
+    "An API for caching and execution of actions on a remote system."
+
+third_party {
+  url {
+    type: GIT
+    value: "https://github.com/bazelbuild/remote-apis"
+  }
+  version: "6a5a17b77bca5e70417746fd0616db3849731619"
+  last_upgrade_date { year: 2019 month: 4 day: 9 }
+  license_type: NOTICE
+}
diff --git a/MODULE_LICENSE_APACHE2 b/MODULE_LICENSE_APACHE2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/MODULE_LICENSE_APACHE2
diff --git a/NOTICE b/NOTICE
new file mode 120000
index 0000000..7a694c9
--- /dev/null
+++ b/NOTICE
@@ -0,0 +1 @@
+LICENSE
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..1c99ce6
--- /dev/null
+++ b/README.md
@@ -0,0 +1,50 @@
+# Remote Execution API
+
+The Remote Execution API is an API that, at its most general, allows clients to
+request execution of binaries on a remote system. It is intended primarily for
+use by build systems, such as [Bazel](https://bazel.build), to distribute build and test
+actions through a worker pool, and also provide a central cache of build
+results. This allows builds to execute faster, both by reusing results already
+built by other clients and by allowing many actions to be executed in parallel,
+in excess of the resource limits of the machine running the build.
+
+There are a number of clients and services using these APIs, they are listed
+below.
+
+### Clients
+These tools use the Remote Execution API to distribute builds to workers.
+
+* [Bazel](https://bazel.build)
+* [BuildStream](https://buildstream.build/)
+* [Pants](https://www.pantsbuild.org)
+* [Recc](https://gitlab.com/bloomberg/recc)
+
+### Servers
+These applications implement the Remote Execution API to serve build requests
+from the clients above. These are then distributed to workers; some of these
+workers implement the Remote Worker API.
+
+* [Buildbarn](https://github.com/EdSchouten/bazel-buildbarn)
+* [Buildfarm](https://github.com/bazelbuild/bazel-buildfarm)
+* [BuildGrid](https://buildgrid.build/)
+* [Remote Build Execution (Alpha)](https://blog.bazel.build/2018/10/05/remote-build-execution.html)
+* [Scoot](https://github.com/twitter/scoot)
+
+## Dependencies
+
+The APIs in this repository refer to several general-purpose APIs published by
+Google in the [Google APIs
+repository](https://github.com/googleapis/googleapis). You will need to refer to
+packages from that repository in order to generate code using this API. If you
+build the repository using the included `BUILD` files, Bazel will fetch the
+protobuf compiler and googleapis automatically.
+
+## Using the APIs
+
+The repository contains `BUILD` files to build the protobuf library with
+[Bazel](https://bazel.build/). If you wish to use them with your own project in
+Bazel, you will possibly want to declare `cc_proto_library`,
+`java_proto_library`, etc. rules that depend on them.
+
+Other build systems will have to run protoc on the protobuf files, and link in
+the googleapis and well-known proto types, manually.
diff --git a/WORKSPACE b/WORKSPACE
new file mode 100644
index 0000000..4bd4dfa
--- /dev/null
+++ b/WORKSPACE
@@ -0,0 +1,56 @@
+workspace(name = "bazel_remote_apis")
+
+load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
+
+# Needed for protobuf.
+http_archive(
+    name = "bazel_skylib",
+    # Commit f83cb8dd6f5658bc574ccd873e25197055265d1c of 2018-11-26
+    sha256 = "ba5d15ca230efca96320085d8e4d58da826d1f81b444ef8afccd8b23e0799b52",
+    strip_prefix = "bazel-skylib-f83cb8dd6f5658bc574ccd873e25197055265d1c",
+    urls = [
+        "https://github.com/bazelbuild/bazel-skylib/archive/f83cb8dd6f5658bc574ccd873e25197055265d1c.tar.gz",
+    ],
+)
+
+# Needed for "well-known protos" and protoc.
+http_archive(
+    name = "com_google_protobuf",
+    sha256 = "3e933375ecc58d01e52705479b82f155aea2d02cc55d833f8773213e74f88363",
+    strip_prefix = "protobuf-3.7.0",
+    urls = ["https://github.com/protocolbuffers/protobuf/releases/download/v3.7.0/protobuf-all-3.7.0.tar.gz"],
+)
+
+# Needed for the googleapis protos.
+http_archive(
+    name = "googleapis",
+    build_file = "BUILD.googleapis",
+    sha256 = "7b6ea252f0b8fb5cd722f45feb83e115b689909bbb6a393a873b6cbad4ceae1d",
+    strip_prefix = "googleapis-143084a2624b6591ee1f9d23e7f5241856642f4d",
+    urls = ["https://github.com/googleapis/googleapis/archive/143084a2624b6591ee1f9d23e7f5241856642f4d.zip"],
+)
+
+# Needed for C++ gRPC.
+http_archive(
+    name = "com_github_grpc_grpc",
+    strip_prefix = "grpc-1.17.2",
+    urls = [
+        "https://github.com/grpc/grpc/archive/v1.17.2.tar.gz",
+        "https://mirror.bazel.build/github.com/grpc/grpc/archive/v1.17.2.tar.gz",
+    ],
+    sha256 = "34ed95b727e7c6fcbf85e5eb422e962788e21707b712fdb4caf931553c2c6dbc",
+)
+
+# Pull in all gRPC dependencies.
+load("@com_github_grpc_grpc//bazel:grpc_deps.bzl", "grpc_deps")
+grpc_deps()
+
+bind(
+    name = "grpc_cpp_plugin",
+    actual = "@com_github_grpc_grpc//:grpc_cpp_plugin",
+)
+
+bind(
+    name = "grpc_lib",
+    actual = "@com_github_grpc_grpc//:grpc++",
+)
diff --git a/build/bazel/remote/execution/v2/BUILD b/build/bazel/remote/execution/v2/BUILD
new file mode 100644
index 0000000..34a509c
--- /dev/null
+++ b/build/bazel/remote/execution/v2/BUILD
@@ -0,0 +1,39 @@
+package(default_visibility = ["//visibility:public"])
+load("@com_github_grpc_grpc//bazel:cc_grpc_library.bzl", "cc_grpc_library")
+
+licenses(["notice"])
+
+proto_library(
+    name = "remote_execution_proto",
+    srcs = ["remote_execution.proto"],
+    deps = [
+        "//build/bazel/semver:semver_proto",
+        "@com_google_protobuf//:duration_proto",
+        "@com_google_protobuf//:timestamp_proto",
+        "@googleapis//:google_api_annotations_proto",
+        "@googleapis//:google_api_http_proto",
+        "@googleapis//:google_longrunning_operations_proto",
+        "@googleapis//:google_rpc_status_proto",
+    ],
+)
+
+java_proto_library(
+    name = "remote_execution_java_proto",
+    deps = [":remote_execution_proto"],
+)
+
+cc_grpc_library(
+    name = "remote_execution_cc_proto",
+    srcs = ["remote_execution.proto"],
+    deps = [
+        "//build/bazel/semver:semver_cc_proto",
+        "@googleapis//:google_api_annotations_cc_proto",
+        "@googleapis//:google_api_http_cc_proto",
+        "@googleapis//:google_longrunning_operations_cc_proto",
+        "@googleapis//:google_rpc_status_cc_proto",
+    ],
+    proto_only = False,
+    well_known_protos = True,
+    use_external = False,
+)
+
diff --git a/build/bazel/remote/execution/v2/remote_execution.proto b/build/bazel/remote/execution/v2/remote_execution.proto
new file mode 100644
index 0000000..ca96a34
--- /dev/null
+++ b/build/bazel/remote/execution/v2/remote_execution.proto
@@ -0,0 +1,1433 @@
+// Copyright 2018 The Bazel Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package build.bazel.remote.execution.v2;
+
+import "build/bazel/semver/semver.proto";
+import "google/api/annotations.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/timestamp.proto";
+import "google/rpc/status.proto";
+
+option csharp_namespace = "Build.Bazel.Remote.Execution.V2";
+option go_package = "remoteexecution";
+option java_multiple_files = true;
+option java_outer_classname = "RemoteExecutionProto";
+option java_package = "build.bazel.remote.execution.v2";
+option objc_class_prefix = "REX";
+
+
+// The Remote Execution API is used to execute an
+// [Action][build.bazel.remote.execution.v2.Action] on the remote
+// workers.
+//
+// As with other services in the Remote Execution API, any call may return an
+// error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+// information about when the client should retry the request; clients SHOULD
+// respect the information provided.
+service Execution {
+  // Execute an action remotely.
+  //
+  // In order to execute an action, the client must first upload all of the
+  // inputs, the
+  // [Command][build.bazel.remote.execution.v2.Command] to run, and the
+  // [Action][build.bazel.remote.execution.v2.Action] into the
+  // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
+  // It then calls `Execute` with an `action_digest` referring to them. The
+  // server will run the action and eventually return the result.
+  //
+  // The input `Action`'s fields MUST meet the various canonicalization
+  // requirements specified in the documentation for their types so that it has
+  // the same digest as other logically equivalent `Action`s. The server MAY
+  // enforce the requirements and return errors if a non-canonical input is
+  // received. It MAY also proceed without verifying some or all of the
+  // requirements, such as for performance reasons. If the server does not
+  // verify the requirement, then it will treat the `Action` as distinct from
+  // another logically equivalent action if they hash differently.
+  //
+  // Returns a stream of
+  // [google.longrunning.Operation][google.longrunning.Operation] messages
+  // describing the resulting execution, with eventual `response`
+  // [ExecuteResponse][build.bazel.remote.execution.v2.ExecuteResponse]. The
+  // `metadata` on the operation is of type
+  // [ExecuteOperationMetadata][build.bazel.remote.execution.v2.ExecuteOperationMetadata].
+  //
+  // If the client remains connected after the first response is returned by
+  // the server, then updates are streamed as if the client had called
+  // [WaitExecution][build.bazel.remote.execution.v2.Execution.WaitExecution]
+  // until the execution completes or the request reaches an error. The
+  // operation can also be queried using [Operations
+  // API][google.longrunning.Operations.GetOperation].
+  //
+  // The server NEED NOT implement other methods or functionality of the
+  // Operations API.
+  //
+  // Errors discovered during creation of the `Operation` will be reported
+  // as gRPC Status errors, while errors that occurred while running the
+  // action will be reported in the `status` field of the `ExecuteResponse`. The
+  // server MUST NOT set the `error` field of the `Operation` proto.
+  // The possible errors include:
+  //
+  // * `INVALID_ARGUMENT`: One or more arguments are invalid.
+  // * `FAILED_PRECONDITION`: One or more errors occurred in setting up the
+  //   action requested, such as a missing input or command or no worker being
+  //   available. The client may be able to fix the errors and retry.
+  // * `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to run
+  //   the action.
+  // * `UNAVAILABLE`: Due to a transient condition, such as all workers being
+  //   occupied (and the server does not support a queue), the action could not
+  //   be started. The client should retry.
+  // * `INTERNAL`: An internal error occurred in the execution engine or the
+  //   worker.
+  // * `DEADLINE_EXCEEDED`: The execution timed out.
+  // * `CANCELLED`: The operation was cancelled by the client. This status is
+  //   only possible if the server implements the Operations API CancelOperation
+  //   method, and it was called for the current execution.
+  //
+  // In the case of a missing input or command, the server SHOULD additionally
+  // send a [PreconditionFailure][google.rpc.PreconditionFailure] error detail
+  // where, for each requested blob not present in the CAS, there is a
+  // `Violation` with a `type` of `MISSING` and a `subject` of
+  // `"blobs/{hash}/{size}"` indicating the digest of the missing blob.
+  rpc Execute(ExecuteRequest) returns (stream google.longrunning.Operation) {
+    option (google.api.http) = { post: "/v2/{instance_name=**}/actions:execute" body: "*" };
+  }
+
+  // Wait for an execution operation to complete. When the client initially
+  // makes the request, the server immediately responds with the current status
+  // of the execution. The server will leave the request stream open until the
+  // operation completes, and then respond with the completed operation. The
+  // server MAY choose to stream additional updates as execution progresses,
+  // such as to provide an update as to the state of the execution.
+  rpc WaitExecution(WaitExecutionRequest) returns (stream google.longrunning.Operation) {
+    option (google.api.http) = { post: "/v2/{name=operations/**}:waitExecution" body: "*" };
+  }
+}
+
+// The action cache API is used to query whether a given action has already been
+// performed and, if so, retrieve its result. Unlike the
+// [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage],
+// which addresses blobs by their own content, the action cache addresses the
+// [ActionResult][build.bazel.remote.execution.v2.ActionResult] by a
+// digest of the encoded [Action][build.bazel.remote.execution.v2.Action]
+// which produced them.
+//
+// The lifetime of entries in the action cache is implementation-specific, but
+// the server SHOULD assume that more recently used entries are more likely to
+// be used again. Additionally, action cache implementations SHOULD ensure that
+// any blobs referenced in the
+// [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]
+// are still valid when returning a result.
+//
+// As with other services in the Remote Execution API, any call may return an
+// error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+// information about when the client should retry the request; clients SHOULD
+// respect the information provided.
+service ActionCache {
+  // Retrieve a cached execution result.
+  //
+  // Errors:
+  //
+  // * `NOT_FOUND`: The requested `ActionResult` is not in the cache.
+  rpc GetActionResult(GetActionResultRequest) returns (ActionResult) {
+    option (google.api.http) = { get: "/v2/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}" };
+  }
+
+  // Upload a new execution result.
+  //
+  // In order to allow the server to perform access control based on the type of
+  // action, and to assist with client debugging, the client MUST first upload
+  // the [Action][build.bazel.remote.execution.v2.Action] that produced the
+  // result, along with its
+  // [Command][build.bazel.remote.execution.v2.Command], into the
+  // `ContentAddressableStorage`.
+  //
+  // Errors:
+  //
+  // * `INVALID_ARGUMENT`: One or more arguments are invalid.
+  // * `FAILED_PRECONDITION`: One or more errors occurred in updating the
+  //   action result, such as a missing command or action.
+  // * `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the
+  //   entry to the cache.
+  rpc UpdateActionResult(UpdateActionResultRequest) returns (ActionResult) {
+    option (google.api.http) = { put: "/v2/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}" body: "action_result" };
+  }
+}
+
+// The CAS (content-addressable storage) is used to store the inputs to and
+// outputs from the execution service. Each piece of content is addressed by the
+// digest of its binary data.
+//
+// Most of the binary data stored in the CAS is opaque to the execution engine,
+// and is only used as a communication medium. In order to build an
+// [Action][build.bazel.remote.execution.v2.Action],
+// however, the client will need to also upload the
+// [Command][build.bazel.remote.execution.v2.Command] and input root
+// [Directory][build.bazel.remote.execution.v2.Directory] for the Action.
+// The Command and Directory messages must be marshalled to wire format and then
+// uploaded under the hash as with any other piece of content. In practice, the
+// input root directory is likely to refer to other Directories in its
+// hierarchy, which must also each be uploaded on their own.
+//
+// For small file uploads the client should group them together and call
+// [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs].
+// For large uploads, the client must use the
+// [Write method][google.bytestream.ByteStream.Write] of the ByteStream API. The
+// `resource_name` is `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}`,
+// where `instance_name` is as described in the next paragraph, `uuid` is a
+// version 4 UUID generated by the client, and `hash` and `size` are the
+// [Digest][build.bazel.remote.execution.v2.Digest] of the blob. The
+// `uuid` is used only to avoid collisions when multiple clients try to upload
+// the same file (or the same client tries to upload the file multiple times at
+// once on different threads), so the client MAY reuse the `uuid` for uploading
+// different blobs. The `resource_name` may optionally have a trailing filename
+// (or other metadata) for a client to use if it is storing URLs, as in
+// `{instance}/uploads/{uuid}/blobs/{hash}/{size}/foo/bar/baz.cc`. Anything
+// after the `size` is ignored.
+//
+// A single server MAY support multiple instances of the execution system, each
+// with their own workers, storage, cache, etc. The exact relationship between
+// instances is up to the server. If the server supports multiple instances,
+// then the `instance_name` is an identifier, possibly containing multiple
+// path segments, used to distinguish between the various instances on the
+// server, in a manner defined by the server. For servers which do not support
+// multiple instances, the `instance_name` is the empty path and the leading
+// slash is omitted, so that the `resource_name` becomes `uploads/{uuid}/blobs/{hash}/{size}`.
+// To simplify parsing, a path segment cannot equal any of the following
+// keywords: `blobs`, `uploads`, `actions`, `actionResults`, `operations` and
+// `capabilities`.
+//
+// When attempting an upload, if another client has already completed the upload
+// (which may occur in the middle of a single upload if another client uploads
+// the same blob concurrently), the request will terminate immediately with
+// a response whose `committed_size` is the full size of the uploaded file
+// (regardless of how much data was transmitted by the client). If the client
+// completes the upload but the
+// [Digest][build.bazel.remote.execution.v2.Digest] does not match, an
+// `INVALID_ARGUMENT` error will be returned. In either case, the client should
+// not attempt to retry the upload.
+//
+// For downloading blobs, the client must use the
+// [Read method][google.bytestream.ByteStream.Read] of the ByteStream API, with
+// a `resource_name` of `"{instance_name}/blobs/{hash}/{size}"`, where
+// `instance_name` is the instance name (see above), and `hash` and `size` are
+// the [Digest][build.bazel.remote.execution.v2.Digest] of the blob.
+//
+// The lifetime of entries in the CAS is implementation specific, but it SHOULD
+// be long enough to allow for newly-added and recently looked-up entries to be
+// used in subsequent calls (e.g. to
+// [Execute][build.bazel.remote.execution.v2.Execution.Execute]).
+//
+// As with other services in the Remote Execution API, any call may return an
+// error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
+// information about when the client should retry the request; clients SHOULD
+// respect the information provided.
+service ContentAddressableStorage {
+  // Determine if blobs are present in the CAS.
+  //
+  // Clients can use this API before uploading blobs to determine which ones are
+  // already present in the CAS and do not need to be uploaded again.
+  //
+  // There are no method-specific errors.
+  rpc FindMissingBlobs(FindMissingBlobsRequest) returns (FindMissingBlobsResponse) {
+    option (google.api.http) = { post: "/v2/{instance_name=**}/blobs:findMissing" body: "*" };
+  }
+
+  // Upload many blobs at once.
+  //
+  // The server may enforce a limit of the combined total size of blobs
+  // to be uploaded using this API. This limit may be obtained using the
+  // [Capabilities][build.bazel.remote.execution.v2.Capabilities] API.
+  // Requests exceeding the limit should either be split into smaller
+  // chunks or uploaded using the
+  // [ByteStream API][google.bytestream.ByteStream], as appropriate.
+  //
+  // This request is equivalent to calling a ByteStream `Write` request
+  // on each individual blob, in parallel. The requests may succeed or fail
+  // independently.
+  //
+  // Errors:
+  //
+  // * `INVALID_ARGUMENT`: The client attempted to upload more than the
+  //   server supported limit.
+  //
+  // Individual requests may return the following errors, additionally:
+  //
+  // * `RESOURCE_EXHAUSTED`: There is insufficient disk quota to store the blob.
+  // * `INVALID_ARGUMENT`: The
+  // [Digest][build.bazel.remote.execution.v2.Digest] does not match the
+  // provided data.
+  rpc BatchUpdateBlobs(BatchUpdateBlobsRequest) returns (BatchUpdateBlobsResponse) {
+    option (google.api.http) = { post: "/v2/{instance_name=**}/blobs:batchUpdate" body: "*" };
+  }
+
+  // Download many blobs at once.
+  //
+  // The server may enforce a limit of the combined total size of blobs
+  // to be downloaded using this API. This limit may be obtained using the
+  // [Capabilities][build.bazel.remote.execution.v2.Capabilities] API.
+  // Requests exceeding the limit should either be split into smaller
+  // chunks or downloaded using the
+  // [ByteStream API][google.bytestream.ByteStream], as appropriate.
+  //
+  // This request is equivalent to calling a ByteStream `Read` request
+  // on each individual blob, in parallel. The requests may succeed or fail
+  // independently.
+  //
+  // Errors:
+  //
+  // * `INVALID_ARGUMENT`: The client attempted to read more than the
+  //   server supported limit.
+  //
+  // Every error on individual read will be returned in the corresponding digest
+  // status.
+  rpc BatchReadBlobs(BatchReadBlobsRequest) returns (BatchReadBlobsResponse) {
+    option (google.api.http) = { post: "/v2/{instance_name=**}/blobs:batchRead" body: "*" };
+  }
+
+  // Fetch the entire directory tree rooted at a node.
+  //
+  // This request must be targeted at a
+  // [Directory][build.bazel.remote.execution.v2.Directory] stored in the
+  // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]
+  // (CAS). The server will enumerate the `Directory` tree recursively and
+  // return every node descended from the root.
+  //
+  // The GetTreeRequest.page_token parameter can be used to skip ahead in
+  // the stream (e.g. when retrying a partially completed and aborted request),
+  // by setting it to a value taken from GetTreeResponse.next_page_token of the
+  // last successfully processed GetTreeResponse.
+  //
+  // The exact traversal order is unspecified and, unless retrieving subsequent
+  // pages from an earlier request, is not guaranteed to be stable across
+  // multiple invocations of `GetTree`.
+  //
+  // If part of the tree is missing from the CAS, the server will return the
+  // portion present and omit the rest.
+  //
+  // * `NOT_FOUND`: The requested tree root is not present in the CAS.
+  rpc GetTree(GetTreeRequest) returns (stream GetTreeResponse) {
+    option (google.api.http) = { get: "/v2/{instance_name=**}/blobs/{root_digest.hash}/{root_digest.size_bytes}:getTree" };
+  }
+}
+
+// The Capabilities service may be used by remote execution clients to query
+// various server properties, in order to self-configure or return meaningful
+// error messages.
+//
+// The query may include a particular `instance_name`, in which case the values
+// returned will pertain to that instance.
+service Capabilities {
+  // GetCapabilities returns the server capabilities configuration.
+  rpc GetCapabilities(GetCapabilitiesRequest) returns (ServerCapabilities) {
+    option (google.api.http) = {
+      get: "/v2/{instance_name=**}/capabilities"
+    };
+  }
+}
+
+// An `Action` captures all the information about an execution which is required
+// to reproduce it.
+//
+// `Action`s are the core component of the [Execution] service. A single
+// `Action` represents a repeatable action that can be performed by the
+// execution service. `Action`s can be succinctly identified by the digest of
+// their wire format encoding and, once an `Action` has been executed, will be
+// cached in the action cache. Future requests can then use the cached result
+// rather than needing to run afresh.
+//
+// When a server completes execution of an
+// [Action][build.bazel.remote.execution.v2.Action], it MAY choose to
+// cache the [result][build.bazel.remote.execution.v2.ActionResult] in
+// the [ActionCache][build.bazel.remote.execution.v2.ActionCache] unless
+// `do_not_cache` is `true`. Clients SHOULD expect the server to do so. By
+// default, future calls to
+// [Execute][build.bazel.remote.execution.v2.Execution.Execute] the same
+// `Action` will also serve their results from the cache. Clients must take care
+// to understand the caching behavior. Ideally, all `Action`s will be
+// reproducible so that serving a result from cache is always desirable and
+// correct.
+message Action {
+  // The digest of the [Command][build.bazel.remote.execution.v2.Command]
+  // to run, which MUST be present in the
+  // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
+  Digest command_digest = 1;
+
+  // The digest of the root
+  // [Directory][build.bazel.remote.execution.v2.Directory] for the input
+  // files. The files in the directory tree are available in the correct
+  // location on the build machine before the command is executed. The root
+  // directory, as well as every subdirectory and content blob referred to, MUST
+  // be in the
+  // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
+  Digest input_root_digest = 2;
+
+  reserved 3 to 5; // Used for fields moved to [Command][build.bazel.remote.execution.v2.Command].
+
+  // A timeout after which the execution should be killed. If the timeout is
+  // absent, then the client is specifying that the execution should continue
+  // as long as the server will let it. The server SHOULD impose a timeout if
+  // the client does not specify one, however, if the client does specify a
+  // timeout that is longer than the server's maximum timeout, the server MUST
+  // reject the request.
+  //
+  // The timeout is a part of the
+  // [Action][build.bazel.remote.execution.v2.Action] message, and
+  // therefore two `Actions` with different timeouts are different, even if they
+  // are otherwise identical. This is because, if they were not, running an
+  // `Action` with a lower timeout than is required might result in a cache hit
+  // from an execution run with a longer timeout, hiding the fact that the
+  // timeout is too short. By encoding it directly in the `Action`, a lower
+  // timeout will result in a cache miss and the execution timeout will fail
+  // immediately, rather than whenever the cache entry gets evicted.
+  google.protobuf.Duration timeout = 6;
+
+  // If true, then the `Action`'s result cannot be cached, and in-flight
+  // requests for the same `Action` may not be merged.
+  bool do_not_cache = 7;
+}
+
+// A `Command` is the actual command executed by a worker running an
+// [Action][build.bazel.remote.execution.v2.Action] and specifications of its
+// environment.
+//
+// Except as otherwise required, the environment (such as which system
+// libraries or binaries are available, and what filesystems are mounted where)
+// is defined by and specific to the implementation of the remote execution API.
+message Command {
+  // An `EnvironmentVariable` is one variable to set in the running program's
+  // environment.
+  message EnvironmentVariable {
+    // The variable name.
+    string name = 1;
+
+    // The variable value.
+    string value = 2;
+  }
+
+  // The arguments to the command. The first argument must be the path to the
+  // executable, which must be either a relative path, in which case it is
+  // evaluated with respect to the input root, or an absolute path.
+  repeated string arguments = 1;
+
+  // The environment variables to set when running the program. The worker may
+  // provide its own default environment variables; these defaults can be
+  // overridden using this field. Additional variables can also be specified.
+  //
+  // In order to ensure that equivalent
+  // [Command][build.bazel.remote.execution.v2.Command]s always hash to the same
+  // value, the environment variables MUST be lexicographically sorted by name.
+  // Sorting of strings is done by code point, equivalently, by the UTF-8 bytes.
+  repeated EnvironmentVariable environment_variables = 2;
+
+  // A list of the output files that the client expects to retrieve from the
+  // action. Only the listed files, as well as directories listed in
+  // `output_directories`, will be returned to the client as output.
+  // Other files or directories that may be created during command execution
+  // are discarded.
+  //
+  // The paths are relative to the working directory of the action execution.
+  // The paths are specified using a single forward slash (`/`) as a path
+  // separator, even if the execution platform natively uses a different
+  // separator. The path MUST NOT include a trailing slash, nor a leading slash,
+  // being a relative path.
+  //
+  // In order to ensure consistent hashing of the same Action, the output paths
+  // MUST be sorted lexicographically by code point (or, equivalently, by UTF-8
+  // bytes).
+  //
+  // An output file cannot be duplicated, be a parent of another output file, or
+  // have the same path as any of the listed output directories.
+  //
+  // Directories leading up to the output files are created by the worker prior
+  // to execution, even if they are not explicitly part of the input root.
+  repeated string output_files = 3;
+
+  // A list of the output directories that the client expects to retrieve from
+  // the action. Only the listed directories will be returned (an entire
+  // directory structure will be returned as a
+  // [Tree][build.bazel.remote.execution.v2.Tree] message digest, see
+  // [OutputDirectory][build.bazel.remote.execution.v2.OutputDirectory]), as
+  // well as files listed in `output_files`. Other files or directories that
+  // may be created during command execution are discarded.
+  //
+  // The paths are relative to the working directory of the action execution.
+  // The paths are specified using a single forward slash (`/`) as a path
+  // separator, even if the execution platform natively uses a different
+  // separator. The path MUST NOT include a trailing slash, nor a leading slash,
+  // being a relative path. The special value of empty string is allowed,
+  // although not recommended, and can be used to capture the entire working
+  // directory tree, including inputs.
+  //
+  // In order to ensure consistent hashing of the same Action, the output paths
+  // MUST be sorted lexicographically by code point (or, equivalently, by UTF-8
+  // bytes).
+  //
+  // An output directory cannot be duplicated or have the same path as any of
+  // the listed output files.
+  //
+  // Directories leading up to the output directories (but not the output
+  // directories themselves) are created by the worker prior to execution, even
+  // if they are not explicitly part of the input root.
+  repeated string output_directories = 4;
+
+  // The platform requirements for the execution environment. The server MAY
+  // choose to execute the action on any worker satisfying the requirements, so
+  // the client SHOULD ensure that running the action on any such worker will
+  // have the same result.
+  Platform platform = 5;
+
+  // The working directory, relative to the input root, for the command to run
+  // in. It must be a directory which exists in the input tree. If it is left
+  // empty, then the action is run in the input root.
+  string working_directory = 6;
+}
+
+// A `Platform` is a set of requirements, such as hardware, operating system, or
+// compiler toolchain, for an
+// [Action][build.bazel.remote.execution.v2.Action]'s execution
+// environment. A `Platform` is represented as a series of key-value pairs
+// representing the properties that are required of the platform.
+message Platform {
+  // A single property for the environment. The server is responsible for
+  // specifying the property `name`s that it accepts. If an unknown `name` is
+  // provided in the requirements for an
+  // [Action][build.bazel.remote.execution.v2.Action], the server SHOULD
+  // reject the execution request. If permitted by the server, the same `name`
+  // may occur multiple times.
+  //
+  // The server is also responsible for specifying the interpretation of
+  // property `value`s. For instance, a property describing how much RAM must be
+  // available may be interpreted as allowing a worker with 16GB to fulfill a
+  // request for 8GB, while a property describing the OS environment on which
+  // the action must be performed may require an exact match with the worker's
+  // OS.
+  //
+  // The server MAY use the `value` of one or more properties to determine how
+  // it sets up the execution environment, such as by making specific system
+  // files available to the worker.
+  message Property {
+    // The property name.
+    string name = 1;
+
+    // The property value.
+    string value = 2;
+  }
+
+  // The properties that make up this platform. In order to ensure that
+  // equivalent `Platform`s always hash to the same value, the properties MUST
+  // be lexicographically sorted by name, and then by value. Sorting of strings
+  // is done by code point, equivalently, by the UTF-8 bytes.
+  repeated Property properties = 1;
+}
+
+// A `Directory` represents a directory node in a file tree, containing zero or
+// more children [FileNodes][build.bazel.remote.execution.v2.FileNode],
+// [DirectoryNodes][build.bazel.remote.execution.v2.DirectoryNode] and
+// [SymlinkNodes][build.bazel.remote.execution.v2.SymlinkNode].
+// Each `Node` contains its name in the directory, either the digest of its
+// content (either a file blob or a `Directory` proto) or a symlink target, as
+// well as possibly some metadata about the file or directory.
+//
+// In order to ensure that two equivalent directory trees hash to the same
+// value, the following restrictions MUST be obeyed when constructing
+// a `Directory`:
+//
+// * Every child in the directory must have a path of exactly one segment.
+//   Multiple levels of directory hierarchy may not be collapsed.
+// * Each child in the directory must have a unique path segment (file name).
+//   Note that while the API itself is case-sensitive, the environment where
+//   the Action is executed may or may not be case-sensitive. That is, it is
+//   legal to call the API with a Directory that has both "Foo" and "foo" as
+//   children, but the Action may be rejected by the remote system upon
+//   execution.
+// * The files, directories and symlinks in the directory must each be sorted
+//   in lexicographical order by path. The path strings must be sorted by code
+//   point, equivalently, by UTF-8 bytes.
+//
+// A `Directory` that obeys the restrictions is said to be in canonical form.
+//
+// As an example, the following could be used for a file named `bar` and a
+// directory named `foo` with an executable file named `baz` (hashes shortened
+// for readability):
+//
+// ```json
+// // (Directory proto)
+// {
+//   files: [
+//     {
+//       name: "bar",
+//       digest: {
+//         hash: "4a73bc9d03...",
+//         size: 65534
+//       }
+//     }
+//   ],
+//   directories: [
+//     {
+//       name: "foo",
+//       digest: {
+//         hash: "4cf2eda940...",
+//         size: 43
+//       }
+//     }
+//   ]
+// }
+//
+// // (Directory proto with hash "4cf2eda940..." and size 43)
+// {
+//   files: [
+//     {
+//       name: "baz",
+//       digest: {
+//         hash: "b2c941073e...",
+//         size: 1294,
+//       },
+//       is_executable: true
+//     }
+//   ]
+// }
+// ```
+message Directory {
+  // The files in the directory.
+  repeated FileNode files = 1;
+
+  // The subdirectories in the directory.
+  repeated DirectoryNode directories = 2;
+
+  // The symlinks in the directory.
+  repeated SymlinkNode symlinks = 3;
+}
+
+// A `FileNode` represents a single file and associated metadata.
+message FileNode {
+  // The name of the file.
+  string name = 1;
+
+  // The digest of the file's content.
+  Digest digest = 2;
+
+  reserved 3; // Reserved to ensure wire-compatibility with `OutputFile`.
+
+  // True if file is executable, false otherwise.
+  bool is_executable = 4;
+}
+
+// A `DirectoryNode` represents a child of a
+// [Directory][build.bazel.remote.execution.v2.Directory] which is itself
+// a `Directory` and its associated metadata.
+message DirectoryNode {
+  // The name of the directory.
+  string name = 1;
+
+  // The digest of the
+  // [Directory][build.bazel.remote.execution.v2.Directory] object
+  // represented. See [Digest][build.bazel.remote.execution.v2.Digest]
+  // for information about how to take the digest of a proto message.
+  Digest digest = 2;
+}
+
+// A `SymlinkNode` represents a symbolic link.
+message SymlinkNode {
+  // The name of the symlink.
+  string name = 1;
+
+  // The target path of the symlink. The path separator is a forward slash `/`.
+  // The target path can be relative to the parent directory of the symlink or
+  // it can be an absolute path starting with `/`. Support for absolute paths
+  // can be checked using the [Capabilities][build.bazel.remote.execution.v2.Capabilities]
+  // API. The canonical form forbids the substrings `/./` and `//` in the target
+  // path. `..` components are allowed anywhere in the target path.
+  string target = 2;
+}
+
+// A content digest. A digest for a given blob consists of the size of the blob
+// and its hash. The hash algorithm to use is defined by the server, but servers
+// SHOULD use SHA-256.
+//
+// The size is considered to be an integral part of the digest and cannot be
+// separated. That is, even if the `hash` field is correctly specified but
+// `size_bytes` is not, the server MUST reject the request.
+//
+// The reason for including the size in the digest is as follows: in a great
+// many cases, the server needs to know the size of the blob it is about to work
+// with prior to starting an operation with it, such as flattening Merkle tree
+// structures or streaming it to a worker. Technically, the server could
+// implement a separate metadata store, but this results in a significantly more
+// complicated implementation as opposed to having the client specify the size
+// up-front (or storing the size along with the digest in every message where
+// digests are embedded). This does mean that the API leaks some implementation
+// details of (what we consider to be) a reasonable server implementation, but
+// we consider this to be a worthwhile tradeoff.
+//
+// When a `Digest` is used to refer to a proto message, it always refers to the
+// message in binary encoded form. To ensure consistent hashing, clients and
+// servers MUST ensure that they serialize messages according to the following
+// rules, even if there are alternate valid encodings for the same message:
+//
+// * Fields are serialized in tag order.
+// * There are no unknown fields.
+// * There are no duplicate fields.
+// * Fields are serialized according to the default semantics for their type.
+//
+// Most protocol buffer implementations will always follow these rules when
+// serializing, but care should be taken to avoid shortcuts. For instance,
+// concatenating two messages to merge them may produce duplicate fields.
+message Digest {
+  // The hash. In the case of SHA-256, it will always be a lowercase hex string
+  // exactly 64 characters long.
+  string hash = 1;
+
+  // The size of the blob, in bytes.
+  int64 size_bytes = 2;
+}
+
+// ExecutedActionMetadata contains details about a completed execution.
+message ExecutedActionMetadata {
+  // The name of the worker which ran the execution.
+  string worker = 1;
+
+  // When was the action added to the queue.
+  google.protobuf.Timestamp queued_timestamp = 2;
+
+  // When the worker received the action.
+  google.protobuf.Timestamp worker_start_timestamp = 3;
+
+  // When the worker completed the action, including all stages.
+  google.protobuf.Timestamp worker_completed_timestamp = 4;
+
+  // When the worker started fetching action inputs.
+  google.protobuf.Timestamp input_fetch_start_timestamp = 5;
+
+  // When the worker finished fetching action inputs.
+  google.protobuf.Timestamp input_fetch_completed_timestamp = 6;
+
+  // When the worker started executing the action command.
+  google.protobuf.Timestamp execution_start_timestamp = 7;
+
+  // When the worker completed executing the action command.
+  google.protobuf.Timestamp execution_completed_timestamp = 8;
+
+  // When the worker started uploading action outputs.
+  google.protobuf.Timestamp output_upload_start_timestamp = 9;
+
+  // When the worker finished uploading action outputs.
+  google.protobuf.Timestamp output_upload_completed_timestamp = 10;
+}
+
+// An ActionResult represents the result of an
+// [Action][build.bazel.remote.execution.v2.Action] being run.
+message ActionResult {
+  reserved 1; // Reserved for use as the resource name.
+
+  // The output files of the action. For each output file requested in the
+  // `output_files` field of the Action, if the corresponding file existed after
+  // the action completed, a single entry will be present either in this field,
+  // or in the output_file_symlinks field, if the file was a symbolic link to
+  // another file.
+  //
+  // If the action does not produce the requested output, or produces a
+  // directory where a regular file is expected or vice versa, then that output
+  // will be omitted from the list. The server is free to arrange the output
+  // list as desired; clients MUST NOT assume that the output list is sorted.
+  repeated OutputFile output_files = 2;
+
+  // The output files of the action that are symbolic links to other files. Those
+  // may be links to other output files, or input files, or even absolute paths
+  // outside of the working directory, if the server supports
+  // [SymlinkAbsolutePathStrategy.ALLOWED][build.bazel.remote.execution.v2.CacheCapabilities.SymlinkAbsolutePathStrategy].
+  // For each output file requested in the `output_files` field of the Action,
+  // if the corresponding file existed after
+  // the action completed, a single entry will be present either in this field,
+  // or in the `output_files` field, if the file was not a symbolic link.
+  //
+  // If the action does not produce the requested output, or produces a
+  // directory where a regular file is expected or vice versa, then that output
+  // will be omitted from the list. The server is free to arrange the output
+  // list as desired; clients MUST NOT assume that the output list is sorted.
+  repeated OutputSymlink output_file_symlinks = 10;
+
+  // The output directories of the action. For each output directory requested
+  // in the `output_directories` field of the Action, if the corresponding
+  // directory existed after the action completed, a single entry will be
+  // present in the output list, which will contain the digest of a
+  // [Tree][build.bazel.remote.execution.v2.Tree] message containing the
+  // directory tree, and the path equal exactly to the corresponding Action
+  // output_directories member.
+  //
+  // As an example, suppose the Action had an output directory `a/b/dir` and the
+  // execution produced the following contents in `a/b/dir`: a file named `bar`
+  // and a directory named `foo` with an executable file named `baz`. Then,
+  // output_directory will contain (hashes shortened for readability):
+  //
+  // ```json
+  // // OutputDirectory proto:
+  // {
+  //   path: "a/b/dir"
+  //   tree_digest: {
+  //     hash: "4a73bc9d03...",
+  //     size: 55
+  //   }
+  // }
+  // // Tree proto with hash "4a73bc9d03..." and size 55:
+  // {
+  //   root: {
+  //     files: [
+  //       {
+  //         name: "bar",
+  //         digest: {
+  //           hash: "4a73bc9d03...",
+  //           size: 65534
+  //         }
+  //       }
+  //     ],
+  //     directories: [
+  //       {
+  //         name: "foo",
+  //         digest: {
+  //           hash: "4cf2eda940...",
+  //           size: 43
+  //         }
+  //       }
+  //     ]
+  //   }
+  //   children : {
+  //     // (Directory proto with hash "4cf2eda940..." and size 43)
+  //     files: [
+  //       {
+  //         name: "baz",
+  //         digest: {
+  //           hash: "b2c941073e...",
+  //           size: 1294,
+  //         },
+  //         is_executable: true
+  //       }
+  //     ]
+  //   }
+  // }
+  // ```
+  repeated OutputDirectory output_directories = 3;
+
+  // The output directories of the action that are symbolic links to other
+  // directories. Those may be links to other output directories, or input
+  // directories, or even absolute paths outside of the working directory,
+  // if the server supports
+  // [SymlinkAbsolutePathStrategy.ALLOWED][build.bazel.remote.execution.v2.CacheCapabilities.SymlinkAbsolutePathStrategy].
+  // For each output directory requested in the `output_directories` field of
+  // the Action, if the corresponding directory existed after
+  // the action completed, a single entry will be present either in this field,
+  // or in the `output_directories` field, if the directory was not a symbolic link.
+  //
+  // If the action does not produce the requested output, or produces a
+  // file where a directory is expected or vice versa, then that output
+  // will be omitted from the list. The server is free to arrange the output
+  // list as desired; clients MUST NOT assume that the output list is sorted.
+  repeated OutputSymlink output_directory_symlinks = 11;
+
+  // The exit code of the command.
+  int32 exit_code = 4;
+
+  // The standard output buffer of the action. The server will determine, based
+  // on the size of the buffer, whether to return it in raw form or to return
+  // a digest in `stdout_digest` that points to the buffer. If neither is set,
+  // then the buffer is empty. The client SHOULD NOT assume it will get one of
+  // the raw buffer or a digest on any given request and should be prepared to
+  // handle either.
+  bytes stdout_raw = 5;
+
+  // The digest for a blob containing the standard output of the action, which
+  // can be retrieved from the
+  // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
+  // See `stdout_raw` for when this will be set.
+  Digest stdout_digest = 6;
+
+  // The standard error buffer of the action. The server will determine, based
+  // on the size of the buffer, whether to return it in raw form or to return
+  // a digest in `stderr_digest` that points to the buffer. If neither is set,
+  // then the buffer is empty. The client SHOULD NOT assume it will get one of
+  // the raw buffer or a digest on any given request and should be prepared to
+  // handle either.
+  bytes stderr_raw = 7;
+
+  // The digest for a blob containing the standard error of the action, which
+  // can be retrieved from the
+  // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
+  // See `stderr_raw` for when this will be set.
+  Digest stderr_digest = 8;
+
+  // The details of the execution that originally produced this result.
+  ExecutedActionMetadata execution_metadata = 9;
+}
+
+// An `OutputFile` is similar to a
+// [FileNode][build.bazel.remote.execution.v2.FileNode], but it is used as an
+// output in an `ActionResult`. It allows a full file path rather than
+// only a name.
+//
+// `OutputFile` is binary-compatible with `FileNode`.
+message OutputFile {
+  // The full path of the file relative to the working directory, including the
+  // filename. The path separator is a forward slash `/`. Since this is a
+  // relative path, it MUST NOT begin with a leading forward slash.
+  string path = 1;
+
+  // The digest of the file's content.
+  Digest digest = 2;
+
+  reserved 3; // Used for a removed field in an earlier version of the API.
+
+  // True if file is executable, false otherwise.
+  bool is_executable = 4;
+}
+
+// A `Tree` contains all the
+// [Directory][build.bazel.remote.execution.v2.Directory] protos in a
+// single directory Merkle tree, compressed into one message.
+message Tree {
+  // The root directory in the tree.
+  Directory root = 1;
+
+  // All the child directories: the directories referred to by the root and,
+  // recursively, all its children. In order to reconstruct the directory tree,
+  // the client must take the digests of each of the child directories and then
+  // build up a tree starting from the `root`.
+  repeated Directory children = 2;
+}
+
+// An `OutputDirectory` is the output in an `ActionResult` corresponding to a
+// directory's full contents rather than a single file.
+message OutputDirectory {
+  // The full path of the directory relative to the working directory. The path
+  // separator is a forward slash `/`. Since this is a relative path, it MUST
+  // NOT begin with a leading forward slash. The empty string value is allowed,
+  // and it denotes the entire working directory.
+  string path = 1;
+
+  reserved 2; // Used for a removed field in an earlier version of the API.
+
+  // The digest of the encoded
+  // [Tree][build.bazel.remote.execution.v2.Tree] proto containing the
+  // directory's contents.
+  Digest tree_digest = 3;
+}
+
+// An `OutputSymlink` is similar to a
+// [SymlinkNode][build.bazel.remote.execution.v2.SymlinkNode], but it is used as an
+// output in an `ActionResult`.
+//
+// `OutputSymlink` is binary-compatible with `SymlinkNode`.
+message OutputSymlink {
+  // The full path of the symlink relative to the working directory, including the
+  // filename. The path separator is a forward slash `/`. Since this is a
+  // relative path, it MUST NOT begin with a leading forward slash.
+  string path = 1;
+
+  // The target path of the symlink. The path separator is a forward slash `/`.
+  // The target path can be relative to the parent directory of the symlink or
+  // it can be an absolute path starting with `/`. Support for absolute paths
+  // can be checked using the [Capabilities][build.bazel.remote.execution.v2.Capabilities]
+  // API. The canonical form forbids the substrings `/./` and `//` in the target
+  // path. `..` components are allowed anywhere in the target path.
+  string target = 2;
+}
+
+// An `ExecutionPolicy` can be used to control the scheduling of the action.
+message ExecutionPolicy {
+  // The priority (relative importance) of this action. Generally, a lower value
+  // means that the action should be run sooner than actions having a greater
+  // priority value, but the interpretation of a given value is server-
+  // dependent. A priority of 0 means the *default* priority. Priorities may be
+  // positive or negative, and such actions should run later or sooner than
+  // actions having the default priority, respectively. The particular semantics
+  // of this field is up to the server. In particular, every server will have
+  // their own supported range of priorities, and will decide how these map into
+  // scheduling policy.
+  int32 priority = 1;
+}
+
+// A `ResultsCachePolicy` is used for fine-grained control over how action
+// outputs are stored in the CAS and Action Cache.
+message ResultsCachePolicy {
+  // The priority (relative importance) of this content in the overall cache.
+  // Generally, a lower value means a longer retention time or other advantage,
+  // but the interpretation of a given value is server-dependent. A priority of
+  // 0 means a *default* value, decided by the server.
+  //
+  // The particular semantics of this field is up to the server. In particular,
+  // every server will have their own supported range of priorities, and will
+  // decide how these map into retention/eviction policy.
+  int32 priority = 1;
+}
+
+// A request message for
+// [Execution.Execute][build.bazel.remote.execution.v2.Execution.Execute].
+message ExecuteRequest {
+  // The instance of the execution system to operate against. A server may
+  // support multiple instances of the execution system (with their own workers,
+  // storage, caches, etc.). The server MAY require use of this field to select
+  // between them in an implementation-defined fashion, otherwise it can be
+  // omitted.
+  string instance_name = 1;
+
+  // If true, the action will be executed even if its result is already
+  // present in the [ActionCache][build.bazel.remote.execution.v2.ActionCache].
+  // The execution is still allowed to be merged with other in-flight executions
+  // of the same action, however - semantically, the service MUST only guarantee
+  // that the results of an execution with this field set were not visible
+  // before the corresponding execution request was sent.
+  // Note that actions from execution requests setting this field are still
+  // eligible to be entered into the action cache upon completion, and services
+  // SHOULD overwrite any existing entries that may exist. This allows
+  // skip_cache_lookup requests to be used as a mechanism for replacing action
+  // cache entries that reference outputs no longer available or that are
+  // poisoned in any way.
+  // If false, the result may be served from the action cache.
+  bool skip_cache_lookup = 3;
+
+  reserved 2, 4, 5; // Used for removed fields in an earlier version of the API.
+
+  // The digest of the [Action][build.bazel.remote.execution.v2.Action] to
+  // execute.
+  Digest action_digest = 6;
+
+  // An optional policy for execution of the action.
+  // The server will have a default policy if this is not provided.
+  ExecutionPolicy execution_policy = 7;
+
+  // An optional policy for the results of this execution in the remote cache.
+  // The server will have a default policy if this is not provided.
+  // This may be applied to both the ActionResult and the associated blobs.
+  ResultsCachePolicy results_cache_policy = 8;
+}
+
+// A `LogFile` is a log stored in the CAS.
+message LogFile {
+  // The digest of the log contents.
+  Digest digest = 1;
+
+  // This is a hint as to the purpose of the log, and is set to true if the log
+  // is human-readable text that can be usefully displayed to a user, and false
+  // otherwise. For instance, if a command-line client wishes to print the
+  // server logs to the terminal for a failed action, this allows it to avoid
+  // displaying a binary file.
+  bool human_readable = 2;
+}
+
+// The response message for
+// [Execution.Execute][build.bazel.remote.execution.v2.Execution.Execute],
+// which will be contained in the [response
+// field][google.longrunning.Operation.response] of the
+// [Operation][google.longrunning.Operation].
+message ExecuteResponse {
+  // The result of the action.
+  ActionResult result = 1;
+
+  // True if the result was served from cache, false if it was executed.
+  bool cached_result = 2;
+
+  // If the status has a code other than `OK`, it indicates that the action did
+  // not finish execution. For example, if the operation times out during
+  // execution, the status will have a `DEADLINE_EXCEEDED` code. Servers MUST
+  // use this field for errors in execution, rather than the error field on the
+  // `Operation` object.
+  //
+  // If the status code is other than `OK`, then the result MUST NOT be cached.
+  // For an error status, the `result` field is optional; the server may
+  // populate the output-, stdout-, and stderr-related fields if it has any
+  // information available, such as the stdout and stderr of a timed-out action.
+  google.rpc.Status status = 3;
+
+  // An optional list of additional log outputs the server wishes to provide. A
+  // server can use this to return execution-specific logs however it wishes.
+  // This is intended primarily to make it easier for users to debug issues that
+  // may be outside of the actual job execution, such as by identifying the
+  // worker executing the action or by providing logs from the worker's setup
+  // phase. The keys SHOULD be human readable so that a client can display them
+  // to a user.
+  map<string, LogFile> server_logs = 4;
+
+  // Freeform informational message with details on the execution of the action
+  // that may be displayed to the user upon failure or when requested explicitly.
+  string message = 5;
+}
+
+// Metadata about an ongoing
+// [execution][build.bazel.remote.execution.v2.Execution.Execute], which
+// will be contained in the [metadata
+// field][google.longrunning.Operation.metadata] of the
+// [Operation][google.longrunning.Operation].
+message ExecuteOperationMetadata {
+  // The current stage of execution.
+  enum Stage {
+    UNKNOWN = 0;
+
+    // Checking the result against the cache.
+    CACHE_CHECK = 1;
+
+    // Currently idle, awaiting a free machine to execute.
+    QUEUED = 2;
+
+    // Currently being executed by a worker.
+    EXECUTING = 3;
+
+    // Finished execution.
+    COMPLETED = 4;
+  }
+
+  Stage stage = 1;
+
+  // The digest of the [Action][build.bazel.remote.execution.v2.Action]
+  // being executed.
+  Digest action_digest = 2;
+
+  // If set, the client can use this name with
+  // [ByteStream.Read][google.bytestream.ByteStream.Read] to stream the
+  // standard output.
+  string stdout_stream_name = 3;
+
+  // If set, the client can use this name with
+  // [ByteStream.Read][google.bytestream.ByteStream.Read] to stream the
+  // standard error.
+  string stderr_stream_name = 4;
+}
+
+// A request message for
+// [WaitExecution][build.bazel.remote.execution.v2.Execution.WaitExecution].
+message WaitExecutionRequest {
+  // The name of the [Operation][google.longrunning.Operation]
+  // returned by [Execute][build.bazel.remote.execution.v2.Execution.Execute].
+  string name = 1;
+}
+
+// A request message for
+// [ActionCache.GetActionResult][build.bazel.remote.execution.v2.ActionCache.GetActionResult].
+message GetActionResultRequest {
+  // The instance of the execution system to operate against. A server may
+  // support multiple instances of the execution system (with their own workers,
+  // storage, caches, etc.). The server MAY require use of this field to select
+  // between them in an implementation-defined fashion, otherwise it can be
+  // omitted.
+  string instance_name = 1;
+
+  // The digest of the [Action][build.bazel.remote.execution.v2.Action]
+  // whose result is requested.
+  Digest action_digest = 2;
+}
+
+// A request message for
+// [ActionCache.UpdateActionResult][build.bazel.remote.execution.v2.ActionCache.UpdateActionResult].
+message UpdateActionResultRequest {
+  // The instance of the execution system to operate against. A server may
+  // support multiple instances of the execution system (with their own workers,
+  // storage, caches, etc.). The server MAY require use of this field to select
+  // between them in an implementation-defined fashion, otherwise it can be
+  // omitted.
+  string instance_name = 1;
+
+  // The digest of the [Action][build.bazel.remote.execution.v2.Action]
+  // whose result is being uploaded.
+  Digest action_digest = 2;
+
+  // The [ActionResult][build.bazel.remote.execution.v2.ActionResult]
+  // to store in the cache.
+  ActionResult action_result = 3;
+
+  // An optional policy for the results of this execution in the remote cache.
+  // The server will have a default policy if this is not provided.
+  // This may be applied to both the ActionResult and the associated blobs.
+  ResultsCachePolicy results_cache_policy = 4;
+}
+
+// A request message for
+// [ContentAddressableStorage.FindMissingBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.FindMissingBlobs].
+message FindMissingBlobsRequest {
+  // The instance of the execution system to operate against. A server may
+  // support multiple instances of the execution system (with their own workers,
+  // storage, caches, etc.). The server MAY require use of this field to select
+  // between them in an implementation-defined fashion, otherwise it can be
+  // omitted.
+  string instance_name = 1;
+
+  // A list of the blobs to check.
+  repeated Digest blob_digests = 2;
+}
+
+// A response message for
+// [ContentAddressableStorage.FindMissingBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.FindMissingBlobs].
+message FindMissingBlobsResponse {
+  // A list of the blobs requested *not* present in the storage.
+  repeated Digest missing_blob_digests = 2;
+}
+
+// A request message for
+// [ContentAddressableStorage.BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs].
+message BatchUpdateBlobsRequest {
+  // A request corresponding to a single blob that the client wants to upload.
+  message Request {
+    // The digest of the blob. This MUST be the digest of `data`.
+    Digest digest = 1;
+
+    // The raw binary data.
+    bytes data = 2;
+  }
+
+  // The instance of the execution system to operate against. A server may
+  // support multiple instances of the execution system (with their own workers,
+  // storage, caches, etc.). The server MAY require use of this field to select
+  // between them in an implementation-defined fashion, otherwise it can be
+  // omitted.
+  string instance_name = 1;
+
+  // The individual upload requests.
+  repeated Request requests = 2;
+}
+
+// A response message for
+// [ContentAddressableStorage.BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs].
+message BatchUpdateBlobsResponse {
+  // A response corresponding to a single blob that the client tried to upload.
+  message Response {
+    // The blob digest to which this response corresponds.
+    Digest digest = 1;
+
+    // The result of attempting to upload that blob.
+    google.rpc.Status status = 2;
+  }
+
+  // The responses to the requests.
+  repeated Response responses = 1;
+}
+
+// A request message for
+// [ContentAddressableStorage.BatchReadBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchReadBlobs].
+message BatchReadBlobsRequest {
+  // The instance of the execution system to operate against. A server may
+  // support multiple instances of the execution system (with their own workers,
+  // storage, caches, etc.). The server MAY require use of this field to select
+  // between them in an implementation-defined fashion, otherwise it can be
+  // omitted.
+  string instance_name = 1;
+
+  // The individual blob digests.
+  repeated Digest digests = 2;
+}
+
+// A response message for
+// [ContentAddressableStorage.BatchReadBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchReadBlobs].
+message BatchReadBlobsResponse {
+  // A response corresponding to a single blob that the client tried to download.
+  message Response {
+    // The digest to which this response corresponds.
+    Digest digest = 1;
+
+    // The raw binary data.
+    bytes data = 2;
+
+    // The result of attempting to download that blob.
+    google.rpc.Status status = 3;
+  }
+
+  // The responses to the requests.
+  repeated Response responses = 1;
+}
+
+// A request message for
+// [ContentAddressableStorage.GetTree][build.bazel.remote.execution.v2.ContentAddressableStorage.GetTree].
+message GetTreeRequest {
+  // The instance of the execution system to operate against. A server may
+  // support multiple instances of the execution system (with their own workers,
+  // storage, caches, etc.). The server MAY require use of this field to select
+  // between them in an implementation-defined fashion, otherwise it can be
+  // omitted.
+  string instance_name = 1;
+
+  // The digest of the root, which must be an encoded
+  // [Directory][build.bazel.remote.execution.v2.Directory] message
+  // stored in the
+  // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
+  Digest root_digest = 2;
+
+  // A maximum page size to request. If present, the server will request no more
+  // than this many items. Regardless of whether a page size is specified, the
+  // server may place its own limit on the number of items to be returned and
+  // require the client to retrieve more items using a subsequent request.
+  int32 page_size = 3;
+
+  // A page token, which must be a value received in a previous
+  // [GetTreeResponse][build.bazel.remote.execution.v2.GetTreeResponse].
+  // If present, the server will use it to return the following page of results.
+  string page_token = 4;
+}
+
+// A response message for
+// [ContentAddressableStorage.GetTree][build.bazel.remote.execution.v2.ContentAddressableStorage.GetTree].
+message GetTreeResponse {
+  // The directories descended from the requested root.
+  repeated Directory directories = 1;
+
+  // If present, signifies that there are more results which the client can
+  // retrieve by passing this as the page_token in a subsequent
+  // [request][build.bazel.remote.execution.v2.GetTreeRequest].
+  // If empty, signifies that this is the last page of results.
+  string next_page_token = 2;
+}
+
+// A request message for
+// [Capabilities.GetCapabilities][build.bazel.remote.execution.v2.Capabilities.GetCapabilities].
+message GetCapabilitiesRequest {
+  // The instance of the execution system to operate against. A server may
+  // support multiple instances of the execution system (with their own workers,
+  // storage, caches, etc.). The server MAY require use of this field to select
+  // between them in an implementation-defined fashion, otherwise it can be
+  // omitted.
+  string instance_name = 1;
+}
+
+// A response message for
+// [Capabilities.GetCapabilities][build.bazel.remote.execution.v2.Capabilities.GetCapabilities].
+message ServerCapabilities {
+  // Capabilities of the remote cache system.
+  CacheCapabilities cache_capabilities = 1;
+
+  // Capabilities of the remote execution system.
+  ExecutionCapabilities execution_capabilities = 2;
+
+  // Earliest RE API version supported, including deprecated versions.
+  build.bazel.semver.SemVer deprecated_api_version = 3;
+
+  // Earliest non-deprecated RE API version supported.
+  build.bazel.semver.SemVer low_api_version = 4;
+
+  // Latest RE API version supported.
+  build.bazel.semver.SemVer high_api_version = 5;
+}
+
+// The digest function used for converting values into keys for CAS and Action
+// Cache.
+enum DigestFunction {
+  // It is an error for the server to return this value.
+  UNKNOWN = 0;
+
+  // The SHA-256 digest function.
+  SHA256 = 1;
+
+  // The SHA-1 digest function.
+  SHA1 = 2;
+
+  // The MD5 digest function.
+  MD5 = 3;
+}
+
+// Describes the server/instance capabilities for updating the action cache.
+message ActionCacheUpdateCapabilities {
+  bool update_enabled = 1;
+}
+
+// Allowed values for priority in
+// [ResultsCachePolicy][build.bazel.remote.execution.v2.ResultsCachePolicy]
+// Used for querying both cache and execution valid priority ranges.
+message PriorityCapabilities {
+  // Supported range of priorities, including boundaries.
+  message PriorityRange {
+    int32 min_priority = 1;
+    int32 max_priority = 2;
+  }
+  repeated PriorityRange priorities = 1;
+}
+
+// Capabilities of the remote cache system.
+message CacheCapabilities {
+  // Describes how the server treats absolute symlink targets.
+  enum SymlinkAbsolutePathStrategy {
+    UNKNOWN = 0;
+
+    // Server will return an `INVALID_ARGUMENT` on input symlinks with absolute
+    // targets.
+    // If an action tries to create an output symlink with an absolute target, a
+    // `FAILED_PRECONDITION` will be returned.
+    DISALLOWED = 1;
+
+    // Server will allow symlink targets to escape the input root tree, possibly
+    // resulting in non-hermetic builds.
+    ALLOWED = 2;
+  }
+
+  // All the digest functions supported by the remote cache.
+  // Remote cache may support multiple digest functions simultaneously.
+  repeated DigestFunction digest_function = 1;
+
+  // Capabilities for updating the action cache.
+  ActionCacheUpdateCapabilities action_cache_update_capabilities = 2;
+
+  // Supported cache priority range for both CAS and ActionCache.
+  PriorityCapabilities cache_priority_capabilities = 3;
+
+  // Maximum total size of blobs to be uploaded/downloaded using
+  // batch methods. A value of 0 means no limit is set, although
+  // in practice there will always be a message size limitation
+  // of the protocol in use, e.g. GRPC.
+  int64 max_batch_total_size_bytes = 4;
+
+  // Whether absolute symlink targets are supported.
+  SymlinkAbsolutePathStrategy symlink_absolute_path_strategy = 5;
+}
+
+// Capabilities of the remote execution system.
+message ExecutionCapabilities {
+  // Remote execution may only support a single digest function.
+  DigestFunction digest_function = 1;
+
+  // Whether remote execution is enabled for the particular server/instance.
+  bool exec_enabled = 2;
+
+  // Supported execution priority range.
+  PriorityCapabilities execution_priority_capabilities = 3;
+}
+
+// Details for the tool used to call the API.
+message ToolDetails {
+  // Name of the tool, e.g. bazel.
+  string tool_name = 1;
+
+  // Version of the tool used for the request, e.g. 5.0.3.
+  string tool_version = 2;
+}
+
+// An optional Metadata to attach to any RPC request to tell the server about an
+// external context of the request. The server may use this for logging or other
+// purposes. To use it, the client attaches the header to the call using the
+// canonical proto serialization:
+//
+// * name: `build.bazel.remote.execution.v2.requestmetadata-bin`
+// * contents: the base64 encoded binary `RequestMetadata` message.
+message RequestMetadata {
+  // The details for the tool invoking the requests.
+  ToolDetails tool_details = 1;
+
+  // An identifier that ties multiple requests to the same action.
+  // For example, multiple requests to the CAS, Action Cache, and Execution
+  // API are used in order to compile foo.cc.
+  string action_id = 2;
+
+  // An identifier that ties multiple actions together to a final result.
+  // For example, multiple actions are required to build and run foo_test.
+  string tool_invocation_id = 3;
+
+  // An identifier to tie multiple tool invocations together. For example,
+  // runs of foo_test, bar_test and baz_test on a post-submit of a given patch.
+  string correlated_invocations_id = 4;
+}
diff --git a/build/bazel/semver/BUILD b/build/bazel/semver/BUILD
new file mode 100644
index 0000000..a3da887
--- /dev/null
+++ b/build/bazel/semver/BUILD
@@ -0,0 +1,24 @@
+load("@com_github_grpc_grpc//bazel:cc_grpc_library.bzl", "cc_grpc_library")
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])
+
+proto_library(
+    name = "semver_proto",
+    srcs = ["semver.proto"],
+)
+
+java_proto_library(
+    name = "semver_java_proto",
+    deps = [":semver_proto"],
+)
+
+cc_grpc_library(
+    name = "semver_cc_proto",
+    srcs = ["semver.proto"],
+    deps = [],
+    proto_only = False,
+    well_known_protos = True,
+    use_external = False,
+)
+
diff --git a/build/bazel/semver/semver.proto b/build/bazel/semver/semver.proto
new file mode 100644
index 0000000..3b626b7
--- /dev/null
+++ b/build/bazel/semver/semver.proto
@@ -0,0 +1,41 @@
+// Copyright 2018 The Bazel Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package build.bazel.semver;
+
+option csharp_namespace = "Build.Bazel.Semver";
+option go_package = "semver";
+option java_multiple_files = true;
+option java_outer_classname = "SemverProto";
+option java_package = "build.bazel.semver";
+option objc_class_prefix = "SMV";
+
+// The full version of a given tool.
+message SemVer {
+  // The major version, e.g. 10 for 10.2.3.
+  int32 major = 1;
+
+  // The minor version, e.g. 2 for 10.2.3.
+  int32 minor = 2;
+
+  // The patch version, e.g. 3 for 10.2.3.
+  int32 patch = 3;
+
+  // The pre-release version. Either this field or major/minor/patch fields
+  // must be filled. They are mutually exclusive. Pre-release versions are
+  // assumed to be earlier than any released versions.
+  string prerelease = 4;
+}
diff --git a/external/BUILD.googleapis b/external/BUILD.googleapis
new file mode 100644
index 0000000..1cfef9f
--- /dev/null
+++ b/external/BUILD.googleapis
@@ -0,0 +1,78 @@
+load("@com_github_grpc_grpc//bazel:cc_grpc_library.bzl", "cc_grpc_library")
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])
+
+proto_library(
+    name = "google_api_annotations_proto",
+    srcs = ["google/api/annotations.proto"],
+    deps = [
+        ":google_api_http_proto",
+        "@com_google_protobuf//:descriptor_proto",
+    ],
+)
+
+cc_grpc_library(
+    name = "google_api_annotations_cc_proto",
+    srcs = ["google/api/annotations.proto"],
+    deps = [":google_api_http_cc_proto"],
+    proto_only = False,
+    well_known_protos = True,
+    use_external = False,
+)
+
+proto_library(
+    name = "google_api_http_proto",
+    srcs = ["google/api/http.proto"],
+)
+
+cc_grpc_library(
+    name = "google_api_http_cc_proto",
+    srcs = ["google/api/http.proto"],
+    deps = [],
+    proto_only = False,
+    well_known_protos = True,
+    use_external = False,
+)
+
+proto_library(
+    name = "google_longrunning_operations_proto",
+    srcs = ["google/longrunning/operations.proto"],
+    deps = [
+        ":google_api_annotations_proto",
+        ":google_api_http_proto",
+        ":google_rpc_status_proto",
+        "@com_google_protobuf//:any_proto",
+        "@com_google_protobuf//:empty_proto",
+    ],
+)
+
+cc_grpc_library(
+    name = "google_longrunning_operations_cc_proto",
+    srcs = ["google/longrunning/operations.proto"],
+    deps = [
+        ":google_api_annotations_cc_proto",
+        ":google_api_http_cc_proto",
+        ":google_rpc_status_cc_proto",
+    ],
+    proto_only = False,
+    well_known_protos = True,
+    use_external = False,
+)
+
+proto_library(
+    name = "google_rpc_status_proto",
+    srcs = ["google/rpc/status.proto"],
+    deps = ["@com_google_protobuf//:any_proto"],
+)
+
+cc_grpc_library(
+    name = "google_rpc_status_cc_proto",
+    srcs = ["google/rpc/status.proto"],
+    deps = [],
+    proto_only = False,
+    well_known_protos = True,
+    use_external = False,
+)
+