Rename Request to Execution at the API level.

There's a separate notion of Request at the HIDL level;
this is not renamed.

Bug: 63905942
Test: nn/runtime/tests

Change-Id: I671e3b387d735a9091dc5a2d14afbea6b45f444c
diff --git a/common/operations/EmbeddingLookupTest.cpp b/common/operations/EmbeddingLookupTest.cpp
index e088f28..bead417 100644
--- a/common/operations/EmbeddingLookupTest.cpp
+++ b/common/operations/EmbeddingLookupTest.cpp
@@ -93,10 +93,10 @@
 
     Compilation compilation(&model_);
     compilation.compile();
-    Request request(&compilation);
+    Execution execution(&compilation);
 
 #define SetInputOrWeight(X)                                                  \
-  ASSERT_EQ(request.setInput(EmbeddingLookup::k##X##Tensor, X##_.data(), sizeof(X##_)), \
+  ASSERT_EQ(execution.setInput(EmbeddingLookup::k##X##Tensor, X##_.data(), sizeof(X##_)), \
             Result::NO_ERROR);
 
     FOR_ALL_INPUT_AND_WEIGHT_TENSORS(SetInputOrWeight);
@@ -104,14 +104,14 @@
 #undef SetInputOrWeight
 
 #define SetOutput(X)                                                          \
-  ASSERT_EQ(request.setOutput(EmbeddingLookup::k##X##Tensor, X##_.data(), sizeof(X##_)), \
+  ASSERT_EQ(execution.setOutput(EmbeddingLookup::k##X##Tensor, X##_.data(), sizeof(X##_)), \
             Result::NO_ERROR);
 
     FOR_ALL_OUTPUT_TENSORS(SetOutput);
 
 #undef SetOutput
 
-    ASSERT_EQ(request.compute(), Result::NO_ERROR);
+    ASSERT_EQ(execution.compute(), Result::NO_ERROR);
   }
 
 #define DefineSetter(X)                          \
diff --git a/common/operations/HashtableLookupTest.cpp b/common/operations/HashtableLookupTest.cpp
index c97268c..76fcbdd 100644
--- a/common/operations/HashtableLookupTest.cpp
+++ b/common/operations/HashtableLookupTest.cpp
@@ -107,10 +107,10 @@
 
     Compilation compilation(&model_);
     compilation.compile();
-    Request request(&compilation);
+    Execution execution(&compilation);
 
 #define SetInputOrWeight(X)                                                  \
-  ASSERT_EQ(request.setInput(HashtableLookup::k##X##Tensor, X##_.data(), sizeof(X##_)), \
+  ASSERT_EQ(execution.setInput(HashtableLookup::k##X##Tensor, X##_.data(), sizeof(X##_)), \
             Result::NO_ERROR);
 
     FOR_ALL_INPUT_AND_WEIGHT_TENSORS(SetInputOrWeight);
@@ -118,14 +118,14 @@
 #undef SetInputOrWeight
 
 #define SetOutput(X)                                                          \
-  ASSERT_EQ(request.setOutput(HashtableLookup::k##X##Tensor, X##_.data(), sizeof(X##_)), \
+  ASSERT_EQ(execution.setOutput(HashtableLookup::k##X##Tensor, X##_.data(), sizeof(X##_)), \
             Result::NO_ERROR);
 
     FOR_ALL_OUTPUT_TENSORS(SetOutput);
 
 #undef SetOutput
 
-    ASSERT_EQ(request.compute(), Result::NO_ERROR);
+    ASSERT_EQ(execution.compute(), Result::NO_ERROR);
   }
 
 #define DefineSetter(X)                          \
diff --git a/common/operations/LSHProjectionTest.cpp b/common/operations/LSHProjectionTest.cpp
index 5f19e86..8dafc46 100644
--- a/common/operations/LSHProjectionTest.cpp
+++ b/common/operations/LSHProjectionTest.cpp
@@ -99,20 +99,20 @@
 
     Compilation compilation(&model_);
     compilation.compile();
-    Request request(&compilation);
+    Execution execution(&compilation);
 
-#define SetInputOrWeight(X, T)                                           \
-    ASSERT_EQ(request.setInput(LSHProjection::k##X##Tensor, X##_.data(), \
-                               sizeof(X##_)),                            \
+#define SetInputOrWeight(X, T)                                             \
+    ASSERT_EQ(execution.setInput(LSHProjection::k##X##Tensor, X##_.data(), \
+                                 sizeof(X##_)),                            \
               Result::NO_ERROR);
 
     FOR_ALL_INPUT_AND_WEIGHT_TENSORS(SetInputOrWeight);
 
 #undef SetInputOrWeight
 
-#define SetOutput(X)                                                    \
-  ASSERT_EQ(request.setOutput(LSHProjection::k##X##Tensor, X##_.data(), \
-                              sizeof(X##_)),                            \
+#define SetOutput(X)                                                      \
+  ASSERT_EQ(execution.setOutput(LSHProjection::k##X##Tensor, X##_.data(), \
+                                sizeof(X##_)),                            \
             Result::NO_ERROR);
 
     FOR_ALL_OUTPUT_TENSORS(SetOutput);
@@ -120,10 +120,10 @@
 #undef SetOutput
 
     ASSERT_EQ(
-        request.setInput(LSHProjection::kTypeParam, &type_, sizeof(type_)),
+        execution.setInput(LSHProjection::kTypeParam, &type_, sizeof(type_)),
         Result::NO_ERROR);
 
-    ASSERT_EQ(request.compute(), Result::NO_ERROR);
+    ASSERT_EQ(execution.compute(), Result::NO_ERROR);
   }
 
  private:
diff --git a/common/operations/LSTMTest.cpp b/common/operations/LSTMTest.cpp
index 28fdfc1..70e9f3d 100644
--- a/common/operations/LSTMTest.cpp
+++ b/common/operations/LSTMTest.cpp
@@ -177,19 +177,19 @@
 
         Compilation compilation(&model_);
         compilation.compile();
-        Request request(&compilation);
-#define SetInputOrWeight(X)                                             \
-        ASSERT_EQ(request.setInput(LSTMCell::k##X##Tensor, X##_.data(), \
-                                   sizeof(X##_)),                       \
+        Execution execution(&compilation);
+#define SetInputOrWeight(X)                                               \
+        ASSERT_EQ(execution.setInput(LSTMCell::k##X##Tensor, X##_.data(), \
+                                     sizeof(X##_)),                       \
                   Result::NO_ERROR);
 
         FOR_ALL_INPUT_AND_WEIGHT_TENSORS(SetInputOrWeight);
 
 #undef SetInputOrWeight
 
-#define SetOutput(X)                                                    \
-        ASSERT_EQ(request.setOutput(LSTMCell::k##X##Tensor, X##_.data(), \
-                                    sizeof(X##_)),                      \
+#define SetOutput(X)                                                       \
+        ASSERT_EQ(execution.setOutput(LSTMCell::k##X##Tensor, X##_.data(), \
+                                      sizeof(X##_)),                       \
                   Result::NO_ERROR);
 
         FOR_ALL_OUTPUT_TENSORS(SetOutput);
@@ -197,45 +197,45 @@
 #undef SetOutput
 
         if (use_cifg_) {
-            request.setInput(LSTMCell::kInputToInputWeightsTensor, nullptr, 0);
-            request.setInput(LSTMCell::kRecurrentToInputWeightsTensor, nullptr, 0);
+            execution.setInput(LSTMCell::kInputToInputWeightsTensor, nullptr, 0);
+            execution.setInput(LSTMCell::kRecurrentToInputWeightsTensor, nullptr, 0);
         }
 
         if (use_peephole_) {
             if (use_cifg_) {
-                request.setInput(LSTMCell::kCellToInputWeightsTensor, nullptr, 0);
+                execution.setInput(LSTMCell::kCellToInputWeightsTensor, nullptr, 0);
             }
         } else {
-            request.setInput(LSTMCell::kCellToInputWeightsTensor, nullptr, 0);
-            request.setInput(LSTMCell::kCellToForgetWeightsTensor, nullptr, 0);
-            request.setInput(LSTMCell::kCellToOutputWeightsTensor, nullptr, 0);
+            execution.setInput(LSTMCell::kCellToInputWeightsTensor, nullptr, 0);
+            execution.setInput(LSTMCell::kCellToForgetWeightsTensor, nullptr, 0);
+            execution.setInput(LSTMCell::kCellToOutputWeightsTensor, nullptr, 0);
         }
 
         if (use_projection_weights_) {
             if (!use_projection_bias_) {
-                request.setInput(LSTMCell::kProjectionBiasTensor, nullptr, 0);
+                execution.setInput(LSTMCell::kProjectionBiasTensor, nullptr, 0);
             }
         } else {
-            request.setInput(LSTMCell::kProjectionWeightsTensor, nullptr, 0);
-            request.setInput(LSTMCell::kProjectionBiasTensor, nullptr, 0);
+            execution.setInput(LSTMCell::kProjectionWeightsTensor, nullptr, 0);
+            execution.setInput(LSTMCell::kProjectionBiasTensor, nullptr, 0);
         }
 
-        ASSERT_EQ(request.setInput(LSTMCell::kActivationParam,
-                                   &activation_, sizeof(activation_)),
+        ASSERT_EQ(execution.setInput(LSTMCell::kActivationParam,
+                                     &activation_, sizeof(activation_)),
                   Result::NO_ERROR);
-        ASSERT_EQ(request.setInput(LSTMCell::kCellClipParam,
-                                   &cell_clip_, sizeof(cell_clip_)),
+        ASSERT_EQ(execution.setInput(LSTMCell::kCellClipParam,
+                                     &cell_clip_, sizeof(cell_clip_)),
                   Result::NO_ERROR);
-        ASSERT_EQ(request.setInput(LSTMCell::kProjClipParam,
-                                   &proj_clip_, sizeof(proj_clip_)),
+        ASSERT_EQ(execution.setInput(LSTMCell::kProjClipParam,
+                                     &proj_clip_, sizeof(proj_clip_)),
                   Result::NO_ERROR);
 
-        ASSERT_EQ(request.compute(), Result::NO_ERROR);
+        ASSERT_EQ(execution.compute(), Result::NO_ERROR);
     }
 
 private:
     Model model_;
-    // Request request_;
+    // Execution execution_;
     const uint32_t n_batch_;
     const uint32_t n_input_;
     const uint32_t n_cell_;
diff --git a/common/operations/RNNTest.cpp b/common/operations/RNNTest.cpp
index f1fd593..9f2146b 100644
--- a/common/operations/RNNTest.cpp
+++ b/common/operations/RNNTest.cpp
@@ -212,28 +212,28 @@
 
     Compilation compilation(&model_);
     compilation.compile();
-    Request request(&compilation);
-#define SetInputOrWeight(X)                                                 \
-  ASSERT_EQ(request.setInput(RNN::k##X##Tensor, X##_.data(), sizeof(X##_)), \
+    Execution execution(&compilation);
+#define SetInputOrWeight(X)                                                   \
+  ASSERT_EQ(execution.setInput(RNN::k##X##Tensor, X##_.data(), sizeof(X##_)), \
             Result::NO_ERROR);
 
     FOR_ALL_INPUT_AND_WEIGHT_TENSORS(SetInputOrWeight);
 
 #undef SetInputOrWeight
 
-#define SetOutput(X)                                                         \
-  ASSERT_EQ(request.setOutput(RNN::k##X##Tensor, X##_.data(), sizeof(X##_)), \
+#define SetOutput(X)                                                           \
+  ASSERT_EQ(execution.setOutput(RNN::k##X##Tensor, X##_.data(), sizeof(X##_)), \
             Result::NO_ERROR);
 
     FOR_ALL_OUTPUT_TENSORS(SetOutput);
 
 #undef SetOutput
 
-    ASSERT_EQ(request.setInput(RNN::kActivationParam, &activation_,
-                               sizeof(activation_)),
+    ASSERT_EQ(execution.setInput(RNN::kActivationParam, &activation_,
+                                 sizeof(activation_)),
               Result::NO_ERROR);
 
-    ASSERT_EQ(request.compute(), Result::NO_ERROR);
+    ASSERT_EQ(execution.compute(), Result::NO_ERROR);
   }
 
  private:
diff --git a/common/operations/SVDFTest.cpp b/common/operations/SVDFTest.cpp
index 9d7d8ff..58f773a 100644
--- a/common/operations/SVDFTest.cpp
+++ b/common/operations/SVDFTest.cpp
@@ -188,17 +188,17 @@
 
     Compilation compilation(&model_);
     compilation.compile();
-    Request request(&compilation);
-#define SetInputOrWeight(X)                                                  \
-  ASSERT_EQ(request.setInput(SVDF::k##X##Tensor, X##_.data(), sizeof(X##_)), \
+    Execution execution(&compilation);
+#define SetInputOrWeight(X)                                                    \
+  ASSERT_EQ(execution.setInput(SVDF::k##X##Tensor, X##_.data(), sizeof(X##_)), \
             Result::NO_ERROR);
 
     FOR_ALL_INPUT_AND_WEIGHT_TENSORS(SetInputOrWeight);
 
 #undef SetInputOrWeight
 
-#define SetOutput(X)                                                          \
-  ASSERT_EQ(request.setOutput(SVDF::k##X##Tensor, X##_.data(), sizeof(X##_)), \
+#define SetOutput(X)                                                            \
+  ASSERT_EQ(execution.setOutput(SVDF::k##X##Tensor, X##_.data(), sizeof(X##_)), \
             Result::NO_ERROR);
 
     FOR_ALL_OUTPUT_TENSORS(SetOutput);
@@ -206,15 +206,15 @@
 #undef SetOutput
 
     int rank = 1;
-    ASSERT_EQ(request.setInput(SVDF::kRankParam, &rank, sizeof(rank)),
+    ASSERT_EQ(execution.setInput(SVDF::kRankParam, &rank, sizeof(rank)),
               Result::NO_ERROR);
 
     int activation = ActivationFn::kActivationNone;
-    ASSERT_EQ(request.setInput(SVDF::kActivationParam, &activation,
-                               sizeof(activation)),
+    ASSERT_EQ(execution.setInput(SVDF::kActivationParam, &activation,
+                                 sizeof(activation)),
               Result::NO_ERROR);
 
-    ASSERT_EQ(request.compute(), Result::NO_ERROR);
+    ASSERT_EQ(execution.compute(), Result::NO_ERROR);
   }
 
 #define DefineSetter(X)                          \
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 4365c9d..bc97876 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -35,11 +35,11 @@
     srcs: [
         "CompilationBuilder.cpp",
         "Event.cpp",
+        "ExecutionBuilder.cpp",
         "Manager.cpp",
         "Memory.cpp",
         "ModelBuilder.cpp",
         "NeuralNetworks.cpp",
-        "RequestBuilder.cpp",
     ],
 
     target: {
diff --git a/runtime/CompilationBuilder.cpp b/runtime/CompilationBuilder.cpp
index 92bb46e..7e5d238 100644
--- a/runtime/CompilationBuilder.cpp
+++ b/runtime/CompilationBuilder.cpp
@@ -18,7 +18,7 @@
 
 #include "CompilationBuilder.h"
 
-#include "RequestBuilder.h"
+#include "ExecutionBuilder.h"
 
 namespace android {
 namespace nn {
@@ -33,9 +33,9 @@
     return ANEURALNETWORKS_NO_ERROR;
 }
 
-int CompilationBuilder::createRequest(RequestBuilder **request) {
-    *request = new RequestBuilder(this);
-    return (*request ? ANEURALNETWORKS_NO_ERROR : ANEURALNETWORKS_OUT_OF_MEMORY);
+int CompilationBuilder::createExecution(ExecutionBuilder **execution) {
+    *execution = new ExecutionBuilder(this);
+    return (*execution ? ANEURALNETWORKS_NO_ERROR : ANEURALNETWORKS_OUT_OF_MEMORY);
 }
 
 }  // namespace nn
diff --git a/runtime/CompilationBuilder.h b/runtime/CompilationBuilder.h
index 0e1bc5f..9c5afe3 100644
--- a/runtime/CompilationBuilder.h
+++ b/runtime/CompilationBuilder.h
@@ -22,12 +22,12 @@
 namespace android {
 namespace nn {
 
+class ExecutionBuilder;
 class ModelBuilder;
-class RequestBuilder;
 
 class CompilationBuilder {
 public:
-    friend class RequestBuilder;  // TODO remove this
+    friend class ExecutionBuilder;  // TODO remove this
 
     CompilationBuilder(const ModelBuilder* model);
 
@@ -35,13 +35,13 @@
 
     int compile();  // TODO: Asynchronous (startCompile?)
 
-    int createRequest(RequestBuilder** request);
+    int createExecution(ExecutionBuilder** execution);
 
 private:
     // int startComputeOnCpu(const Model& model, sp<Event>* event);
 
     const ModelBuilder* mModel;
-    // Whether the application prefers to go fast or use low power for this request.
+    // Whether the application prefers to go fast or use low power for this execution.
     uint32_t mPreference = ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER;
 };
 
diff --git a/runtime/RequestBuilder.cpp b/runtime/ExecutionBuilder.cpp
similarity index 83%
rename from runtime/RequestBuilder.cpp
rename to runtime/ExecutionBuilder.cpp
index 3738783..5f1cc35 100644
--- a/runtime/RequestBuilder.cpp
+++ b/runtime/ExecutionBuilder.cpp
@@ -14,9 +14,9 @@
  * limitations under the License.
  */
 
-#define LOG_TAG "RequestBuilder"
+#define LOG_TAG "ExecutionBuilder"
 
-#include "RequestBuilder.h"
+#include "ExecutionBuilder.h"
 
 #include "CompilationBuilder.h"
 #include "CpuExecutor.h"
@@ -64,7 +64,7 @@
         uint32_t count = newType->dimensions.count;
         if (static_cast<OperandType>(newType->type) != operand.type ||
             count != operand.dimensions.size()) {
-            LOG(ERROR) << "ANeuralNetworksRequest_setInput/Output incompatible types";
+            LOG(ERROR) << "ANeuralNetworksExecution_setInput/Output incompatible types";
             return ANEURALNETWORKS_BAD_DATA;
         }
         for (uint32_t i = 0; i < count; i++) {
@@ -74,12 +74,12 @@
     return ANEURALNETWORKS_NO_ERROR;
 }
 
-RequestBuilder::RequestBuilder(const CompilationBuilder* compilation) :
+ExecutionBuilder::ExecutionBuilder(const CompilationBuilder* compilation) :
         mModel(compilation->mModel),
         mInputs(mModel->inputCount()),
         mOutputs(mModel->outputCount()),
         mMemories(mModel->getMemories()) {
-    LOG(DEBUG) << "RequestBuilder::RequestBuilder";
+    LOG(DEBUG) << "ExecutionBuilder::ExecutionBuilder";
     for (auto& p : mInputs) {
         p.state = ModelArgumentInfo::UNSPECIFIED;
     }
@@ -88,22 +88,22 @@
     }
 }
 
-int RequestBuilder::setInput(uint32_t index, const ANeuralNetworksOperandType* type,
-                             const void* buffer, uint32_t length) {
+int ExecutionBuilder::setInput(uint32_t index, const ANeuralNetworksOperandType* type,
+                               const void* buffer, uint32_t length) {
     uint32_t count = static_cast<uint32_t>(mInputs.size());
     if (index >= count) {
-        LOG(ERROR) << "ANeuralNetworksRequest_setInput bad index " << index << " " << count;
+        LOG(ERROR) << "ANeuralNetworksExecution_setInput bad index " << index << " " << count;
         return ANEURALNETWORKS_BAD_DATA;
     }
     return mInputs[index].setFromPointer(mModel->getInputOperand(index), type,
                                          const_cast<void*>(buffer), length);
 }
 
-int RequestBuilder::setInputFromMemory(uint32_t index, const ANeuralNetworksOperandType* type,
-                                       const Memory* memory, uint32_t offset, uint32_t length) {
+int ExecutionBuilder::setInputFromMemory(uint32_t index, const ANeuralNetworksOperandType* type,
+                                         const Memory* memory, uint32_t offset, uint32_t length) {
     uint32_t count = static_cast<uint32_t>(mInputs.size());
     if (index >= count) {
-        LOG(ERROR) << "ANeuralNetworksRequest_setInputFromMemory bad index " << index << " "
+        LOG(ERROR) << "ANeuralNetworksExecution_setInputFromMemory bad index " << index << " "
                    << count;
         return ANEURALNETWORKS_BAD_DATA;
     }
@@ -115,21 +115,21 @@
                                         length);
 }
 
-int RequestBuilder::setOutput(uint32_t index, const ANeuralNetworksOperandType* type, void* buffer,
-                              uint32_t length) {
+int ExecutionBuilder::setOutput(uint32_t index, const ANeuralNetworksOperandType* type, void* buffer,
+                                uint32_t length) {
     uint32_t count = static_cast<uint32_t>(mOutputs.size());
     if (index >= count) {
-        LOG(ERROR) << "ANeuralNetworksRequest_setOutput bad index " << index << " " << count;
+        LOG(ERROR) << "ANeuralNetworksExecution_setOutput bad index " << index << " " << count;
         return ANEURALNETWORKS_BAD_DATA;
     }
     return mOutputs[index].setFromPointer(mModel->getOutputOperand(index), type, buffer, length);
 }
 
-int RequestBuilder::setOutputFromMemory(uint32_t index, const ANeuralNetworksOperandType* type,
-                                        const Memory* memory, uint32_t offset, uint32_t length) {
+int ExecutionBuilder::setOutputFromMemory(uint32_t index, const ANeuralNetworksOperandType* type,
+                                          const Memory* memory, uint32_t offset, uint32_t length) {
     uint32_t count = static_cast<uint32_t>(mOutputs.size());
     if (index >= count) {
-        LOG(ERROR) << "ANeuralNetworksRequest_setOutputFromMemory bad index " << index << " "
+        LOG(ERROR) << "ANeuralNetworksExecution_setOutputFromMemory bad index " << index << " "
                    << count;
         return ANEURALNETWORKS_BAD_DATA;
     }
@@ -141,7 +141,7 @@
                                          length);
 }
 
-int RequestBuilder::startCompute() {
+int ExecutionBuilder::startCompute() {
     // TODO validate that we have full types for all inputs and outputs,
     // that the graph is not cyclic,
     /*
@@ -149,18 +149,18 @@
 
     for (auto& p : mInputs) {
         if (p.state == ModelArgumentInfo::UNSPECIFIED) {
-            LOG(ERROR) << "ANeuralNetworksRequest_startCompute not all inputs specified";
+            LOG(ERROR) << "ANeuralNetworksExecution_startCompute not all inputs specified";
             return ANEURALNETWORKS_BAD_DATA;
         }
     }
     */
     for (auto& p : mOutputs) {
         if (p.state == ModelArgumentInfo::UNSPECIFIED) {
-            LOG(ERROR) << "ANeuralNetworksRequest_startCompute not all outputs specified";
+            LOG(ERROR) << "ANeuralNetworksExecution_startCompute not all outputs specified";
             return ANEURALNETWORKS_BAD_DATA;
         }
     }
-    LOG(DEBUG) << "RequestBuilder::startCompute";
+    LOG(DEBUG) << "ExecutionBuilder::startCompute";
 
     std::shared_ptr<Device> device = DeviceManager::get()->getAvailableDriver();
     Model model;
@@ -170,9 +170,9 @@
                              : startComputeOnDevice(device->getInterface(), model);
 }
 
-int RequestBuilder::wait() {
+int ExecutionBuilder::wait() {
     if (mEvent == nullptr) {
-        LOG(ERROR) << "ANeuralNetworksRequest_wait without request in flight";
+        LOG(ERROR) << "ANeuralNetworksExecution_wait without execution in flight";
         return ANEURALNETWORKS_BAD_STATE;
     }
     mEvent->wait();
@@ -181,8 +181,8 @@
 
 // Figures out how to place each of the input or outputs in a buffer. This just does the layout,
 // it does not copy data.  Aligns each input a bit.
-int RequestBuilder::allocatePointerArgumentsToPool(std::vector<ModelArgumentInfo>* args,
-                                                   Memory* memory) {
+int ExecutionBuilder::allocatePointerArgumentsToPool(std::vector<ModelArgumentInfo>* args,
+                                                     Memory* memory) {
     uint32_t nextPoolIndex = mMemories.size();
     int64_t total = 0;
     for (auto& info : *args) {
@@ -196,7 +196,7 @@
         }
     };
     if (total > 0xFFFFFFFF) {
-        LOG(ERROR) << "ANeuralNetworksRequest_startCompute Size of all inputs or outputs exceeds "
+        LOG(ERROR) << "ANeuralNetworksExecution_startCompute Size of all inputs or outputs exceeds "
                       "2^32.";
         return ANEURALNETWORKS_BAD_DATA;
     }
@@ -217,8 +217,8 @@
     }
 }
 
-int RequestBuilder::startComputeOnDevice(sp<IDevice> driver, const Model& model) {
-    LOG(DEBUG) << "RequestBuilder::startComputeOnDevice";
+int ExecutionBuilder::startComputeOnDevice(sp<IDevice> driver, const Model& model) {
+    LOG(DEBUG) << "ExecutionBuilder::startComputeOnDevice";
     // TODO Dangerous!  In async, the model will outlive it here. Safe for now
     sp<Event> preparationEvent = new Event();
     ErrorStatus prepareStatus = ErrorStatus::GENERAL_FAILURE;
@@ -274,7 +274,7 @@
     }
 
     // Prepare the event for asynchronous execution. The sp<Event>
-    // object is recorded if the request has been successfully
+    // object is recorded if the execution has been successfully
     // launched.  The sp is used for ref-counting purposes. Without
     // it, the HIDL service could attempt to communicate with a dead
     // event object.
@@ -284,7 +284,7 @@
     sp<Event> eventSp = new Event();
 
     LOG(DEBUG) << "Before preparedModel->execute() " << toString(request);
-    // Execute the request.
+    // Execute.
     // TODO: What happens to the Event if the service dies abnormally
     // -- won't that keep the Event live forever, because the service
     // never has the opportunity to bump the reference count down? Or
@@ -319,7 +319,7 @@
             memcpy(info.buffer, data + loc.offset, loc.length);
         }
     }
-    LOG(DEBUG) << "RequestBuilder::startComputeOnDevice completed";
+    LOG(DEBUG) << "ExecutionBuilder::startComputeOnDevice completed";
 
     mEvent = eventSp;
     return ANEURALNETWORKS_NO_ERROR;
@@ -335,11 +335,11 @@
     event->notify(status);
 }
 
-int RequestBuilder::startComputeOnCpu(const Model& model) {
+int ExecutionBuilder::startComputeOnCpu(const Model& model) {
     // TODO: use a thread pool
 
     // Prepare the event for asynchronous execution. The sp<Event> object is
-    // recorded if the request has been successfully launched.
+    // recorded if the execution has been successfully launched.
     sp<Event> eventSp = new Event();
 
     std::vector<RunTimePoolInfo> runTimePoolInfos;
diff --git a/runtime/RequestBuilder.h b/runtime/ExecutionBuilder.h
similarity index 93%
rename from runtime/RequestBuilder.h
rename to runtime/ExecutionBuilder.h
index a154c5c..1df3f0a 100644
--- a/runtime/RequestBuilder.h
+++ b/runtime/ExecutionBuilder.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_ML_NN_RUNTIME_REQUEST_BUILDER_H
-#define ANDROID_ML_NN_RUNTIME_REQUEST_BUILDER_H
+#ifndef ANDROID_ML_NN_RUNTIME_EXECUTION_BUILDER_H
+#define ANDROID_ML_NN_RUNTIME_EXECUTION_BUILDER_H
 
 #include "Event.h"
 #include "HalInterfaces.h"
@@ -56,9 +56,9 @@
     int updateDimensionInfo(const Operand& operand, const ANeuralNetworksOperandType* newType);
 };
 
-class RequestBuilder {
+class ExecutionBuilder {
 public:
-    RequestBuilder(const CompilationBuilder* compilation);
+    ExecutionBuilder(const CompilationBuilder* compilation);
 
     int setInput(uint32_t index, const ANeuralNetworksOperandType* type, const void* buffer,
                  uint32_t length);
@@ -99,11 +99,11 @@
     Memory mOutputPointerArguments;
     MemoryTracker mMemories;
 
-    // Used for synchronizing with request.
+    // Used for synchronizing with execution.
     sp<Event> mEvent;
 };
 
 } // namespace nn
 } // namespace android
 
-#endif // ANDROID_ML_NN_RUNTIME_REQUEST_BUILDER_H
+#endif // ANDROID_ML_NN_RUNTIME_EXECUTION_BUILDER_H
diff --git a/runtime/NeuralNetworks.cpp b/runtime/NeuralNetworks.cpp
index 80fe0d5..a91cb84 100644
--- a/runtime/NeuralNetworks.cpp
+++ b/runtime/NeuralNetworks.cpp
@@ -22,11 +22,11 @@
 
 #include "CompilationBuilder.h"
 #include "Event.h"
+#include "ExecutionBuilder.h"
 #include "NeuralNetworks.h"
 #include "Manager.h"
 #include "Memory.h"
 #include "ModelBuilder.h"
-#include "RequestBuilder.h"
 
 #include <memory>
 #include <vector>
@@ -457,123 +457,123 @@
     return ANEURALNETWORKS_NO_ERROR;
 }
 
-int ANeuralNetworksRequest_create(ANeuralNetworksCompilation* compilation,
-                                  ANeuralNetworksRequest** request) {
-    if (!compilation || !request) {
-        LOG(ERROR) << "ANeuralNetworksRequest_create passed a nullptr";
+int ANeuralNetworksExecution_create(ANeuralNetworksCompilation* compilation,
+                                    ANeuralNetworksExecution** execution) {
+    if (!compilation || !execution) {
+        LOG(ERROR) << "ANeuralNetworksExecution_create passed a nullptr";
         return ANEURALNETWORKS_UNEXPECTED_NULL;
     }
 
     CompilationBuilder* c = reinterpret_cast<CompilationBuilder*>(compilation);
-    RequestBuilder* r = nullptr;
-    int result = c->createRequest(&r);
-    *request = reinterpret_cast<ANeuralNetworksRequest*>(r);
+    ExecutionBuilder* r = nullptr;
+    int result = c->createExecution(&r);
+    *execution = reinterpret_cast<ANeuralNetworksExecution*>(r);
     return result;
 }
 
-void ANeuralNetworksRequest_free(ANeuralNetworksRequest* request) {
-    // TODO specification says that a request-in-flight can be deleted
+void ANeuralNetworksExecution_free(ANeuralNetworksExecution* execution) {
+    // TODO specification says that an execution-in-flight can be deleted
     // No validation.  Free of nullptr is valid.
-    RequestBuilder* r = reinterpret_cast<RequestBuilder*>(request);
+    ExecutionBuilder* r = reinterpret_cast<ExecutionBuilder*>(execution);
     if (r) {
         r->wait();
         delete r;
     }
 }
 
-int ANeuralNetworksRequest_setInput(ANeuralNetworksRequest* request, int32_t index,
-                                    const ANeuralNetworksOperandType* type, const void* buffer,
-                                    size_t length) {
+int ANeuralNetworksExecution_setInput(ANeuralNetworksExecution* execution, int32_t index,
+                                      const ANeuralNetworksOperandType* type, const void* buffer,
+                                      size_t length) {
     // TODO: For a non-optional input, also verify that buffer is not null.
-    if (!request) {
-        LOG(ERROR) << "ANeuralNetworksRequest_setInput passed a nullptr";
+    if (!execution) {
+        LOG(ERROR) << "ANeuralNetworksExecution_setInput passed a nullptr";
         return ANEURALNETWORKS_UNEXPECTED_NULL;
     }
     if (type != nullptr) {
-        int n = ValidateOperandType(*type, "ANeuralNetworksRequest_setInput", false);
+        int n = ValidateOperandType(*type, "ANeuralNetworksExecution_setInput", false);
         if (n != ANEURALNETWORKS_NO_ERROR) {
             return n;
         }
     }
     if (length > 0xFFFFFFFF) {
-        LOG(ERROR) << "ANeuralNetworksRequest_setInput input exceeds max length " << length;
+        LOG(ERROR) << "ANeuralNetworksExecution_setInput input exceeds max length " << length;
         return ANEURALNETWORKS_BAD_DATA;
     }
     uint32_t l = static_cast<uint32_t>(length);
-    RequestBuilder* r = reinterpret_cast<RequestBuilder*>(request);
+    ExecutionBuilder* r = reinterpret_cast<ExecutionBuilder*>(execution);
     return r->setInput(index, type, buffer, l);
 }
 
-int ANeuralNetworksRequest_setInputFromMemory(ANeuralNetworksRequest* request, int32_t index,
-                                              const ANeuralNetworksOperandType* type,
-                                              const ANeuralNetworksMemory* memory, uint32_t offset,
-                                              uint32_t length) {
-    if (!request || !memory) {
-        LOG(ERROR) << "ANeuralNetworksRequest_setInputFromMemory passed a nullptr";
+int ANeuralNetworksExecution_setInputFromMemory(ANeuralNetworksExecution* execution, int32_t index,
+                                                const ANeuralNetworksOperandType* type,
+                                                const ANeuralNetworksMemory* memory, uint32_t offset,
+                                                uint32_t length) {
+    if (!execution || !memory) {
+        LOG(ERROR) << "ANeuralNetworksExecution_setInputFromMemory passed a nullptr";
         return ANEURALNETWORKS_UNEXPECTED_NULL;
     }
     // TODO validate the rest
 
     const Memory* m = reinterpret_cast<const Memory*>(memory);
-    RequestBuilder* r = reinterpret_cast<RequestBuilder*>(request);
+    ExecutionBuilder* r = reinterpret_cast<ExecutionBuilder*>(execution);
     return r->setInputFromMemory(index, type, m, offset, length);
 }
 
-int ANeuralNetworksRequest_setOutput(ANeuralNetworksRequest* request, int32_t index,
-                                     const ANeuralNetworksOperandType* type, void* buffer,
-                                     size_t length) {
-    if (!request || !buffer) {
-        LOG(ERROR) << "ANeuralNetworksRequest_setOutput passed a nullptr";
+int ANeuralNetworksExecution_setOutput(ANeuralNetworksExecution* execution, int32_t index,
+                                       const ANeuralNetworksOperandType* type, void* buffer,
+                                       size_t length) {
+    if (!execution || !buffer) {
+        LOG(ERROR) << "ANeuralNetworksExecution_setOutput passed a nullptr";
         return ANEURALNETWORKS_UNEXPECTED_NULL;
     }
     if (type != nullptr) {
-        int n = ValidateOperandType(*type, "ANeuralNetworksRequest_setOutput", false);
+        int n = ValidateOperandType(*type, "ANeuralNetworksExecution_setOutput", false);
         if (n != ANEURALNETWORKS_NO_ERROR) {
             return n;
         }
     }
     if (length > 0xFFFFFFFF) {
-        LOG(ERROR) << "ANeuralNetworksRequest_setOutput input exceeds max length " << length;
+        LOG(ERROR) << "ANeuralNetworksExecution_setOutput input exceeds max length " << length;
         return ANEURALNETWORKS_BAD_DATA;
     }
     uint32_t l = static_cast<uint32_t>(length);
 
-    RequestBuilder* r = reinterpret_cast<RequestBuilder*>(request);
+    ExecutionBuilder* r = reinterpret_cast<ExecutionBuilder*>(execution);
     return r->setOutput(index, type, buffer, l);
 }
 
-int ANeuralNetworksRequest_setOutputFromMemory(ANeuralNetworksRequest* request, int32_t index,
-                                               const ANeuralNetworksOperandType* type,
-                                               const ANeuralNetworksMemory* memory, uint32_t offset,
-                                               uint32_t length) {
-    if (!request || !memory) {
-        LOG(ERROR) << "ANeuralNetworksRequest_setOutputFromMemory passed a nullptr";
+int ANeuralNetworksExecution_setOutputFromMemory(ANeuralNetworksExecution* execution, int32_t index,
+                                                 const ANeuralNetworksOperandType* type,
+                                                 const ANeuralNetworksMemory* memory, uint32_t offset,
+                                                 uint32_t length) {
+    if (!execution || !memory) {
+        LOG(ERROR) << "ANeuralNetworksExecution_setOutputFromMemory passed a nullptr";
         return ANEURALNETWORKS_UNEXPECTED_NULL;
     }
     // TODO validate the rest
 
-    RequestBuilder* r = reinterpret_cast<RequestBuilder*>(request);
+    ExecutionBuilder* r = reinterpret_cast<ExecutionBuilder*>(execution);
     const Memory* m = reinterpret_cast<const Memory*>(memory);
     return r->setOutputFromMemory(index, type, m, offset, length);
 }
 
-int ANeuralNetworksRequest_startCompute(ANeuralNetworksRequest* request) {
-    if (!request) {
-        LOG(ERROR) << "ANeuralNetworksRequest_startCompute passed a nullptr";
+int ANeuralNetworksExecution_startCompute(ANeuralNetworksExecution* execution) {
+    if (!execution) {
+        LOG(ERROR) << "ANeuralNetworksExecution_startCompute passed a nullptr";
         return ANEURALNETWORKS_UNEXPECTED_NULL;
     }
     // TODO validate the rest
 
-    RequestBuilder* r = reinterpret_cast<RequestBuilder*>(request);
+    ExecutionBuilder* r = reinterpret_cast<ExecutionBuilder*>(execution);
     return r->startCompute();
 }
 
-int ANeuralNetworksRequest_wait(ANeuralNetworksRequest* request) {
-    if (!request) {
-        LOG(ERROR) << "ANeuralNetworksRequest_wait passed a nullptr";
+int ANeuralNetworksExecution_wait(ANeuralNetworksExecution* execution) {
+    if (!execution) {
+        LOG(ERROR) << "ANeuralNetworksExecution_wait passed a nullptr";
         return ANEURALNETWORKS_UNEXPECTED_NULL;
     }
 
-    RequestBuilder* r = reinterpret_cast<RequestBuilder*>(request);
+    ExecutionBuilder* r = reinterpret_cast<ExecutionBuilder*>(execution);
     return r->wait();
 }
diff --git a/runtime/include/NeuralNetworks.h b/runtime/include/NeuralNetworks.h
index 0c05e40..b2694cb 100644
--- a/runtime/include/NeuralNetworks.h
+++ b/runtime/include/NeuralNetworks.h
@@ -1073,8 +1073,8 @@
  * can be used to directly created shared memory.
  *
  * Memory objects can also be used to specify the input and output arguments of
- * a request. See {@link ANeuralNetworksRequest_setInputFromMemory}
- * and {@link ANeuralNetworksRequest_setOutputFromMemory}. This is a typical
+ * an execution. See {@link ANeuralNetworksExecution_setInputFromMemory}
+ * and {@link ANeuralNetworksExecution_setOutputFromMemory}. This is a typical
  * usage for hardware buffers. See {@link ANeuralNetworksMemory_createFromHardwareBuffer}.
  */
 typedef struct ANeuralNetworksMemory ANeuralNetworksMemory;
@@ -1108,9 +1108,9 @@
  *    <li>Perform the compilation with {@link ANeuralNetworksCompilation_start}.</li>
  *    <li>Wait for the compilation to complete with {@link ANeuralNetworksCompilation_wait}.</li>
  *    <li>Use the compilation as many times as needed
- *        with {@link ANeuralNetworksRequest_create}.</li>
+ *        with {@link ANeuralNetworksExecution_create}.</li>
  *    <li>Destroy the compilation with {@link ANeuralNetworksCompilation_free}
- *        once all requests using the compilation have completed.</li></ul></p>
+ *        once all executions using the compilation have completed.</li></ul></p>
  *
  * <p>A compilation cannot be modified once {@link ANeuralNetworksCompilation_start}
  * has been called on it.</p>
@@ -1124,36 +1124,36 @@
 typedef struct ANeuralNetworksCompilation ANeuralNetworksCompilation;
 
 /**
- * ANeuralNetworksRequest is an opaque type that can be used to apply a machine
+ * ANeuralNetworksExecution is an opaque type that can be used to apply a machine
  * learning model to a set of inputs.
  *
  * <p>To use:<ul>
- *    <li>Create a new request instance by calling the
- *        {@link ANeuralNetworksRequest_create} function.</li>
+ *    <li>Create a new execution instance by calling the
+ *        {@link ANeuralNetworksExecution_create} function.</li>
  *    <li>Associate data to the model inputs with
- *        {@link ANeuralNetworksRequest_setInput} or
- *        {@link ANeuralNetworksRequest_setInputFromMemory}.</li>
+ *        {@link ANeuralNetworksExecution_setInput} or
+ *        {@link ANeuralNetworksExecution_setInputFromMemory}.</li>
  *    <li>Associate output buffers to the model outputs with
- *        {@link ANeuralNetworksRequest_setOutput} or
- *        {@link ANeuralNetworksRequest_setOutputFromMemory}.</li>
- *    <li>Apply the model with {@link ANeuralNetworksRequest_startCompute}.</li>
- *    <li>Wait for the request to complete with {@link
- *        ANeuralNetworksRequest_wait}.</li>
- *    <li>Destroy the request with
- *        {@link ANeuralNetworksRequest_free}.</li></ul></p>
+ *        {@link ANeuralNetworksExecution_setOutput} or
+ *        {@link ANeuralNetworksExecution_setOutputFromMemory}.</li>
+ *    <li>Apply the model with {@link ANeuralNetworksExecution_startCompute}.</li>
+ *    <li>Wait for the execution to complete with {@link
+ *        ANeuralNetworksExecution_wait}.</li>
+ *    <li>Destroy the execution with
+ *        {@link ANeuralNetworksExecution_free}.</li></ul></p>
  *
- * <p>A request cannot be modified once {@link ANeuralNetworksRequest_start}
+ * <p>An execution cannot be modified once {@link ANeuralNetworksExecution_startCompute}
  * has been called on it.</p>
  *
- * <p>A request can be applied to a model with
- * {@link ANeuralNetworksRequest_startCompute} only once. Create new requests
+ * <p>An execution can be applied to a model with
+ * {@link ANeuralNetworksExecution_startCompute} only once. Create new executions
  * to do new evaluations of the model.</p>
  *
  * <p>It is the application's responsibility to make sure that only one thread
- * modifies a request at a given time. It is however safe for more than one
- * thread to use {@link ANeuralNetworksRequest_wait} at the same time.</p>
+ * modifies an execution at a given time. It is however safe for more than one
+ * thread to use {@link ANeuralNetworksExecution_wait} at the same time.</p>
  */
-typedef struct ANeuralNetworksRequest ANeuralNetworksRequest;
+typedef struct ANeuralNetworksExecution ANeuralNetworksExecution;
 
 typedef struct ANeuralNetworksIntList {
     uint32_t count;
@@ -1209,10 +1209,10 @@
  * Destroys the machine learning runtime.
  *
  * This function frees any resource used by the runtime. It will wait
- * until in flight requests have completed and will prevent new ones
- * from being started with {@link ANeuralNetworksRequest_startCompute}.
+ * until in flight executions have completed and will prevent new ones
+ * from being started with {@link ANeuralNetworksExecution_startCompute}.
  *
- * Threads blocked on {@link ANeuralNetworksRequest_wait} calls will be
+ * Threads blocked on {@link ANeuralNetworksExecution_wait} calls will be
  * released before this function terminates.
  *
  * See {@link ANeuralNetworksInitialize} for details on how multiple calls
@@ -1262,7 +1262,7 @@
  * Create an empty {@link ANeuralNetworksModel}.
  *
  * <p>This only creates the object. Computation is performed once
- * {@link ANeuralNetworksRequest_startCompute} is invoked.
+ * {@link ANeuralNetworksExecution_startCompute} is invoked.
  *
  * The model should be constructed with calls to
  * {@link ANeuralNetworksModel_addOperation} and
@@ -1317,16 +1317,16 @@
  * The order in which the operands are added is important. The first one added
  * to a model will have the index value 0, the second 1, etc. These indexes are
  * used as operand identifiers in {@link ANeuralNetworksModel_addOperation},
- * {@link ANeuralNetworksRequest_setInput},
- * {@link ANeuralNetworksRequest_setInputFromMemory},
- * {@link ANeuralNetworksRequest_setOutput},
- * {@link ANeuralNetworksRequest_setOutputFromMemory} and
- * {@link ANeuralNetworksRequest_setOperandValue}.
+ * {@link ANeuralNetworksExecution_setInput},
+ * {@link ANeuralNetworksExecution_setInputFromMemory},
+ * {@link ANeuralNetworksExecution_setOutput},
+ * {@link ANeuralNetworksExecution_setOutputFromMemory} and
+ * {@link ANeuralNetworksModel_setOperandValue}.
  *
  * To build a model that can accomodate inputs of various sizes, as you may want
  * to do for a CNN, set the size of the dimensions that will vary at run time to 0.
  * If you do so, provide the full dimensions when calling
- * {@link ANeuralNetworksRequest_setInput} or {@link ANeuralNetworksRequest_setInputFromMemory}.
+ * {@link ANeuralNetworksExecution_setInput} or {@link ANeuralNetworksExecution_setInputFromMemory}.
  *
  * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been
  * called will return an error.
@@ -1349,7 +1349,7 @@
  *
  * For tensor values, a pointer to the buffer is stored within the model.
  * The application is responsible for not changing the content of this region
- * until all requests using this model have completed. As the data may
+ * until all executions using this model have completed. As the data may
  * be copied during processing, modifying the data after this call yields
  * undefined results.
  *
@@ -1373,7 +1373,7 @@
  *
  * The content of the memory is not copied. A reference to that memory is stored
  * inside the model. The application is responsible for not changing the content
- * of the memory region until all requests using this model have completed.
+ * of the memory region until all executions using this model have completed.
  * As the data may be copied during processing, modifying the data after this call
  * yields undefined results.
  *
@@ -1505,11 +1505,11 @@
  * to wait for that signal.</p>
  *
  * Multiple compilations can be scheduled and performed concurrently, and
- * compilations can be performed concurrently with execution of requests.
+ * compilations can be performed concurrently with executions.
  * The runtime makes no guarantee on the ordering of the completion of compilations
- * and requests. If it's important to the application, the application should enforce
+ * and executions. If it's important to the application, the application should enforce
  * the ordering by using
- * {@link ANeuralNetworksCompilation_wait} and {@link ANeuralNetworksRequest_wait}.
+ * {@link ANeuralNetworksCompilation_wait} and {@link ANeuralNetworksExecution_wait}.
  *
  * ANeuralNetworksCompilation_wait must be called to recuperate the resources used
  * by the compilation.
@@ -1537,48 +1537,48 @@
 int ANeuralNetworksCompilation_wait(ANeuralNetworksCompilation* compilation);
 
 /**
- * Create a {@link ANeuralNetworksRequest} to apply the given compilation.
+ * Create a {@link ANeuralNetworksExecution} to apply the given compilation.
  * This only creates the object. Computation is only performed once
- * {@link ANeuralNetworksRequest_startCompute} is invoked.
+ * {@link ANeuralNetworksExecution_startCompute} is invoked.
  *
- * <p>The provided compilation must outlive the request.</p>
+ * <p>The provided compilation must outlive the execution.</p>
  *
- * See {@link ANeuralNetworksRequest} for information on multithreaded usage.
+ * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
  *
  * @param compilation The {@link ANeuralNetworksCompilation} to be evaluated.
- * @param request The newly created object or NULL if unsuccessful.
+ * @param execution The newly created object or NULL if unsuccessful.
  *
  * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA
  *         if the compilation is invalid.
  */
-int ANeuralNetworksRequest_create(ANeuralNetworksCompilation* compilation,
-                                  ANeuralNetworksRequest** request);
+int ANeuralNetworksExecution_create(ANeuralNetworksCompilation* compilation,
+                                    ANeuralNetworksExecution** execution);
 
 /**
- * Destroy a request.
+ * Destroy an execution.
  *
- * <p>If called on a request for which
- * {@link ANeuralNetworksRequest_startCompute} has been called, the
- * function will return immediately but will mark the request to be deleted
- * once the computation completes.   The {link ANeuralNetworksRequest_wait}
+ * <p>If called on an execution for which
+ * {@link ANeuralNetworksExecution_startCompute} has been called, the
+ * function will return immediately but will mark the execution to be deleted
+ * once the computation completes. The {@link ANeuralNetworksExecution_wait}
  * will return ANEURALNETWORKS_ERROR_DELETED.
  *
- * See {@link ANeuralNetworksRequest} for information on multithreaded usage.
+ * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
  *
- * @param request The request to be destroyed. Passing NULL is acceptable and
- *                results in no operation.
+ * @param execution The execution to be destroyed. Passing NULL is acceptable and
+ *                  results in no operation.
  */
-void ANeuralNetworksRequest_free(ANeuralNetworksRequest* request);
+void ANeuralNetworksExecution_free(ANeuralNetworksExecution* execution);
 
 /**
  * Associate a user buffer with an input of the model of the
- * {@link ANeuralNetworksRequest}.
+ * {@link ANeuralNetworksExecution}.
  *
- * <p>The provided buffer must outlive the request.</p>
+ * <p>The provided buffer must outlive the execution.</p>
  *
- * See {@link ANeuralNetworksRequest} for information on multithreaded usage.
+ * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
  *
- * @param request The request to be modified.
+ * @param execution The execution to be modified.
  * @param index The index of the model operand we're associating the input to.
  * @param type The type of the operand. This should be used to specify the
  *             dimensions that were set to 0 when the operand was added to the
@@ -1591,19 +1591,19 @@
  * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the
  *         name is not recognized or the buffer is too small for the input.
  */
-int ANeuralNetworksRequest_setInput(ANeuralNetworksRequest* request, int32_t index,
-                                    const ANeuralNetworksOperandType* type, const void* buffer,
-                                    size_t length);
+int ANeuralNetworksExecution_setInput(ANeuralNetworksExecution* execution, int32_t index,
+                                      const ANeuralNetworksOperandType* type, const void* buffer,
+                                      size_t length);
 
 /**
  * Associate part of a memory object with an input of the model of the
- * {@link ANeuralNetworksRequest}.
+ * {@link ANeuralNetworksExecution}.
  *
- * <p>The provided memory must outlive the request.</p>
+ * <p>The provided memory must outlive the execution.</p>
  *
- * See {@link ANeuralNetworksRequest} for information on multithreaded usage.
+ * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
  *
- * @param request The request to be modified.
+ * @param execution The execution to be modified.
  * @param index The index of the model operand we're associating the input to.
  * @param type The type of the operand. This can be used to specify the
  *             dimensions that were set to 0 when the operand was added to the
@@ -1618,20 +1618,20 @@
  * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the
  *         name is not recognized or the buffer is too small for the input.
  */
-int ANeuralNetworksRequest_setInputFromMemory(ANeuralNetworksRequest* request, int32_t index,
-                                              const ANeuralNetworksOperandType* type,
-                                              const ANeuralNetworksMemory* memory, uint32_t offset,
-                                              uint32_t length);
+int ANeuralNetworksExecution_setInputFromMemory(ANeuralNetworksExecution* execution, int32_t index,
+                                                const ANeuralNetworksOperandType* type,
+                                                const ANeuralNetworksMemory* memory, uint32_t offset,
+                                                uint32_t length);
 
 /**
  * Associate a user buffer with an output of the model of the
- * {@link ANeuralNetworksRequest}.
+ * {@link ANeuralNetworksExecution}.
  *
- * <p>The provided buffer must outlive the request.</p>
+ * <p>The provided buffer must outlive the execution.</p>
  *
- * See {@link ANeuralNetworksRequest} for information on multithreaded usage.
+ * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
  *
- * @param request The request to be modified.
+ * @param execution The execution to be modified.
  * @param index The index of the model operand we're associating the output to.
  * @param type The type of the operand. This can be used to specify the
  *             dimensions that were set to 0 when the operand was added to the
@@ -1644,19 +1644,19 @@
  * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the
  *         name is not recognized or the buffer is too small for the output.
  */
-int ANeuralNetworksRequest_setOutput(ANeuralNetworksRequest* request, int32_t index,
-                                     const ANeuralNetworksOperandType* type, void* buffer,
-                                     size_t length);
+int ANeuralNetworksExecution_setOutput(ANeuralNetworksExecution* execution, int32_t index,
+                                       const ANeuralNetworksOperandType* type, void* buffer,
+                                       size_t length);
 
 /**
  * Associate part of a memory object with an output of the model of the
- * {@link ANeuralNetworksRequest}.
+ * {@link ANeuralNetworksExecution}.
  *
- * <p>The provided memory must outlive the request.</p>
+ * <p>The provided memory must outlive the execution.</p>
  *
- * See {@link ANeuralNetworksRequest} for information on multithreaded usage.
+ * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
  *
- * @param request The request to be modified.
+ * @param execution The execution to be modified.
  * @param index The index of the model operand we're associating the input to.
  * @param type The type of the operand. This can be used to specify the
  *             dimensions that were set to 0 when the operand was added to the
@@ -1671,47 +1671,47 @@
  * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the
  *         name is not recognized or the buffer is too small for the output.
  */
-int ANeuralNetworksRequest_setOutputFromMemory(ANeuralNetworksRequest* request, int32_t index,
-                                               const ANeuralNetworksOperandType* type,
-                                               const ANeuralNetworksMemory* memory, uint32_t offset,
-                                               uint32_t length);
+int ANeuralNetworksExecution_setOutputFromMemory(ANeuralNetworksExecution* execution, int32_t index,
+                                                 const ANeuralNetworksOperandType* type,
+                                                 const ANeuralNetworksMemory* memory, uint32_t offset,
+                                                 uint32_t length);
 
 /**
- * Schedule the request for execution.
+ * Schedule evaluation of the execution.
  *
- * <p>Schedules the request for execution. Once the model has been
- * applied and the outputs are ready to be consumed, the request will be
- * signaled. Use {@link ANeuralNetworksRequest_wait} to wait for that signal.
+ * <p>Schedules evaluation of the execution. Once the model has been
+ * applied and the outputs are ready to be consumed, the execution will be
+ * signaled. Use {@link ANeuralNetworksExecution_wait} to wait for that signal.
  * </p>
  *
- * Multiple requests can be scheduled and executed concurrently, and compilations
- * can be performed concurrently with execution of requests. The runtime makes
- * no guarantee on the ordering of the completion of compilations and requests.
+ * Multiple executions can be scheduled and evaluated concurrently, and compilations
+ * can be performed concurrently with executions. The runtime makes
+ * no guarantee on the ordering of the completion of compilations and executions.
  * If it's important to the application, the application should enforce the ordering
- * by using {@link ANeuralNetworksCompilation_wait} and {@link ANeuralNetworksRequest_wait}.
+ * by using {@link ANeuralNetworksCompilation_wait} and {@link ANeuralNetworksExecution_wait}.
  *
- * ANeuralNetworksRequest_wait must be called to recuperate the resources used
- * by the request.
+ * ANeuralNetworksExecution_wait must be called to recuperate the resources used
+ * by the execution.
  *
- * See {@link ANeuralNetworksRequest} for information on multithreaded usage.
+ * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
  *
- * @param request The request to be scheduled and executed.
+ * @param execution The execution to be scheduled and executed.
  *
  * @return ANEURALNETWORKS_NO_ERROR if successful.
  */
-int ANeuralNetworksRequest_startCompute(ANeuralNetworksRequest* request);
+int ANeuralNetworksExecution_startCompute(ANeuralNetworksExecution* execution);
 
 /**
- * Waits until the request completes.
+ * Waits until the execution completes.
  *
- * More than one thread can wait on a request.  When the request completes,
+ * More than one thread can wait on an execution.  When the execution completes,
  * all threads will be released.
  *
- * See {@link ANeuralNetworksRequest} for information on multithreaded usage.
+ * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
  *
- * @return ANEURALNETWORKS_NO_ERROR if the request completed normally.
+ * @return ANEURALNETWORKS_NO_ERROR if the execution completed normally.
  */
-int ANeuralNetworksRequest_wait(ANeuralNetworksRequest* request);
+int ANeuralNetworksExecution_wait(ANeuralNetworksExecution* execution);
 
 __END_DECLS
 
diff --git a/runtime/include/NeuralNetworksWrapper.h b/runtime/include/NeuralNetworksWrapper.h
index c9c4b49..cd6c0e4 100644
--- a/runtime/include/NeuralNetworksWrapper.h
+++ b/runtime/include/NeuralNetworksWrapper.h
@@ -276,33 +276,33 @@
     ANeuralNetworksCompilation* mCompilation = nullptr;
 };
 
-class Request {
+class Execution {
 public:
-    Request(const Compilation* compilation) {
-        int result = ANeuralNetworksRequest_create(compilation->getHandle(), &mRequest);
+    Execution(const Compilation* compilation) {
+        int result = ANeuralNetworksExecution_create(compilation->getHandle(), &mExecution);
         if (result != 0) {
             // TODO Handle the error
         }
     }
 
-    ~Request() { ANeuralNetworksRequest_free(mRequest); }
+    ~Execution() { ANeuralNetworksExecution_free(mExecution); }
 
     // Disallow copy semantics to ensure the runtime object can only be freed
     // once. Copy semantics could be enabled if some sort of reference counting
     // or deep-copy system for runtime objects is added later.
-    Request(const Request&) = delete;
-    Request& operator=(const Request&) = delete;
+    Execution(const Execution&) = delete;
+    Execution& operator=(const Execution&) = delete;
 
     // Move semantics to remove access to the runtime object from the wrapper
     // object that is being moved. This ensures the runtime object will be
     // freed only once.
-    Request(Request&& other) {
+    Execution(Execution&& other) {
         *this = std::move(other);
     }
-    Request& operator=(Request&& other) {
+    Execution& operator=(Execution&& other) {
         if (this != &other) {
-            mRequest = other.mRequest;
-            other.mRequest = nullptr;
+            mExecution = other.mExecution;
+            other.mExecution = nullptr;
         }
         return *this;
     }
@@ -310,48 +310,48 @@
     Result setInput(uint32_t index, const void* buffer, size_t length,
                     const ANeuralNetworksOperandType* type = nullptr) {
         return static_cast<Result>(
-                    ANeuralNetworksRequest_setInput(mRequest, index, type, buffer, length));
+                    ANeuralNetworksExecution_setInput(mExecution, index, type, buffer, length));
     }
 
     Result setInputFromMemory(uint32_t index, const Memory* memory, uint32_t offset,
                               uint32_t length, const ANeuralNetworksOperandType* type = nullptr) {
-        return static_cast<Result>(ANeuralNetworksRequest_setInputFromMemory(
-                    mRequest, index, type, memory->get(), offset, length));
+        return static_cast<Result>(ANeuralNetworksExecution_setInputFromMemory(
+                    mExecution, index, type, memory->get(), offset, length));
     }
 
     Result setOutput(uint32_t index, void* buffer, size_t length,
                      const ANeuralNetworksOperandType* type = nullptr) {
         return static_cast<Result>(
-                    ANeuralNetworksRequest_setOutput(mRequest, index, type, buffer, length));
+                    ANeuralNetworksExecution_setOutput(mExecution, index, type, buffer, length));
     }
 
     Result setOutputFromMemory(uint32_t index, const Memory* memory, uint32_t offset,
                                uint32_t length, const ANeuralNetworksOperandType* type = nullptr) {
-        return static_cast<Result>(ANeuralNetworksRequest_setOutputFromMemory(
-                    mRequest, index, type, memory->get(), offset, length));
+        return static_cast<Result>(ANeuralNetworksExecution_setOutputFromMemory(
+                    mExecution, index, type, memory->get(), offset, length));
     }
 
     Result startCompute() {
-        Result result = static_cast<Result>(ANeuralNetworksRequest_startCompute(mRequest));
+        Result result = static_cast<Result>(ANeuralNetworksExecution_startCompute(mExecution));
         return result;
     }
 
     Result wait() {
-        return static_cast<Result>(ANeuralNetworksRequest_wait(mRequest));
+        return static_cast<Result>(ANeuralNetworksExecution_wait(mExecution));
     }
 
     Result compute() {
-        Result result = static_cast<Result>(ANeuralNetworksRequest_startCompute(mRequest));
+        Result result = static_cast<Result>(ANeuralNetworksExecution_startCompute(mExecution));
         if (result != Result::NO_ERROR) {
             return result;
         }
         // TODO how to manage the lifetime of events when multiple waiters is not
         // clear.
-        return static_cast<Result>(ANeuralNetworksRequest_wait(mRequest));
+        return static_cast<Result>(ANeuralNetworksExecution_wait(mExecution));
     }
 
 private:
-    ANeuralNetworksRequest* mRequest = nullptr;
+    ANeuralNetworksExecution* mExecution = nullptr;
 };
 
 }  // namespace wrapper
diff --git a/runtime/test/TestGenerated.cpp b/runtime/test/TestGenerated.cpp
index cbc6470..ccbaf66 100644
--- a/runtime/test/TestGenerated.cpp
+++ b/runtime/test/TestGenerated.cpp
@@ -48,13 +48,13 @@
         for (auto& example : examples) {
             Compilation compilation(&model);
             compilation.compile();
-            Request request(&compilation);
+            Execution execution(&compilation);
 
             // Go through all inputs
             for (auto& i : example.first) {
                 std::vector<T>& input = i.second;
-                request.setInput(i.first, (const void*)input.data(),
-                                 input.size() * sizeof(T));
+                execution.setInput(i.first, (const void*)input.data(),
+                                   input.size() * sizeof(T));
             }
 
             std::map<int, std::vector<T>> test_outputs;
@@ -65,12 +65,12 @@
                 std::vector<T>& output = i.second;
                 test_outputs[i.first].resize(output.size());
                 std::vector<T>& test_output = test_outputs[i.first];
-                request.setOutput(output_no++, (void*)test_output.data(),
-                                  test_output.size() * sizeof(T));
+                execution.setOutput(output_no++, (void*)test_output.data(),
+                                    test_output.size() * sizeof(T));
             }
-            Result r = request.compute();
+            Result r = execution.compute();
             if (r != Result::NO_ERROR)
-                std::cerr << "Request was not completed normally\n";
+                std::cerr << "Execution was not completed normally\n";
             bool mismatch = false;
             for (auto& i : example.second) {
                 const std::vector<T>& test = test_outputs[i.first];
@@ -109,11 +109,11 @@
 
             Compilation compilation(&model);
             compilation.compile();
-            Request request(&compilation);
+            Execution execution(&compilation);
 
             // Go through all ty-typed inputs
-            for_all(inputs, [&request](int idx, auto p, auto s) {
-                ASSERT_EQ(Result::NO_ERROR, request.setInput(idx, p, s));
+            for_all(inputs, [&execution](int idx, auto p, auto s) {
+                ASSERT_EQ(Result::NO_ERROR, execution.setInput(idx, p, s));
             });
 
             MixedTyped test;
@@ -121,11 +121,11 @@
             resize_accordingly<float>(golden, test);
             resize_accordingly<int32_t>(golden, test);
             resize_accordingly<uint8_t>(golden, test);
-            for_all(test, [&request](int idx, void* p, auto s) {
-                ASSERT_EQ(Result::NO_ERROR, request.setOutput(idx, p, s));
+            for_all(test, [&execution](int idx, void* p, auto s) {
+                ASSERT_EQ(Result::NO_ERROR, execution.setOutput(idx, p, s));
             });
 
-            Result r = request.compute();
+            Result r = execution.compute();
             ASSERT_EQ(Result::NO_ERROR, r);
 #define USE_EXPECT_FLOAT_EQ 1
 #ifdef USE_EXPECT_FLOAT_EQ
diff --git a/runtime/test/TestMemory.cpp b/runtime/test/TestMemory.cpp
index d170f34..d3dd345 100644
--- a/runtime/test/TestMemory.cpp
+++ b/runtime/test/TestMemory.cpp
@@ -127,12 +127,12 @@
     Compilation compilation2(&model);
     ASSERT_EQ(compilation2.compile(), Result::NO_ERROR);
 
-    Request request2(&compilation2);
-    ASSERT_EQ(request2.setInputFromMemory(0, &input, offsetForMatrix1, sizeof(Matrix3x4)),
+    Execution execution2(&compilation2);
+    ASSERT_EQ(execution2.setInputFromMemory(0, &input, offsetForMatrix1, sizeof(Matrix3x4)),
               Result::NO_ERROR);
-    ASSERT_EQ(request2.setOutputFromMemory(0, &actual, offsetForActual, sizeof(Matrix3x4)),
+    ASSERT_EQ(execution2.setOutputFromMemory(0, &actual, offsetForActual, sizeof(Matrix3x4)),
               Result::NO_ERROR);
-    ASSERT_EQ(request2.compute(), Result::NO_ERROR);
+    ASSERT_EQ(execution2.compute(), Result::NO_ERROR);
     ASSERT_EQ(CompareMatrices(expected3, *reinterpret_cast<Matrix3x4*>(outputData + offsetForActual)), 0);
 }
 
@@ -177,10 +177,10 @@
     memset(&actual, 0, sizeof(actual));
     Compilation compilation2(&model);
     ASSERT_EQ(compilation2.compile(), Result::NO_ERROR);
-    Request request2(&compilation2);
-    ASSERT_EQ(request2.setInput(0, matrix1, sizeof(Matrix3x4)), Result::NO_ERROR);
-    ASSERT_EQ(request2.setOutput(0, actual, sizeof(Matrix3x4)), Result::NO_ERROR);
-    ASSERT_EQ(request2.compute(), Result::NO_ERROR);
+    Execution execution2(&compilation2);
+    ASSERT_EQ(execution2.setInput(0, matrix1, sizeof(Matrix3x4)), Result::NO_ERROR);
+    ASSERT_EQ(execution2.setOutput(0, actual, sizeof(Matrix3x4)), Result::NO_ERROR);
+    ASSERT_EQ(execution2.compute(), Result::NO_ERROR);
     ASSERT_EQ(CompareMatrices(expected3, actual), 0);
 
     close(fd);
diff --git a/runtime/test/TestTrivialModel.cpp b/runtime/test/TestTrivialModel.cpp
index 41167c9..5c025bc 100644
--- a/runtime/test/TestTrivialModel.cpp
+++ b/runtime/test/TestTrivialModel.cpp
@@ -118,11 +118,11 @@
     memset(&actual, 0, sizeof(actual));
     Compilation compilation(&modelAdd2);
     compilation.compile();
-    Request request(&compilation);
-    ASSERT_EQ(request.setInput(0, matrix1, sizeof(Matrix3x4)), Result::NO_ERROR);
-    ASSERT_EQ(request.setInput(1, matrix2, sizeof(Matrix3x4)), Result::NO_ERROR);
-    ASSERT_EQ(request.setOutput(0, actual, sizeof(Matrix3x4)), Result::NO_ERROR);
-    ASSERT_EQ(request.compute(), Result::NO_ERROR);
+    Execution execution(&compilation);
+    ASSERT_EQ(execution.setInput(0, matrix1, sizeof(Matrix3x4)), Result::NO_ERROR);
+    ASSERT_EQ(execution.setInput(1, matrix2, sizeof(Matrix3x4)), Result::NO_ERROR);
+    ASSERT_EQ(execution.setOutput(0, actual, sizeof(Matrix3x4)), Result::NO_ERROR);
+    ASSERT_EQ(execution.compute(), Result::NO_ERROR);
     ASSERT_EQ(CompareMatrices(expected2, actual), 0);
 }
 
@@ -135,22 +135,22 @@
     memset(&actual, 0, sizeof(actual));
     Compilation compilation2(&modelAdd3);
     compilation2.compile();
-    Request request2(&compilation2);
-    ASSERT_EQ(request2.setInput(0, matrix1, sizeof(Matrix3x4)), Result::NO_ERROR);
-    ASSERT_EQ(request2.setInput(1, matrix2, sizeof(Matrix3x4)), Result::NO_ERROR);
-    ASSERT_EQ(request2.setOutput(0, actual, sizeof(Matrix3x4)), Result::NO_ERROR);
-    ASSERT_EQ(request2.compute(), Result::NO_ERROR);
+    Execution execution2(&compilation2);
+    ASSERT_EQ(execution2.setInput(0, matrix1, sizeof(Matrix3x4)), Result::NO_ERROR);
+    ASSERT_EQ(execution2.setInput(1, matrix2, sizeof(Matrix3x4)), Result::NO_ERROR);
+    ASSERT_EQ(execution2.setOutput(0, actual, sizeof(Matrix3x4)), Result::NO_ERROR);
+    ASSERT_EQ(execution2.compute(), Result::NO_ERROR);
     ASSERT_EQ(CompareMatrices(expected3, actual), 0);
 
     // Test it a second time to make sure the model is reusable.
     memset(&actual, 0, sizeof(actual));
     Compilation compilation3(&modelAdd3);
     compilation3.compile();
-    Request request3(&compilation3);
-    ASSERT_EQ(request3.setInput(0, matrix1, sizeof(Matrix3x4)), Result::NO_ERROR);
-    ASSERT_EQ(request3.setInput(1, matrix1, sizeof(Matrix3x4)), Result::NO_ERROR);
-    ASSERT_EQ(request3.setOutput(0, actual, sizeof(Matrix3x4)), Result::NO_ERROR);
-    ASSERT_EQ(request3.compute(), Result::NO_ERROR);
+    Execution execution3(&compilation3);
+    ASSERT_EQ(execution3.setInput(0, matrix1, sizeof(Matrix3x4)), Result::NO_ERROR);
+    ASSERT_EQ(execution3.setInput(1, matrix1, sizeof(Matrix3x4)), Result::NO_ERROR);
+    ASSERT_EQ(execution3.setOutput(0, actual, sizeof(Matrix3x4)), Result::NO_ERROR);
+    ASSERT_EQ(execution3.compute(), Result::NO_ERROR);
     ASSERT_EQ(CompareMatrices(expected3b, actual), 0);
 }
 
@@ -178,11 +178,11 @@
     memset(&actual, 0, sizeof(actual));
     Compilation compilation(&modelBroadcastAdd2);
     compilation.compile();
-    Request request(&compilation);
-    ASSERT_EQ(request.setInput(0, matrix1, sizeof(Matrix3x4)), Result::NO_ERROR);
-    ASSERT_EQ(request.setInput(1, matrix2b, sizeof(Matrix4)), Result::NO_ERROR);
-    ASSERT_EQ(request.setOutput(0, actual, sizeof(Matrix3x4)), Result::NO_ERROR);
-    ASSERT_EQ(request.compute(), Result::NO_ERROR);
+    Execution execution(&compilation);
+    ASSERT_EQ(execution.setInput(0, matrix1, sizeof(Matrix3x4)), Result::NO_ERROR);
+    ASSERT_EQ(execution.setInput(1, matrix2b, sizeof(Matrix4)), Result::NO_ERROR);
+    ASSERT_EQ(execution.setOutput(0, actual, sizeof(Matrix3x4)), Result::NO_ERROR);
+    ASSERT_EQ(execution.compute(), Result::NO_ERROR);
     ASSERT_EQ(CompareMatrices(expected2b, actual), 0);
 }
 
@@ -210,11 +210,11 @@
     memset(&actual, 0, sizeof(actual));
     Compilation compilation(&modelBroadcastMul2);
     compilation.compile();
-    Request request(&compilation);
-    ASSERT_EQ(request.setInput(0, matrix1, sizeof(Matrix3x4)), Result::NO_ERROR);
-    ASSERT_EQ(request.setInput(1, matrix2b, sizeof(Matrix4)), Result::NO_ERROR);
-    ASSERT_EQ(request.setOutput(0, actual, sizeof(Matrix3x4)), Result::NO_ERROR);
-    ASSERT_EQ(request.compute(), Result::NO_ERROR);
+    Execution execution(&compilation);
+    ASSERT_EQ(execution.setInput(0, matrix1, sizeof(Matrix3x4)), Result::NO_ERROR);
+    ASSERT_EQ(execution.setInput(1, matrix2b, sizeof(Matrix4)), Result::NO_ERROR);
+    ASSERT_EQ(execution.setOutput(0, actual, sizeof(Matrix3x4)), Result::NO_ERROR);
+    ASSERT_EQ(execution.compute(), Result::NO_ERROR);
     ASSERT_EQ(CompareMatrices(expected2c, actual), 0);
 }
 
diff --git a/runtime/test/TestValidation.cpp b/runtime/test/TestValidation.cpp
index 77be491..13aa238 100644
--- a/runtime/test/TestValidation.cpp
+++ b/runtime/test/TestValidation.cpp
@@ -69,7 +69,7 @@
     ANeuralNetworksCompilation* mCompilation = nullptr;
 };
 
-class ValidationTestRequest : public ValidationTestCompilation {
+class ValidationTestExecution : public ValidationTestCompilation {
 protected:
     virtual void SetUp() {
         ValidationTestCompilation::SetUp();
@@ -77,13 +77,14 @@
         ASSERT_EQ(ANeuralNetworksCompilation_start(mCompilation), ANEURALNETWORKS_NO_ERROR);
         ASSERT_EQ(ANeuralNetworksCompilation_wait(mCompilation), ANEURALNETWORKS_NO_ERROR);
 
-        ASSERT_EQ(ANeuralNetworksRequest_create(mCompilation, &mRequest), ANEURALNETWORKS_NO_ERROR);
+        ASSERT_EQ(ANeuralNetworksExecution_create(mCompilation, &mExecution),
+                  ANEURALNETWORKS_NO_ERROR);
     }
     virtual void TearDown() {
-        ANeuralNetworksRequest_free(mRequest);
+        ANeuralNetworksExecution_free(mExecution);
         ValidationTestCompilation::TearDown();
     }
-    ANeuralNetworksRequest* mRequest = nullptr;
+    ANeuralNetworksExecution* mExecution = nullptr;
 };
 
 TEST_F(ValidationTest, CreateModel) {
@@ -176,52 +177,55 @@
               ANEURALNETWORKS_BAD_DATA);
 }
 
-TEST_F(ValidationTestCompilation, CreateRequest) {
-    ANeuralNetworksRequest* request = nullptr;
-    EXPECT_EQ(ANeuralNetworksRequest_create(nullptr, &request), ANEURALNETWORKS_UNEXPECTED_NULL);
-    EXPECT_EQ(ANeuralNetworksRequest_create(mCompilation, nullptr), ANEURALNETWORKS_UNEXPECTED_NULL);
-    // EXPECT_EQ(ANeuralNetworksRequest_create(mCompilation, ANeuralNetworksRequest *
-    // *request),
+TEST_F(ValidationTestCompilation, CreateExecution) {
+    ANeuralNetworksExecution* execution = nullptr;
+    EXPECT_EQ(ANeuralNetworksExecution_create(nullptr, &execution),
+              ANEURALNETWORKS_UNEXPECTED_NULL);
+    EXPECT_EQ(ANeuralNetworksExecution_create(mCompilation, nullptr),
+              ANEURALNETWORKS_UNEXPECTED_NULL);
+    // EXPECT_EQ(ANeuralNetworksExecution_create(mCompilation, ANeuralNetworksExecution *
+    // *execution),
     //          ANEURALNETWORKS_UNEXPECTED_NULL);
 }
 
 #if 0
 // TODO do more..
-TEST_F(ValidationTestRequest, SetInput) {
-    EXPECT_EQ(ANeuralNetworksRequest_setInput(ANeuralNetworksRequest * request, int32_t index,
-                                              const ANeuralNetworksOperandType* type,
-                                              const void* buffer, size_t length),
+TEST_F(ValidationTestExecution, SetInput) {
+    EXPECT_EQ(ANeuralNetworksExecution_setInput(ANeuralNetworksExecution * execution, int32_t index,
+                                                const ANeuralNetworksOperandType* type,
+                                                const void* buffer, size_t length),
               ANEURALNETWORKS_UNEXPECTED_NULL);
 }
 
-TEST_F(ValidationTestRequest, SetInputFromMemory) {
-    EXPECT_EQ(ANeuralNetworksRequest_setInputFromMemory(ANeuralNetworksRequest * request,
-                                                        int32_t index,
-                                                        const ANeuralNetworksOperandType* type,
-                                                        const ANeuralNetworksMemory* buffer,
-                                                        uint32_t offset),
+TEST_F(ValidationTestExecution, SetInputFromMemory) {
+    EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(ANeuralNetworksExecution * execution,
+                                                          int32_t index,
+                                                          const ANeuralNetworksOperandType* type,
+                                                          const ANeuralNetworksMemory* buffer,
+                                                          uint32_t offset),
               ANEURALNETWORKS_UNEXPECTED_NULL);
 }
 
-TEST_F(ValidationTestRequest, SetOutput) {
-    EXPECT_EQ(ANeuralNetworksRequest_setOutput(ANeuralNetworksRequest * request, int32_t index,
-                                               const ANeuralNetworksOperandType* type, void* buffer,
-                                               size_t length),
+TEST_F(ValidationTestExecution, SetOutput) {
+    EXPECT_EQ(ANeuralNetworksExecution_setOutput(ANeuralNetworksExecution * execution,
+                                                 int32_t index,
+                                                 const ANeuralNetworksOperandType* type,
+                                                 void* buffer, size_t length),
               ANEURALNETWORKS_UNEXPECTED_NULL);
 }
 
-TEST_F(ValidationTestRequest, SetOutputFromMemory) {
-    EXPECT_EQ(ANeuralNetworksRequest_setOutputFromMemory(ANeuralNetworksRequest * request,
-                                                         int32_t index,
-                                                         const ANeuralNetworksOperandType* type,
-                                                         const ANeuralNetworksMemory* buffer,
-                                                         uint32_t offset),
+TEST_F(ValidationTestExecution, SetOutputFromMemory) {
+    EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(ANeuralNetworksExecution * execution,
+                                                           int32_t index,
+                                                           const ANeuralNetworksOperandType* type,
+                                                           const ANeuralNetworksMemory* buffer,
+                                                           uint32_t offset),
               ANEURALNETWORKS_UNEXPECTED_NULL);
 }
 
-TEST_F(ValidationTestRequest, StartCompute) {
-    EXPECT_EQ(ANeuralNetworksRequest_startCompute(ANeuralNetworksRequest * request,
-                                                  ANeuralNetworksEvent * *event),
+TEST_F(ValidationTestExecution, StartCompute) {
+    EXPECT_EQ(ANeuralNetworksExecution_startCompute(ANeuralNetworksExecution * execution,
+                                                    ANeuralNetworksEvent * *event),
               ANEURALNETWORKS_UNEXPECTED_NULL);
 }