Upgrade 1.0 sample driver to 1.1

This CL upgrades the 1.0 sample drivers to 1.1. Because V1_1::IDevice
inherits from V1_0::IDevice, the sample drivers can still be used as
1.0 drivers. When the NN runtime holds a 1.1 driver, it always calls
the updated *_1_1 methods; to verify that the NN runtime still works
with 1.0 drivers, the VTS framework generates a binary called
'android.hardware.neuralnetworks@1.0-adapter' that wraps a 1.1+ driver
and presents it as a 1.0 driver.
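
As shown in SampleDriver.cpp below, the 1.0 getSupportedOperations and
prepareModel entry points now upconvert the model and forward to their
1.1 counterparts; a minimal sketch of that pattern:

    Return<void> SampleDriver::getSupportedOperations(
            const V1_0::Model& model, getSupportedOperations_cb cb) {
        // Reuse the 1.1 implementation on an upconverted model.
        return getSupportedOperations_1_1(convertToV1_1(model), cb);
    }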

This change also updates RandomPartitioningTest to use the 1.1 version
of SampleDriver, and removes driver/Android.bp since the build system
no longer needs it.

Bug: 63911257
Test: mm
Test: cts and vts test binaries

Merged-In: I1f98d1329571a4571c1b8d5f532ca04dad5a59ca
Change-Id: I1f98d1329571a4571c1b8d5f532ca04dad5a59ca
(cherry picked from commit 12b72790cce889e6a85bfc62b5e22f993130ad3c)
diff --git a/driver/Android.bp b/driver/Android.bp
deleted file mode 100644
index 53d39d3..0000000
--- a/driver/Android.bp
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Copyright 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-subdirs = [
-    "cache",
-    "sample",
-]
diff --git a/driver/sample/Android.bp b/driver/sample/Android.bp
index d7466a4..d193b16 100644
--- a/driver/sample/Android.bp
+++ b/driver/sample/Android.bp
@@ -52,35 +52,35 @@
 }
 
 cc_binary {
-    name: "android.hardware.neuralnetworks@1.0-service-sample-all",
+    name: "android.hardware.neuralnetworks@1.1-service-sample-all",
     defaults: ["NeuralNetworksSampleDriver_server_defaults"],
     init_rc: ["SampleDriverAll.rc"],
     srcs: ["SampleDriverAll.cpp"]
 }
 
 cc_binary {
-    name: "android.hardware.neuralnetworks@1.0-service-sample-float-fast",
+    name: "android.hardware.neuralnetworks@1.1-service-sample-float-fast",
     defaults: ["NeuralNetworksSampleDriver_server_defaults"],
     init_rc: ["SampleDriverFloatFast.rc"],
     srcs: ["SampleDriverFloatFast.cpp"]
 }
 
 cc_binary {
-    name: "android.hardware.neuralnetworks@1.0-service-sample-float-slow",
+    name: "android.hardware.neuralnetworks@1.1-service-sample-float-slow",
     defaults: ["NeuralNetworksSampleDriver_server_defaults"],
     init_rc: ["SampleDriverFloatSlow.rc"],
     srcs: ["SampleDriverFloatSlow.cpp"]
 }
 
 cc_binary {
-    name: "android.hardware.neuralnetworks@1.0-service-sample-quant",
+    name: "android.hardware.neuralnetworks@1.1-service-sample-quant",
     defaults: ["NeuralNetworksSampleDriver_server_defaults"],
     init_rc: ["SampleDriverQuant.rc"],
     srcs: ["SampleDriverQuant.cpp"]
 }
 
 cc_binary {
-    name: "android.hardware.neuralnetworks@1.0-service-sample-minimal",
+    name: "android.hardware.neuralnetworks@1.1-service-sample-minimal",
     defaults: ["NeuralNetworksSampleDriver_server_defaults"],
     init_rc: ["SampleDriverMinimal.rc"],
     srcs: ["SampleDriverMinimal.cpp"]
diff --git a/driver/sample/SampleDriver.cpp b/driver/sample/SampleDriver.cpp
index bc172ec..3aea5e3 100644
--- a/driver/sample/SampleDriver.cpp
+++ b/driver/sample/SampleDriver.cpp
@@ -30,8 +30,20 @@
 namespace nn {
 namespace sample_driver {
 
+Return<void> SampleDriver::getSupportedOperations(const V1_0::Model& model,
+                                                  getSupportedOperations_cb cb) {
+    // TODO(butlermichael): Do we validate Model as V1_0?
+    return getSupportedOperations_1_1(convertToV1_1(model), cb);
+}
+
 Return<ErrorStatus> SampleDriver::prepareModel(const V1_0::Model& model,
                                                const sp<IPreparedModelCallback>& callback) {
+    // TODO(butlermichael): Do we validate model as V1_0?
+    return prepareModel_1_1(convertToV1_1(model), callback);
+}
+
+Return<ErrorStatus> SampleDriver::prepareModel_1_1(const V1_1::Model& model,
+                                                   const sp<IPreparedModelCallback>& callback) {
     if (VLOG_IS_ON(DRIVER)) {
         VLOG(DRIVER) << "prepareModel";
         logModelToInfo(model);
diff --git a/driver/sample/SampleDriver.h b/driver/sample/SampleDriver.h
index 8052de2..b2f4399 100644
--- a/driver/sample/SampleDriver.h
+++ b/driver/sample/SampleDriver.h
@@ -32,12 +32,16 @@
 //
 // Since these drivers simulate hardware, they must run the computations
 // on the CPU.  An actual driver would not do that.
-class SampleDriver : public V1_0::IDevice {
+class SampleDriver : public IDevice {
 public:
     SampleDriver(const char* name) : mName(name) {}
     ~SampleDriver() override {}
+    Return<void> getSupportedOperations(const V1_0::Model& model,
+                                        getSupportedOperations_cb cb) override;
     Return<ErrorStatus> prepareModel(const V1_0::Model& model,
                                      const sp<IPreparedModelCallback>& callback) override;
+    Return<ErrorStatus> prepareModel_1_1(const V1_1::Model& model,
+                                         const sp<IPreparedModelCallback>& callback) override;
     Return<DeviceStatus> getStatus() override;
 
     // Starts and runs the driver service.  Typically called from main().
@@ -49,7 +53,7 @@
 
 class SamplePreparedModel : public IPreparedModel {
 public:
-    SamplePreparedModel(const V1_0::Model& model) : mModel(model) {}
+    SamplePreparedModel(const Model& model) : mModel(model) {}
     ~SamplePreparedModel() override {}
     bool initialize();
     Return<ErrorStatus> execute(const Request& request,
@@ -58,7 +62,7 @@
 private:
     void asyncExecute(const Request& request, const sp<IExecutionCallback>& callback);
 
-    V1_0::Model mModel;
+    Model mModel;
     std::vector<RunTimePoolInfo> mPoolInfos;
 };
 
diff --git a/driver/sample/SampleDriverAll.cpp b/driver/sample/SampleDriverAll.cpp
index 7ab4efd..03f4bc9 100644
--- a/driver/sample/SampleDriverAll.cpp
+++ b/driver/sample/SampleDriverAll.cpp
@@ -34,8 +34,8 @@
 public:
     SampleDriverAll() : SampleDriver("sample-all") {}
     Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override;
-    Return<void> getSupportedOperations(const V1_0::Model& model,
-                                        getSupportedOperations_cb cb) override;
+    Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
+                                            getSupportedOperations_1_1_cb cb) override;
 };
 
 Return<void> SampleDriverAll::getCapabilities(getCapabilities_cb cb) {
@@ -47,8 +47,8 @@
     return Void();
 }
 
-Return<void> SampleDriverAll::getSupportedOperations(const V1_0::Model& model,
-                                                     getSupportedOperations_cb cb) {
+Return<void> SampleDriverAll::getSupportedOperations_1_1(const V1_1::Model& model,
+                                                         getSupportedOperations_1_1_cb cb) {
     VLOG(DRIVER) << "getSupportedOperations()";
     if (validateModel(model)) {
         const size_t count = model.operations.size();
diff --git a/driver/sample/SampleDriverAll.rc b/driver/sample/SampleDriverAll.rc
index 7a9ad2a..4c29912 100644
--- a/driver/sample/SampleDriverAll.rc
+++ b/driver/sample/SampleDriverAll.rc
@@ -1,4 +1,4 @@
-service neuralnetworks_hal_service_sample_all /vendor/bin/hw/android.hardware.neuralnetworks@1.0-service-sample-all
+service neuralnetworks_hal_service_sample_all /vendor/bin/hw/android.hardware.neuralnetworks@1.1-service-sample-all
     class hal
     user system
     group system
diff --git a/driver/sample/SampleDriverFloatFast.cpp b/driver/sample/SampleDriverFloatFast.cpp
index 62619e4..0ccc694 100644
--- a/driver/sample/SampleDriverFloatFast.cpp
+++ b/driver/sample/SampleDriverFloatFast.cpp
@@ -34,8 +34,8 @@
 public:
     SampleDriverFloatFast() : SampleDriver("sample-float-fast") {}
     Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override;
-    Return<void> getSupportedOperations(const V1_0::Model& model,
-                                        getSupportedOperations_cb cb) override;
+    Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
+                                            getSupportedOperations_1_1_cb cb) override;
 };
 
 Return<void> SampleDriverFloatFast::getCapabilities(getCapabilities_cb cb) {
@@ -47,14 +47,14 @@
     return Void();
 }
 
-Return<void> SampleDriverFloatFast::getSupportedOperations(const V1_0::Model& model,
-                                                           getSupportedOperations_cb cb) {
+Return<void> SampleDriverFloatFast::getSupportedOperations_1_1(const V1_1::Model& model,
+                                                               getSupportedOperations_1_1_cb cb) {
     VLOG(DRIVER) << "getSupportedOperations()";
     if (validateModel(model)) {
         const size_t count = model.operations.size();
         std::vector<bool> supported(count);
         for (size_t i = 0; i < count; i++) {
-            const V1_0::Operation& operation = model.operations[i];
+            const Operation& operation = model.operations[i];
             if (operation.inputs.size() > 0) {
                 const Operand& firstOperand = model.operands[operation.inputs[0]];
                 supported[i] = firstOperand.type == OperandType::TENSOR_FLOAT32;
diff --git a/driver/sample/SampleDriverFloatFast.rc b/driver/sample/SampleDriverFloatFast.rc
index aec1645..7363a03 100644
--- a/driver/sample/SampleDriverFloatFast.rc
+++ b/driver/sample/SampleDriverFloatFast.rc
@@ -1,4 +1,4 @@
-service neuralnetworks_hal_service_sample_float_fast /vendor/bin/hw/android.hardware.neuralnetworks@1.0-service-sample-float-fast
+service neuralnetworks_hal_service_sample_float_fast /vendor/bin/hw/android.hardware.neuralnetworks@1.1-service-sample-float-fast
     class hal
     user system
     group system
diff --git a/driver/sample/SampleDriverFloatSlow.cpp b/driver/sample/SampleDriverFloatSlow.cpp
index 6674079..46659f2 100644
--- a/driver/sample/SampleDriverFloatSlow.cpp
+++ b/driver/sample/SampleDriverFloatSlow.cpp
@@ -34,8 +34,8 @@
 public:
     SampleDriverFloatSlow() : SampleDriver("sample-float-slow") {}
     Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override;
-    Return<void> getSupportedOperations(const V1_0::Model& model,
-                                        getSupportedOperations_cb cb) override;
+    Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
+                                            getSupportedOperations_1_1_cb cb) override;
 };
 
 Return<void> SampleDriverFloatSlow::getCapabilities(getCapabilities_cb cb) {
@@ -47,14 +47,14 @@
     return Void();
 }
 
-Return<void> SampleDriverFloatSlow::getSupportedOperations(const V1_0::Model& model,
-                                                           getSupportedOperations_cb cb) {
+Return<void> SampleDriverFloatSlow::getSupportedOperations_1_1(const V1_1::Model& model,
+                                                               getSupportedOperations_1_1_cb cb) {
     VLOG(DRIVER) << "getSupportedOperations()";
     if (validateModel(model)) {
         const size_t count = model.operations.size();
         std::vector<bool> supported(count);
         for (size_t i = 0; i < count; i++) {
-            const V1_0::Operation& operation = model.operations[i];
+            const Operation& operation = model.operations[i];
             if (operation.inputs.size() > 0) {
                 const Operand& firstOperand = model.operands[operation.inputs[0]];
                 supported[i] = firstOperand.type == OperandType::TENSOR_FLOAT32;
diff --git a/driver/sample/SampleDriverFloatSlow.rc b/driver/sample/SampleDriverFloatSlow.rc
index dcfa122..630487d 100644
--- a/driver/sample/SampleDriverFloatSlow.rc
+++ b/driver/sample/SampleDriverFloatSlow.rc
@@ -1,4 +1,4 @@
-service neuralnetworks_hal_service_sample_float_slow /vendor/bin/hw/android.hardware.neuralnetworks@1.0-service-sample-float-slow
+service neuralnetworks_hal_service_sample_float_slow /vendor/bin/hw/android.hardware.neuralnetworks@1.1-service-sample-float-slow
     class hal
     user system
     group system
diff --git a/driver/sample/SampleDriverMinimal.cpp b/driver/sample/SampleDriverMinimal.cpp
index d15427f..bf78c97 100644
--- a/driver/sample/SampleDriverMinimal.cpp
+++ b/driver/sample/SampleDriverMinimal.cpp
@@ -35,8 +35,8 @@
 public:
     SampleDriverMinimal() : SampleDriver("sample-minimal") {}
     Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override;
-    Return<void> getSupportedOperations(const V1_0::Model& model,
-                                        getSupportedOperations_cb cb) override;
+    Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
+                                            getSupportedOperations_1_1_cb cb) override;
 };
 
 Return<void> SampleDriverMinimal::getCapabilities(getCapabilities_cb cb) {
@@ -48,8 +48,8 @@
     return Void();
 }
 
-Return<void> SampleDriverMinimal::getSupportedOperations(const V1_0::Model& model,
-                                                         getSupportedOperations_cb cb) {
+Return<void> SampleDriverMinimal::getSupportedOperations_1_1(const V1_1::Model& model,
+                                                             getSupportedOperations_1_1_cb cb) {
     VLOG(DRIVER) << "getSupportedOperations()";
     if (validateModel(model)) {
         const size_t count = model.operations.size();
@@ -57,11 +57,11 @@
         // Simulate supporting just a few ops
         for (size_t i = 0; i < count; i++) {
             supported[i] = false;
-            const V1_0::Operation& operation = model.operations[i];
+            const Operation& operation = model.operations[i];
             switch (operation.type) {
-                case V1_0::OperationType::ADD:
-                case V1_0::OperationType::CONCATENATION:
-                case V1_0::OperationType::CONV_2D: {
+                case OperationType::ADD:
+                case OperationType::CONCATENATION:
+                case OperationType::CONV_2D: {
                     const Operand& firstOperand = model.operands[operation.inputs[0]];
                     if (firstOperand.type == OperandType::TENSOR_FLOAT32) {
                         supported[i] = true;
diff --git a/driver/sample/SampleDriverMinimal.rc b/driver/sample/SampleDriverMinimal.rc
index d6464b5..205e416 100644
--- a/driver/sample/SampleDriverMinimal.rc
+++ b/driver/sample/SampleDriverMinimal.rc
@@ -1,4 +1,4 @@
-service neuralnetworks_hal_service_sample_minimal /vendor/bin/hw/android.hardware.neuralnetworks@1.0-service-sample-minimal
+service neuralnetworks_hal_service_sample_minimal /vendor/bin/hw/android.hardware.neuralnetworks@1.1-service-sample-minimal
     class hal
     user system
     group system
diff --git a/driver/sample/SampleDriverQuant.cpp b/driver/sample/SampleDriverQuant.cpp
index 58fabeb..e1915bb 100644
--- a/driver/sample/SampleDriverQuant.cpp
+++ b/driver/sample/SampleDriverQuant.cpp
@@ -34,8 +34,8 @@
 public:
     SampleDriverQuant() : SampleDriver("sample-quant") {}
     Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override;
-    Return<void> getSupportedOperations(const V1_0::Model& model,
-                                        getSupportedOperations_cb cb) override;
+    Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
+                                            getSupportedOperations_1_1_cb cb) override;
 };
 
 Return<void> SampleDriverQuant::getCapabilities(getCapabilities_cb cb) {
@@ -47,14 +47,14 @@
     return Void();
 }
 
-Return<void> SampleDriverQuant::getSupportedOperations(const V1_0::Model& model,
-                                                       getSupportedOperations_cb cb) {
+Return<void> SampleDriverQuant::getSupportedOperations_1_1(const V1_1::Model& model,
+                                                           getSupportedOperations_1_1_cb cb) {
     VLOG(DRIVER) << "getSupportedOperations()";
     if (validateModel(model)) {
         const size_t count = model.operations.size();
         std::vector<bool> supported(count);
         for (size_t i = 0; i < count; i++) {
-            const V1_0::Operation& operation = model.operations[i];
+            const Operation& operation = model.operations[i];
             if (operation.inputs.size() > 0) {
                 const Operand& firstOperand = model.operands[operation.inputs[0]];
                 supported[i] = firstOperand.type == OperandType::TENSOR_QUANT8_ASYMM;
diff --git a/driver/sample/SampleDriverQuant.rc b/driver/sample/SampleDriverQuant.rc
index 6c2034a..928707f 100644
--- a/driver/sample/SampleDriverQuant.rc
+++ b/driver/sample/SampleDriverQuant.rc
@@ -1,4 +1,4 @@
-service neuralnetworks_hal_service_sample_quant /vendor/bin/hw/android.hardware.neuralnetworks@1.0-service-sample-quant
+service neuralnetworks_hal_service_sample_quant /vendor/bin/hw/android.hardware.neuralnetworks@1.1-service-sample-quant
     class hal
     user system
     group system
diff --git a/runtime/test/TestPartitioningRandom.cpp b/runtime/test/TestPartitioningRandom.cpp
index a89730b..6f160a2 100644
--- a/runtime/test/TestPartitioningRandom.cpp
+++ b/runtime/test/TestPartitioningRandom.cpp
@@ -489,10 +489,8 @@
         return Void();
     }
 
-    Return<void> getSupportedOperations(const V1_0::Model& modelV1_0,
-                                        getSupportedOperations_cb cb) override {
-        V1_1::Model model = android::nn::convertToV1_1(modelV1_0);
-
+    Return<void> getSupportedOperations_1_1(const HidlModel& model,
+                                            getSupportedOperations_1_1_cb cb) override {
         if (nn::validateModel(model)) {
             const size_t count = model.operations.size();
             std::vector<bool> supported(count);
@@ -511,11 +509,11 @@
         return Void();
     }
 
-    Return<ErrorStatus> prepareModel(const V1_0::Model& model,
-                                     const sp<IPreparedModelCallback>& callback) override {
+    Return<ErrorStatus> prepareModel_1_1(const HidlModel& model,
+                                         const sp<IPreparedModelCallback>& callback) override {
         // NOTE: We verify that all operations in the model are supported.
         ErrorStatus outStatus = ErrorStatus::INVALID_ARGUMENT;
-        auto ret = getSupportedOperations(
+        auto ret = getSupportedOperations_1_1(
             model,
             [&outStatus](ErrorStatus inStatus, const hidl_vec<bool>& supportedOperations) {
                 if (inStatus == ErrorStatus::NONE) {
@@ -526,7 +524,7 @@
                 }
             });
         if (ret.isOk() && (outStatus == ErrorStatus::NONE)) {
-            return SampleDriver::prepareModel(model, callback);
+            return SampleDriver::prepareModel_1_1(model, callback);
         } else {
             callback->notify(ErrorStatus::INVALID_ARGUMENT, nullptr);
             return ErrorStatus::INVALID_ARGUMENT;