Add HAL version 1.3 and add TENSOR_QUANT8_ASYMM_SIGNED OperandType
Bug: 139120468
Bug: 137828494
Test: NeuralNetworksTest_static and VtsHalNeuralnetworksV1_2TargetTest
Change-Id: Iefcaf09145753facb2dc58bbc48e1e597c658b7c
Merged-In: Iefcaf09145753facb2dc58bbc48e1e597c658b7c
(cherry picked from commit 4963dfaf1e25f10b1e6389971d88fbac6ebd4e11)
diff --git a/common/Android.bp b/common/Android.bp
index 3f4b8ee..bcbda01 100644
--- a/common/Android.bp
+++ b/common/Android.bp
@@ -85,6 +85,7 @@
"android.hardware.neuralnetworks@1.0",
"android.hardware.neuralnetworks@1.1",
"android.hardware.neuralnetworks@1.2",
+ "android.hardware.neuralnetworks@1.3",
"android.hidl.allocator@1.0",
"android.hidl.memory@1.0",
"libfmq",
@@ -163,6 +164,7 @@
"android.hardware.neuralnetworks@1.0",
"android.hardware.neuralnetworks@1.1",
"android.hardware.neuralnetworks@1.2",
+ "android.hardware.neuralnetworks@1.3",
"android.hidl.allocator@1.0",
"android.hidl.memory@1.0",
"libbase",
@@ -224,6 +226,7 @@
"android.hardware.neuralnetworks@1.0",
"android.hardware.neuralnetworks@1.1",
"android.hardware.neuralnetworks@1.2",
+ "android.hardware.neuralnetworks@1.3",
"libbase",
"libgmock",
"liblog",
diff --git a/common/MetaModel.cpp b/common/MetaModel.cpp
index 6c921ca..4cc3ac9 100644
--- a/common/MetaModel.cpp
+++ b/common/MetaModel.cpp
@@ -18,10 +18,6 @@
#include "MetaModel.h"
-#include "GraphDump.h"
-#include "HalInterfaces.h"
-#include "Utils.h"
-
#include <algorithm>
#include <map>
#include <set>
@@ -29,6 +25,10 @@
#include <type_traits>
#include <utility>
+#include "GraphDump.h"
+#include "HalInterfaces.h"
+#include "Utils.h"
+
namespace android::nn {
using namespace hal;
@@ -74,6 +74,10 @@
struct ModelVersion<hal::V1_2::Model> {
static constexpr char name[] = "V1_2";
};
+template <>
+struct ModelVersion<hal::V1_3::Model> {
+ static constexpr char name[] = "V1_3";
+};
// Dispatcher mechanism for calling an appropriate uncheckedConvertToV1_*
// given the desired return type.
@@ -87,6 +91,10 @@
hal::V1_1::OperationType uncheckedConvertTo<hal::V1_1::OperationType>(OperationType type) {
return uncheckedConvertToV1_1(type);
}
+template <>
+hal::V1_2::OperationType uncheckedConvertTo<hal::V1_2::OperationType>(OperationType type) {
+ return type;
+}
// Dispatcher mechanism for calling an appropriate convertToV1_* given the
// desired return type. Note that there is no V1_1::Operand type.
@@ -96,22 +104,31 @@
hal::V1_0::Operand convertTo<hal::V1_0::Operand>(Operand operand) {
return convertToV1_0(operand);
}
+template <>
+hal::V1_2::Operand convertTo<hal::V1_2::Operand>(Operand operand) {
+ return convertToV1_2(operand);
+}
// Dispatcher mechanism for calling an appropriate compliantWithV1_* given the
// desired target model type.
template <typename T_SlicedModel>
-void getNoncompliantOperations(const hal::V1_2::Model& model,
+void getNoncompliantOperations(const hal::V1_3::Model& model,
std::set<uint32_t>* noncompliantOperations);
template <>
-void getNoncompliantOperations<hal::V1_0::Model>(const hal::V1_2::Model& model,
+void getNoncompliantOperations<hal::V1_0::Model>(const hal::V1_3::Model& model,
std::set<uint32_t>* noncompliantOperations) {
compliantWithV1_0(model, noncompliantOperations);
}
template <>
-void getNoncompliantOperations<hal::V1_1::Model>(const hal::V1_2::Model& model,
+void getNoncompliantOperations<hal::V1_1::Model>(const hal::V1_3::Model& model,
std::set<uint32_t>* noncompliantOperations) {
compliantWithV1_1(model, noncompliantOperations);
}
+template <>
+void getNoncompliantOperations<hal::V1_2::Model>(const hal::V1_3::Model& model,
+ std::set<uint32_t>* noncompliantOperations) {
+ compliantWithV1_2(model, noncompliantOperations);
+}
template <class T_SlicedModel>
bool invalid(const T_SlicedModel& model, bool strictSlicing) {
@@ -165,6 +182,8 @@
Slice<hal::V1_0::Model>* slice) const;
template MetaModel::ReturnedSlice<hal::V1_1::Model> MetaModel::getSlice(
Slice<hal::V1_1::Model>* slice) const;
+template MetaModel::ReturnedSlice<hal::V1_2::Model> MetaModel::getSlice(
+ Slice<hal::V1_2::Model>* slice) const;
// Utility class for makeSlice().
//
@@ -474,7 +493,7 @@
{
std::ostringstream toName;
toName << "Slice: To " << ModelVersion<decltype(slice.mHidlModel)>::name;
- graphDump(toName.str().c_str(), convertToV1_2(slice.mHidlModel));
+ graphDump(toName.str().c_str(), convertToV1_3(slice.mHidlModel));
}
}
diff --git a/common/Utils.cpp b/common/Utils.cpp
index 0b46d5e..c1970c8 100644
--- a/common/Utils.cpp
+++ b/common/Utils.cpp
@@ -18,17 +18,24 @@
#include "Utils.h"
-#include "NeuralNetworks.h"
-#include "NeuralNetworksOEM.h"
-#include "OperationResolver.h"
-#include "ValidateHal.h"
-
#include <android-base/logging.h>
#include <android-base/properties.h>
#include <android-base/strings.h>
#include <sys/system_properties.h>
+
#include <algorithm>
+#include <limits>
+#include <set>
+#include <string>
+#include <tuple>
#include <unordered_map>
+#include <utility>
+#include <vector>
+
+#include "NeuralNetworks.h"
+#include "NeuralNetworksOEM.h"
+#include "OperationResolver.h"
+#include "ValidateHal.h"
namespace android {
namespace nn {
@@ -226,7 +233,7 @@
4, // ANEURALNETWORKS_UINT32
4, // ANEURALNETWORKS_TENSOR_FLOAT32
4, // ANEURALNETWORKS_TENSOR_INT32
- 1, // ANEURALNETWORKS_TENSOR_SYMMETRICAL_QUANT8
+ 1, // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM
1, // ANEURALNETWORKS_BOOL
2, // ANEURALNETWORKS_TENSOR_QUANT16_SYMM
2, // ANEURALNETWORKS_TENSOR_FLOAT16
@@ -235,6 +242,7 @@
1, // ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL
2, // ANEURALNETWORKS_TENSOR_QUANT16_ASYMM
1, // ANEURALNETWORKS_TENSOR_QUANT8_SYMM
+ 1, // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED
};
static_assert(COUNT(kSizeOfDataType) == kNumberOfDataTypes, "kSizeOfDataType is incorrect");
@@ -245,7 +253,7 @@
true, // ANEURALNETWORKS_UINT32
false, // ANEURALNETWORKS_TENSOR_FLOAT32
false, // ANEURALNETWORKS_TENSOR_INT32
- false, // ANEURALNETWORKS_TENSOR_SYMMETRICAL_QUANT8
+ false, // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM
true, // ANEURALNETWORKS_BOOL
false, // ANEURALNETWORKS_TENSOR_QUANT16_SYMM
false, // ANEURALNETWORKS_TENSOR_FLOAT16
@@ -254,6 +262,7 @@
false, // ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL
false, // ANEURALNETWORKS_TENSOR_QUANT16_ASYMM
false, // ANEURALNETWORKS_TENSOR_QUANT8_SYMM
+ false, // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED
};
static_assert(COUNT(kScalarDataType) == kNumberOfDataTypes, "kScalarDataType is incorrect");
@@ -349,6 +358,30 @@
LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools));
}
+void logModelToInfo(const V1_2::Model& model) {
+ LOG(INFO) << "V1_2::Model start";
+ LOG(INFO) << "operands" << toString(model.operands);
+ LOG(INFO) << "operations" << toString(model.operations);
+ LOG(INFO) << "inputIndexes" << toString(model.inputIndexes);
+ LOG(INFO) << "outputIndexes" << toString(model.outputIndexes);
+ LOG(INFO) << "operandValues size" << model.operandValues.size();
+ LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools));
+ LOG(INFO) << "relaxComputationFloat32toFloat16" << model.relaxComputationFloat32toFloat16;
+ LOG(INFO) << "extensionNameToPrefix" << toString(model.extensionNameToPrefix);
+}
+
+void logModelToInfo(const V1_3::Model& model) {
+ LOG(INFO) << "V1_3::Model start";
+ LOG(INFO) << "operands" << toString(model.operands);
+ LOG(INFO) << "operations" << toString(model.operations);
+ LOG(INFO) << "inputIndexes" << toString(model.inputIndexes);
+ LOG(INFO) << "outputIndexes" << toString(model.outputIndexes);
+ LOG(INFO) << "operandValues size" << model.operandValues.size();
+ LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools));
+ LOG(INFO) << "relaxComputationFloat32toFloat16" << model.relaxComputationFloat32toFloat16;
+ LOG(INFO) << "extensionNameToPrefix" << toString(model.extensionNameToPrefix);
+}
+
bool validateOperandSymmPerChannelQuantParams(
const Operand& halOperand, const ANeuralNetworksSymmPerChannelQuantParams& channelQuant,
const char* tag) {
@@ -380,6 +413,14 @@
return true;
}
+static bool validateQuant8AsymmSignedParams(const ANeuralNetworksOperandType& type,
+ const char* tag) {
+ NN_RET_CHECK(-128 <= type.zeroPoint && type.zeroPoint <= 127)
+ << tag << " invalid zeroPoint: " << type.zeroPoint;
+ NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale";
+ return true;
+}
+
static bool validateQuant8SymmParams(const ANeuralNetworksOperandType& type, const char* tag) {
NN_RET_CHECK_EQ(type.zeroPoint, 0) << tag << " invalid zeroPoint: " << type.zeroPoint;
NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale";
@@ -447,6 +488,8 @@
NN_RET_CHECK(validateTensorDimensions(type, tag, allowPartial));
if (type.type == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM) {
NN_RET_CHECK(validateQuant8AsymmParams(type, tag));
+ } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED) {
+ NN_RET_CHECK(validateQuant8AsymmSignedParams(type, tag));
} else if (type.type == ANEURALNETWORKS_TENSOR_QUANT8_SYMM) {
NN_RET_CHECK(validateQuant8SymmParams(type, tag));
} else if (type.type == ANEURALNETWORKS_TENSOR_QUANT16_ASYMM) {
@@ -1763,21 +1806,23 @@
return {n, std::move(outputShapes), timing};
}
-// V1_2::Capabilities::operandPerformance utilities.
-// The field V1_2::Capabilities::operandPerformance is a vector sorted by the
-// field V1_2::Capabilities::OperandPerformance::type.
+// Capabilities::operandPerformance utilities.
+// The field Capabilities::operandPerformance is a vector sorted by the field
+// Capabilities::OperandPerformance::type.
-hidl_vec<Capabilities::OperandPerformance> nonExtensionOperandPerformance(PerformanceInfo perf) {
- using OpPerf = Capabilities::OperandPerformance;
+template <HalVersion version>
+hidl_vec<VersionedOperandPerformance<version>> nonExtensionOperandPerformance(
+ PerformanceInfo perf) {
+ using OpPerf = VersionedOperandPerformance<version>;
// Note: range presents enumerators in declaration order, not in numerical order.
- static constexpr hidl_enum_range<OperandType> kOperandTypeRange;
+ static constexpr hidl_enum_range<VersionedOperandType<version>> kOperandTypeRange;
hidl_vec<OpPerf> ret(kOperandTypeRange.end() - kOperandTypeRange.begin());
std::transform(kOperandTypeRange.begin(), kOperandTypeRange.end(), ret.begin(),
- [perf](OperandType type) {
- return Capabilities::OperandPerformance{type, perf};
+ [perf](VersionedOperandType<version> type) {
+ return OpPerf{type, perf};
});
std::sort(ret.begin(), ret.end(),
[](const OpPerf& a, const OpPerf& b) { return a.type < b.type; });
@@ -1785,22 +1830,42 @@
return ret;
}
-void update(hidl_vec<Capabilities::OperandPerformance>* operandPerformance, OperandType type,
- PerformanceInfo perf) {
+template hal::hidl_vec<V1_2::Capabilities::OperandPerformance>
+nonExtensionOperandPerformance<HalVersion::V1_2>(PerformanceInfo perf);
+template hal::hidl_vec<V1_3::Capabilities::OperandPerformance>
+nonExtensionOperandPerformance<HalVersion::V1_3>(PerformanceInfo perf);
+
+template <HalVersion version>
+void update(hal::hidl_vec<VersionedOperandPerformance<version>>* operandPerformance,
+ VersionedOperandType<version> type, hal::PerformanceInfo perf) {
CHECK(operandPerformance != nullptr);
- const auto it = std::lower_bound(operandPerformance->begin(), operandPerformance->end(), type,
- [](const Capabilities::OperandPerformance& perf,
- OperandType type) { return perf.type < type; });
+ const auto it =
+ std::lower_bound(operandPerformance->begin(), operandPerformance->end(), type,
+ [](const VersionedOperandPerformance<version>& perf,
+ VersionedOperandType<version> type) { return perf.type < type; });
CHECK(it != operandPerformance->end())
<< toString(type) << " not in " << toString(*operandPerformance);
it->info = perf;
}
-PerformanceInfo lookup(const hidl_vec<Capabilities::OperandPerformance>& operandPerformance,
- OperandType type) {
+void update(hidl_vec<V1_2::Capabilities::OperandPerformance>* operandPerformance,
+ V1_2::OperandType type, PerformanceInfo perf) {
+ update<HalVersion::V1_2>(operandPerformance, type, perf);
+}
+void update(hidl_vec<V1_3::Capabilities::OperandPerformance>* operandPerformance,
+ V1_3::OperandType type, PerformanceInfo perf) {
+ update<HalVersion::V1_3>(operandPerformance, type, perf);
+}
+
+template <HalVersion version>
+PerformanceInfo lookup(const hidl_vec<VersionedOperandPerformance<version>>& operandPerformance,
+ VersionedOperandType<version> type) {
const auto it = std::lower_bound(operandPerformance.begin(), operandPerformance.end(), type,
- [](const Capabilities::OperandPerformance& perf,
- OperandType type) { return perf.type < type; });
+ [](const VersionedOperandPerformance<version>& perf,
+ VersionedOperandType<version> type) {
+ return static_cast<OperandType>(perf.type) <
+ static_cast<OperandType>(type);
+ });
if (it == operandPerformance.end()) {
LOG(WARNING) << "No PerformanceInfo for " << toString(type);
return {.execTime = FLT_MAX, .powerUsage = FLT_MAX};
@@ -1809,6 +1874,15 @@
}
}
+PerformanceInfo lookup(const hidl_vec<V1_2::Capabilities::OperandPerformance>& operandPerformance,
+ V1_2::OperandType type) {
+ return lookup<HalVersion::V1_2>(operandPerformance, type);
+}
+PerformanceInfo lookup(const hidl_vec<V1_3::Capabilities::OperandPerformance>& operandPerformance,
+ V1_3::OperandType type) {
+ return lookup<HalVersion::V1_3>(operandPerformance, type);
+}
+
// Versioning
// In Android P, most data types are treated as having the same performance as TENSOR_QUANT8_ASYMM.
@@ -1819,6 +1893,18 @@
static bool isQuantized8PerformanceConsistentWithP(const V1_2::Capabilities& capabilities) {
const PerformanceInfo quantized8Performance =
+ lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_QUANT8_ASYMM);
+ return std::all_of(std::begin(kQuantized8PerformanceConsistentWithP),
+ std::end(kQuantized8PerformanceConsistentWithP),
+ [quantized8Performance, &capabilities](OperandType type) {
+ return quantized8Performance ==
+ lookup(capabilities.operandPerformance,
+ static_cast<V1_2::OperandType>(type));
+ });
+}
+
+static bool isQuantized8PerformanceConsistentWithP(const V1_3::Capabilities& capabilities) {
+ const PerformanceInfo quantized8Performance =
lookup(capabilities.operandPerformance, OperandType::TENSOR_QUANT8_ASYMM);
return std::all_of(std::begin(kQuantized8PerformanceConsistentWithP),
std::end(kQuantized8PerformanceConsistentWithP),
@@ -1831,13 +1917,12 @@
static hidl_vec<V1_2::Capabilities::OperandPerformance> makeQuantized8PerformanceConsistentWithP(
PerformanceInfo quantized8Performance) {
hidl_vec<V1_2::Capabilities::OperandPerformance> ret(
- sizeof(kQuantized8PerformanceConsistentWithP) /
- sizeof(kQuantized8PerformanceConsistentWithP[0]));
+ std::size(kQuantized8PerformanceConsistentWithP));
std::transform(
std::begin(kQuantized8PerformanceConsistentWithP),
std::end(kQuantized8PerformanceConsistentWithP), ret.begin(),
[quantized8Performance](OperandType type) -> V1_2::Capabilities::OperandPerformance {
- return {type, quantized8Performance};
+ return {static_cast<V1_2::OperandType>(type), quantized8Performance};
});
return ret;
}
@@ -1852,6 +1937,20 @@
bool compliantWithV1_0(const V1_2::Capabilities& capabilities) {
const PerformanceInfo perfTensorFloat32 =
+ lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32);
+ const PerformanceInfo perfFloat32 =
+ lookup(capabilities.operandPerformance, V1_2::OperandType::FLOAT32);
+ if (perfTensorFloat32 != perfFloat32 ||
+ perfTensorFloat32 != capabilities.relaxedFloat32toFloat16PerformanceTensor ||
+ perfFloat32 != capabilities.relaxedFloat32toFloat16PerformanceScalar) {
+ return false;
+ }
+
+ return isQuantized8PerformanceConsistentWithP(capabilities);
+}
+
+bool compliantWithV1_0(const V1_3::Capabilities& capabilities) {
+ const PerformanceInfo perfTensorFloat32 =
lookup(capabilities.operandPerformance, OperandType::TENSOR_FLOAT32);
const PerformanceInfo perfFloat32 =
lookup(capabilities.operandPerformance, OperandType::FLOAT32);
@@ -1875,6 +1974,17 @@
bool compliantWithV1_1(const V1_2::Capabilities& capabilities) {
if ((capabilities.relaxedFloat32toFloat16PerformanceTensor !=
capabilities.relaxedFloat32toFloat16PerformanceScalar) ||
+ (lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32) !=
+ lookup(capabilities.operandPerformance, V1_2::OperandType::FLOAT32))) {
+ return false;
+ }
+
+ return isQuantized8PerformanceConsistentWithP(capabilities);
+}
+
+bool compliantWithV1_1(const V1_3::Capabilities& capabilities) {
+ if ((capabilities.relaxedFloat32toFloat16PerformanceTensor !=
+ capabilities.relaxedFloat32toFloat16PerformanceScalar) ||
(lookup(capabilities.operandPerformance, OperandType::TENSOR_FLOAT32) !=
lookup(capabilities.operandPerformance, OperandType::FLOAT32))) {
return false;
@@ -1891,34 +2001,27 @@
return true;
}
-bool compliantWithV1_2(const V1_0::Model&) {
+bool compliantWithV1_2(const V1_2::Capabilities&) {
return true;
}
-bool compliantWithV1_0(const V1_1::Model& model) {
- // In addition to new enumeration values being introduced in V1_1::Model, a
- // new flag was introduced to indicate whether or not float32 data can be
- // calculated using float16 units. This 'relaxComputationFloat32toFloat16'
- // flag is not relevant in whether a V1_1::Model is compliant with a
- // V1_0::Model because all 1.0 drivers require strict calculation by default
- // in the P NN runtime. Even if fp16 calculations are allowed, they can
- // still be computed by a strict fp32 driver.
- return std::all_of(
- model.operations.begin(), model.operations.end(), [&model](const V1_1::Operation& op) {
- int error = validateOperation(static_cast<int32_t>(op.type), op.inputs.size(),
- op.inputs.size() > 0 ? op.inputs.data() : nullptr,
- op.outputs.size(),
- op.outputs.size() > 0 ? op.outputs.data() : nullptr,
- convertToV1_2(model.operands), HalVersion::V1_0);
- return error == ANEURALNETWORKS_NO_ERROR;
- });
-}
-
-bool compliantWithV1_1(const V1_0::Model&) {
+bool compliantWithV1_2(const V1_3::Capabilities&) {
return true;
}
-bool compliantWithV1_1(const V1_1::Model&) {
+bool compliantWithV1_3(const V1_0::Capabilities&) {
+ return true;
+}
+
+bool compliantWithV1_3(const V1_1::Capabilities&) {
+ return true;
+}
+
+bool compliantWithV1_3(const V1_2::Capabilities&) {
+ return true;
+}
+
+bool compliantWithV1_3(const V1_3::Capabilities&) {
return true;
}
@@ -1949,6 +2052,17 @@
<< " from V1_2::Capabilities to V1_0::Capabilities";
}
return {.float32Performance =
+ lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32),
+ .quantized8Performance = lookup(capabilities.operandPerformance,
+ V1_2::OperandType::TENSOR_QUANT8_ASYMM)};
+}
+
+V1_0::Capabilities convertToV1_0(const V1_3::Capabilities& capabilities) {
+ if (!compliantWithV1_0(capabilities)) {
+ LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities)
+ << " from V1_3::Capabilities to V1_0::Capabilities";
+ }
+ return {.float32Performance =
lookup(capabilities.operandPerformance, OperandType::TENSOR_FLOAT32),
.quantized8Performance =
lookup(capabilities.operandPerformance, OperandType::TENSOR_QUANT8_ASYMM)};
@@ -1970,6 +2084,19 @@
<< " from V1_2::Capabilities to V1_1::Capabilities";
}
return {.float32Performance =
+ lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32),
+ .quantized8Performance =
+ lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_QUANT8_ASYMM),
+ .relaxedFloat32toFloat16Performance =
+ capabilities.relaxedFloat32toFloat16PerformanceTensor};
+}
+
+V1_1::Capabilities convertToV1_1(const V1_3::Capabilities& capabilities) {
+ if (!compliantWithV1_1(capabilities)) {
+ LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities)
+ << " from V1_3::Capabilities to V1_1::Capabilities";
+ }
+ return {.float32Performance =
lookup(capabilities.operandPerformance, OperandType::TENSOR_FLOAT32),
.quantized8Performance =
lookup(capabilities.operandPerformance, OperandType::TENSOR_QUANT8_ASYMM),
@@ -1985,8 +2112,9 @@
makeQuantized8PerformanceConsistentWithP(capabilities.quantized8Performance)};
auto& opPerf = ret.operandPerformance;
opPerf.resize(opPerf.size() + 2);
- opPerf[opPerf.size() - 2] = {OperandType::TENSOR_FLOAT32, capabilities.float32Performance};
- opPerf[opPerf.size() - 1] = {OperandType::FLOAT32, capabilities.float32Performance};
+ opPerf[opPerf.size() - 2] = {V1_2::OperandType::TENSOR_FLOAT32,
+ capabilities.float32Performance};
+ opPerf[opPerf.size() - 1] = {V1_2::OperandType::FLOAT32, capabilities.float32Performance};
using OperandPerformance = V1_2::Capabilities::OperandPerformance;
std::sort(opPerf.begin(), opPerf.end(),
[](const OperandPerformance& a, const OperandPerformance& b) {
@@ -2004,8 +2132,9 @@
capabilities.quantized8Performance)};
auto& opPerf = ret.operandPerformance;
opPerf.resize(opPerf.size() + 2);
- opPerf[opPerf.size() - 2] = {OperandType::TENSOR_FLOAT32, capabilities.float32Performance};
- opPerf[opPerf.size() - 1] = {OperandType::FLOAT32, capabilities.float32Performance};
+ opPerf[opPerf.size() - 2] = {V1_2::OperandType::TENSOR_FLOAT32,
+ capabilities.float32Performance};
+ opPerf[opPerf.size() - 1] = {V1_2::OperandType::FLOAT32, capabilities.float32Performance};
using OperandPerformance = V1_2::Capabilities::OperandPerformance;
std::sort(opPerf.begin(), opPerf.end(),
[](const OperandPerformance& a, const OperandPerformance& b) {
@@ -2018,6 +2147,62 @@
return capabilities;
}
+V1_2::Capabilities convertToV1_2(const V1_3::Capabilities& capabilities) {
+ V1_2::Capabilities ret = {
+ .relaxedFloat32toFloat16PerformanceScalar =
+ capabilities.relaxedFloat32toFloat16PerformanceScalar,
+ .relaxedFloat32toFloat16PerformanceTensor =
+ capabilities.relaxedFloat32toFloat16PerformanceTensor,
+ };
+ const auto& inputOpPerf = capabilities.operandPerformance;
+ hidl_vec<V1_3::Capabilities::OperandPerformance> opPerfSupported;
+ opPerfSupported.resize(inputOpPerf.size());
+ auto last =
+ std::copy_if(inputOpPerf.begin(), inputOpPerf.end(), opPerfSupported.begin(),
+ [](V1_3::Capabilities::OperandPerformance opPerf) {
+ return validOperandType(static_cast<V1_2::OperandType>(opPerf.type));
+ });
+ opPerfSupported.resize(std::distance(opPerfSupported.begin(), last));
+
+ auto& convertedOpPerf = ret.operandPerformance;
+ convertedOpPerf.resize(opPerfSupported.size());
+ std::transform(opPerfSupported.begin(), opPerfSupported.end(), convertedOpPerf.begin(),
+ [](V1_3::Capabilities::OperandPerformance opPerf) {
+ return V1_2::Capabilities::OperandPerformance{
+ static_cast<V1_2::OperandType>(opPerf.type), opPerf.info};
+ });
+ return ret;
+}
+
+V1_3::Capabilities convertToV1_3(const V1_0::Capabilities& capabilities) {
+ return convertToV1_3(convertToV1_2(capabilities));
+}
+
+V1_3::Capabilities convertToV1_3(const V1_1::Capabilities& capabilities) {
+ return convertToV1_3(convertToV1_2(capabilities));
+}
+
+V1_3::Capabilities convertToV1_3(const V1_2::Capabilities& capabilities) {
+ V1_3::Capabilities ret = {
+ .relaxedFloat32toFloat16PerformanceScalar =
+ capabilities.relaxedFloat32toFloat16PerformanceScalar,
+ .relaxedFloat32toFloat16PerformanceTensor =
+ capabilities.relaxedFloat32toFloat16PerformanceTensor,
+ };
+ auto& opPerf = ret.operandPerformance;
+ opPerf.resize(capabilities.operandPerformance.size());
+ std::transform(capabilities.operandPerformance.begin(), capabilities.operandPerformance.end(),
+ opPerf.begin(), [](V1_2::Capabilities::OperandPerformance opPerf) {
+ return V1_3::Capabilities::OperandPerformance{
+ static_cast<V1_3::OperandType>(opPerf.type), opPerf.info};
+ });
+ return ret;
+}
+
+V1_3::Capabilities convertToV1_3(const V1_3::Capabilities& capabilities) {
+ return capabilities;
+}
+
static V1_0::Operation uncheckedConvertToV1_0(const V1_1::Operation& operation) {
return {.type = uncheckedConvertToV1_0(operation.type),
.inputs = operation.inputs,
@@ -2046,57 +2231,22 @@
return result;
}
-bool compliantWithV1_0(const V1_2::Operand& operand) {
+bool compliantWithV1_0(const V1_3::Operand& operand) {
return validOperandType(static_cast<V1_0::OperandType>(operand.type)) &&
(nonExtensionOperandTypeIsScalar(static_cast<int>(operand.type)) ||
operand.dimensions.size() != 0);
}
-V1_0::Model convertToV1_0(const V1_0::Model& model) {
- return model;
+bool compliantWithV1_2(const V1_3::Operand& operand) {
+ return validOperandType(static_cast<V1_2::OperandType>(operand.type));
}
-V1_0::Model convertToV1_0(const V1_1::Model& model) {
- if (!compliantWithV1_0(model)) {
- LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
- << " from V1_1::Model to V1_0::Model";
- }
- return {.operands = model.operands,
- .operations = uncheckedConvertToV1_0(model.operations),
- .inputIndexes = model.inputIndexes,
- .outputIndexes = model.outputIndexes,
- .operandValues = model.operandValues,
- .pools = model.pools};
+bool compliantWithV1_3(const V1_3::Operand& operand) {
+ return true;
}
-V1_1::Model convertToV1_1(const V1_0::Model& model) {
- return {.operands = model.operands,
- .operations = convertToV1_1(model.operations),
- .inputIndexes = model.inputIndexes,
- .outputIndexes = model.outputIndexes,
- .operandValues = model.operandValues,
- .pools = model.pools,
- .relaxComputationFloat32toFloat16 = false};
-}
-
-V1_1::Model convertToV1_1(const V1_1::Model& model) {
- return model;
-}
-
-void logModelToInfo(const V1_2::Model& model) {
- LOG(INFO) << "V1_2::Model start";
- LOG(INFO) << "operands" << toString(model.operands);
- LOG(INFO) << "operations" << toString(model.operations);
- LOG(INFO) << "inputIndexes" << toString(model.inputIndexes);
- LOG(INFO) << "outputIndexes" << toString(model.outputIndexes);
- LOG(INFO) << "operandValues size" << model.operandValues.size();
- LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools));
-}
-
-static bool compliantWith(HalVersion version, const V1_2::Model& model,
+static bool compliantWith(HalVersion version, const V1_3::Model& model,
std::set<uint32_t>* noncompliantOperations) {
- if (version >= HalVersion::V1_2) return true;
-
// A boolean vector indicating whether each pool is compliant with the target HAL version.
std::vector<bool> isPoolCompliant(model.pools.size(), false);
std::transform(model.pools.begin(), model.pools.end(), isPoolCompliant.begin(),
@@ -2105,10 +2255,28 @@
// A boolean vector indicating whether each operand is compliant with the target HAL version.
std::vector<bool> isOperandCompliant(model.operands.size(), false);
std::transform(model.operands.begin(), model.operands.end(), isOperandCompliant.begin(),
- [&isPoolCompliant](const V1_2::Operand& op) {
- // There is no V1_1::Operand -- both V1_0::Model and V1_1::Model use
- // V1_0::Operand.
- return compliantWithV1_0(op) &&
+ [&isPoolCompliant, version](const Operand& op) {
+ bool is_operand_compliant = false;
+ switch (version) {
+ case HalVersion::UNKNOWN:
+ is_operand_compliant = false;
+ break;
+ case HalVersion::V1_0:
+ is_operand_compliant = compliantWithV1_0(op);
+ break;
+ case HalVersion::V1_1:
+ // There is no V1_1::Operand -- both V1_0::Model
+ // and V1_1::Model use V1_0::Operand.
+ is_operand_compliant = compliantWithV1_0(op);
+ break;
+ case HalVersion::V1_2:
+ is_operand_compliant = compliantWithV1_2(op);
+ break;
+ case HalVersion::V1_3:
+ is_operand_compliant = compliantWithV1_3(op);
+ break;
+ }
+ return is_operand_compliant &&
!(op.lifetime == OperandLifeTime::CONSTANT_REFERENCE &&
!isPoolCompliant[op.location.poolIndex]);
});
@@ -2119,8 +2287,7 @@
[&isOperandCompliant](const uint32_t ind) { return isOperandCompliant[ind]; });
};
- auto localValidateOperation = [&model, version,
- &allOperandsCompliant](const V1_2::Operation& op) {
+ auto localValidateOperation = [&model, version, &allOperandsCompliant](const Operation& op) {
if (!allOperandsCompliant(op.inputs) || !allOperandsCompliant(op.outputs)) return false;
int error = validateOperation(
static_cast<int32_t>(op.type), op.inputs.size(),
@@ -2143,14 +2310,69 @@
}
}
+bool compliantWithV1_0(const V1_0::Model& model) {
+ return true;
+}
+
+bool compliantWithV1_0(const V1_1::Model& model) {
+ // In addition to new enumeration values being introduced in V1_1::Model, a
+ // new flag was introduced to indicate whether or not float32 data can be
+ // calculated using float16 units. This 'relaxComputationFloat32toFloat16'
+ // flag is not relevant in whether a V1_1::Model is compliant with a
+ // V1_0::Model because all 1.0 drivers require strict calculation by default
+ // in the P NN runtime. Even if fp16 calculations are allowed, they can
+ // still be computed by a strict fp32 driver.
+ return std::all_of(
+ model.operations.begin(), model.operations.end(), [&model](const V1_1::Operation& op) {
+ int error = validateOperation(static_cast<int32_t>(op.type), op.inputs.size(),
+ op.inputs.size() > 0 ? op.inputs.data() : nullptr,
+ op.outputs.size(),
+ op.outputs.size() > 0 ? op.outputs.data() : nullptr,
+ convertToV1_3(model.operands), HalVersion::V1_0);
+ return error == ANEURALNETWORKS_NO_ERROR;
+ });
+}
+
bool compliantWithV1_0(const V1_2::Model& model, std::set<uint32_t>* noncompliantOperations) {
+ return compliantWith(HalVersion::V1_0, convertToV1_3(model), noncompliantOperations);
+}
+
+bool compliantWithV1_0(const V1_3::Model& model, std::set<uint32_t>* noncompliantOperations) {
return compliantWith(HalVersion::V1_0, model, noncompliantOperations);
}
+bool compliantWithV1_1(const V1_0::Model&) {
+ return true;
+}
+
+bool compliantWithV1_1(const V1_1::Model&) {
+ return true;
+}
+
bool compliantWithV1_1(const V1_2::Model& model, std::set<uint32_t>* noncompliantOperations) {
+ return compliantWith(HalVersion::V1_1, convertToV1_3(model), noncompliantOperations);
+}
+
+bool compliantWithV1_1(const V1_3::Model& model, std::set<uint32_t>* noncompliantOperations) {
return compliantWith(HalVersion::V1_1, model, noncompliantOperations);
}
+bool compliantWithV1_2(const V1_0::Model&) {
+ return true;
+}
+
+bool compliantWithV1_2(const V1_1::Model&) {
+ return true;
+}
+
+bool compliantWithV1_2(const V1_2::Model&, std::set<uint32_t>* noncompliantOperations) {
+ return true;
+}
+
+bool compliantWithV1_2(const V1_3::Model& model, std::set<uint32_t>* noncompliantOperations) {
+ return compliantWith(HalVersion::V1_2, model, noncompliantOperations);
+}
+
V1_0::OperationType uncheckedConvertToV1_0(V1_2::OperationType type) {
return static_cast<V1_0::OperationType>(type);
}
@@ -2159,6 +2381,10 @@
return static_cast<V1_1::OperationType>(type);
}
+V1_2::OperationType convertToV1_2(V1_2::OperationType type) {
+ return type;
+}
+
static V1_2::OperationType convertToV1_2(V1_0::OperationType type) {
return static_cast<V1_2::OperationType>(type);
}
@@ -2173,12 +2399,24 @@
.outputs = operation.outputs};
}
+static V1_0::Operation uncheckedConvertToV1_0(const V1_3::Operation& operation) {
+ return {.type = uncheckedConvertToV1_0(operation.type),
+ .inputs = operation.inputs,
+ .outputs = operation.outputs};
+}
+
static V1_1::Operation uncheckedConvertToV1_1(const V1_2::Operation& operation) {
return {.type = uncheckedConvertToV1_1(operation.type),
.inputs = operation.inputs,
.outputs = operation.outputs};
}
+static V1_1::Operation uncheckedConvertToV1_1(const V1_3::Operation& operation) {
+ return {.type = uncheckedConvertToV1_1(operation.type),
+ .inputs = operation.inputs,
+ .outputs = operation.outputs};
+}
+
static V1_2::Operation convertToV1_2(const V1_0::Operation& operation) {
return {.type = convertToV1_2(operation.type),
.inputs = operation.inputs,
@@ -2191,6 +2429,35 @@
.outputs = operation.outputs};
}
+static V1_2::Operation uncheckedConvertToV1_2(const V1_3::Operation& operation) {
+ return {.type = operation.type, .inputs = operation.inputs, .outputs = operation.outputs};
+}
+
+static V1_3::Operation convertToV1_3(const V1_0::Operation& operation) {
+ return {.type = convertToV1_2(operation.type),
+ .inputs = operation.inputs,
+ .outputs = operation.outputs};
+}
+
+static V1_3::Operation convertToV1_3(const V1_1::Operation& operation) {
+ return {.type = convertToV1_2(operation.type),
+ .inputs = operation.inputs,
+ .outputs = operation.outputs};
+}
+
+static V1_3::Operation convertToV1_3(const V1_2::Operation& operation) {
+ return {.type = operation.type, .inputs = operation.inputs, .outputs = operation.outputs};
+}
+
+static hidl_vec<V1_0::Operation> uncheckedConvertToV1_0(
+ const hidl_vec<V1_3::Operation>& operations) {
+ hidl_vec<V1_0::Operation> result(operations.size());
+ std::transform(
+ operations.begin(), operations.end(), result.begin(),
+ [](const V1_3::Operation& operation) { return uncheckedConvertToV1_0(operation); });
+ return result;
+}
+
static hidl_vec<V1_0::Operation> uncheckedConvertToV1_0(
const hidl_vec<V1_2::Operation>& operations) {
hidl_vec<V1_0::Operation> result(operations.size());
@@ -2200,6 +2467,15 @@
return result;
}
+static hidl_vec<V1_2::Operation> uncheckedConvertToV1_2(
+ const hidl_vec<V1_3::Operation>& operations) {
+ hidl_vec<V1_2::Operation> result(operations.size());
+ std::transform(
+ operations.begin(), operations.end(), result.begin(),
+ [](const V1_3::Operation& operation) { return uncheckedConvertToV1_2(operation); });
+ return result;
+}
+
static hidl_vec<V1_1::Operation> uncheckedConvertToV1_1(
const hidl_vec<V1_2::Operation>& operations) {
hidl_vec<V1_1::Operation> result(operations.size());
@@ -2209,6 +2485,15 @@
return result;
}
+static hidl_vec<V1_1::Operation> uncheckedConvertToV1_1(
+ const hidl_vec<V1_3::Operation>& operations) {
+ hidl_vec<V1_1::Operation> result(operations.size());
+ std::transform(
+ operations.begin(), operations.end(), result.begin(),
+ [](const V1_3::Operation& operation) { return uncheckedConvertToV1_1(operation); });
+ return result;
+}
+
static hidl_vec<V1_2::Operation> convertToV1_2(const hidl_vec<V1_0::Operation>& operations) {
hidl_vec<V1_2::Operation> result(operations.size());
std::transform(operations.begin(), operations.end(), result.begin(),
@@ -2223,26 +2508,111 @@
return result;
}
-// We only need to convert from 1.0 and back since there wasn't any changes to
-// Operand in 1.1
-V1_2::OperandType convertToV1_2(const V1_0::OperandType& operandType) {
- return static_cast<V1_2::OperandType>(operandType);
+static hidl_vec<V1_3::Operation> convertToV1_3(const hidl_vec<V1_0::Operation>& operations) {
+ hidl_vec<V1_3::Operation> result(operations.size());
+ std::transform(operations.begin(), operations.end(), result.begin(),
+ [](const V1_0::Operation& operation) { return convertToV1_3(operation); });
+ return result;
+}
+
+static hidl_vec<V1_3::Operation> convertToV1_3(const hidl_vec<V1_1::Operation>& operations) {
+ hidl_vec<V1_3::Operation> result(operations.size());
+ std::transform(operations.begin(), operations.end(), result.begin(),
+ [](const V1_1::Operation& operation) { return convertToV1_3(operation); });
+ return result;
+}
+
+static hidl_vec<V1_3::Operation> convertToV1_3(const hidl_vec<V1_2::Operation>& operations) {
+ hidl_vec<V1_3::Operation> result(operations.size());
+ std::transform(operations.begin(), operations.end(), result.begin(),
+ [](const V1_2::Operation& operation) { return convertToV1_3(operation); });
+ return result;
}
static bool compliantWithV1_0(const V1_2::OperandType& operandType) {
return validOperandType(static_cast<V1_0::OperandType>(operandType));
}
+static bool compliantWithV1_0(const V1_3::OperandType& operandType) {
+ return validOperandType(static_cast<V1_0::OperandType>(operandType));
+}
+
+static bool compliantWithV1_2(const V1_3::OperandType& operandType) {
+ return validOperandType(static_cast<V1_2::OperandType>(operandType));
+}
+
V1_0::OperandType convertToV1_0(const V1_2::OperandType& operandType) {
if (!compliantWithV1_0(operandType)) {
LOG(ERROR) << "Upcasting non-compliant operand type " << toString(operandType)
- << " from V1_2::Operand to V1_0::Operand";
+ << " from V1_2::OperandType to V1_0::OperandType";
}
return static_cast<V1_0::OperandType>(operandType);
}
-// We only need to convert from 1.0 and back since there wasn't any changes to
-// Operand in 1.1
+V1_2::OperandType convertToV1_2(const V1_0::OperandType& operandType) {
+ return static_cast<V1_2::OperandType>(operandType);
+}
+
+V1_2::OperandType convertToV1_2(const V1_3::OperandType& operandType) {
+ if (!compliantWithV1_2(operandType)) {
+ LOG(ERROR) << "Upcasting non-compliant operand type " << toString(operandType)
+ << " from V1_3::OperandType to V1_2::OperandType";
+ }
+ return static_cast<V1_2::OperandType>(operandType);
+}
+
+V1_0::OperandType convertToV1_0(const V1_3::OperandType& operandType) {
+ if (!compliantWithV1_0(operandType)) {
+ LOG(ERROR) << "Upcasting non-compliant operand type " << toString(operandType)
+ << " from V1_3::OperandType to V1_0::OperandType";
+ }
+ return static_cast<V1_0::OperandType>(operandType);
+}
+
+V1_0::OperandLifeTime convertToV1_0(const V1_0::OperandLifeTime& operandLifeTime) {
+ return operandLifeTime;
+}
+
+template <typename InExtraParams, typename OutExtraParams>
+OutExtraParams copyExtraParams(const InExtraParams& extraParams) {
+ OutExtraParams out;
+ switch (extraParams.getDiscriminator()) {
+ case InExtraParams::hidl_discriminator::none: {
+ out.none(extraParams.none());
+ } break;
+ case InExtraParams::hidl_discriminator::channelQuant: {
+ out.channelQuant({
+ .scales = extraParams.channelQuant().scales,
+ .channelDim = extraParams.channelQuant().channelDim,
+ });
+ } break;
+ case InExtraParams::hidl_discriminator::extension: {
+ out.extension(extraParams.extension());
+ } break;
+ }
+ return out;
+}
+
+V1_0::Operand convertToV1_0(const V1_2::Operand& operand) {
+ return {.type = convertToV1_0(operand.type),
+ .dimensions = operand.dimensions,
+ .numberOfConsumers = operand.numberOfConsumers,
+ .scale = operand.scale,
+ .zeroPoint = operand.zeroPoint,
+ .lifetime = convertToV1_0(operand.lifetime),
+ .location = operand.location};
+}
+
+V1_0::Operand convertToV1_0(const V1_3::Operand& operand) {
+ return {.type = convertToV1_0(operand.type),
+ .dimensions = operand.dimensions,
+ .numberOfConsumers = operand.numberOfConsumers,
+ .scale = operand.scale,
+ .zeroPoint = operand.zeroPoint,
+ .lifetime = convertToV1_0(operand.lifetime),
+ .location = operand.location};
+}
+
V1_2::Operand convertToV1_2(const V1_0::Operand& operand) {
return {.type = convertToV1_2(operand.type),
.dimensions = operand.dimensions,
@@ -2253,12 +2623,20 @@
.location = operand.location};
}
-V1_2::Operand convertToV1_2(const V1_2::Operand& operand) {
- return operand;
+V1_2::Operand convertToV1_2(const V1_3::Operand& operand) {
+ return {.type = convertToV1_2(operand.type),
+ .dimensions = operand.dimensions,
+ .numberOfConsumers = operand.numberOfConsumers,
+ .scale = operand.scale,
+ .zeroPoint = operand.zeroPoint,
+ .lifetime = static_cast<V1_0::OperandLifeTime>(operand.lifetime),
+ .location = operand.location,
+ .extraParams = copyExtraParams<V1_3::Operand::ExtraParams, V1_2::Operand::ExtraParams>(
+ operand.extraParams)};
}
-V1_0::Operand convertToV1_0(const V1_2::Operand& operand) {
- return {.type = convertToV1_0(operand.type),
+V1_3::Operand convertToV1_3(const V1_0::Operand& operand) {
+ return {.type = static_cast<V1_3::OperandType>(operand.type),
.dimensions = operand.dimensions,
.numberOfConsumers = operand.numberOfConsumers,
.scale = operand.scale,
@@ -2267,8 +2645,40 @@
.location = operand.location};
}
-// We only need to convert from 1.0 and back since there wasn't any changes to
-// Operand in 1.1
+V1_3::Operand convertToV1_3(const V1_2::Operand& operand) {
+ return {.type = static_cast<V1_3::OperandType>(operand.type),
+ .dimensions = operand.dimensions,
+ .numberOfConsumers = operand.numberOfConsumers,
+ .scale = operand.scale,
+ .zeroPoint = operand.zeroPoint,
+ .lifetime = operand.lifetime,
+ .location = operand.location,
+ .extraParams = copyExtraParams<V1_2::Operand::ExtraParams, V1_3::Operand::ExtraParams>(
+ operand.extraParams)};
+}
+
+V1_3::Operand convertToV1_3(const V1_3::Operand& operand) {
+ return operand;
+}
+
+hidl_vec<V1_0::Operand> convertToV1_0(const hidl_vec<V1_0::Operand>& operands) {
+ return operands;
+}
+
+hidl_vec<V1_0::Operand> convertToV1_0(const hidl_vec<V1_2::Operand>& operands) {
+ hidl_vec<V1_0::Operand> result(operands.size());
+ std::transform(operands.begin(), operands.end(), result.begin(),
+ [](const V1_2::Operand& operand) { return convertToV1_0(operand); });
+ return result;
+}
+
+hidl_vec<V1_0::Operand> convertToV1_0(const hidl_vec<V1_3::Operand>& operands) {
+ hidl_vec<V1_0::Operand> result(operands.size());
+ std::transform(operands.begin(), operands.end(), result.begin(),
+ [](const V1_3::Operand& operand) { return convertToV1_0(operand); });
+ return result;
+}
+
hidl_vec<V1_2::Operand> convertToV1_2(const hidl_vec<V1_0::Operand>& operands) {
hidl_vec<V1_2::Operand> result(operands.size());
std::transform(operands.begin(), operands.end(), result.begin(),
@@ -2280,13 +2690,48 @@
return operands;
}
-hidl_vec<V1_0::Operand> convertToV1_0(const hidl_vec<V1_2::Operand>& operands) {
- hidl_vec<V1_0::Operand> result(operands.size());
+hidl_vec<V1_2::Operand> convertToV1_2(const hidl_vec<V1_3::Operand>& operands) {
+ hidl_vec<V1_2::Operand> result(operands.size());
std::transform(operands.begin(), operands.end(), result.begin(),
- [](const V1_2::Operand& operand) { return convertToV1_0(operand); });
+ [](const V1_3::Operand& operand) { return convertToV1_2(operand); });
return result;
}
+hidl_vec<V1_3::Operand> convertToV1_3(const hidl_vec<V1_0::Operand>& operands) {
+ hidl_vec<V1_3::Operand> result(operands.size());
+ std::transform(operands.begin(), operands.end(), result.begin(),
+ [](const V1_0::Operand& operand) { return convertToV1_3(operand); });
+ return result;
+}
+
+hidl_vec<V1_3::Operand> convertToV1_3(const hidl_vec<V1_2::Operand>& operands) {
+ hidl_vec<V1_3::Operand> result(operands.size());
+ std::transform(operands.begin(), operands.end(), result.begin(),
+ [](const V1_2::Operand& operand) { return convertToV1_3(operand); });
+ return result;
+}
+
+hidl_vec<V1_3::Operand> convertToV1_3(const hidl_vec<V1_3::Operand>& operands) {
+ return operands;
+}
+
+V1_0::Model convertToV1_0(const V1_0::Model& model) {
+ return model;
+}
+
+V1_0::Model convertToV1_0(const V1_1::Model& model) {
+ if (!compliantWithV1_0(model)) {
+ LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
+ << " from V1_1::Model to V1_0::Model";
+ }
+ return {.operands = model.operands,
+ .operations = uncheckedConvertToV1_0(model.operations),
+ .inputIndexes = model.inputIndexes,
+ .outputIndexes = model.outputIndexes,
+ .operandValues = model.operandValues,
+ .pools = model.pools};
+}
+
V1_0::Model convertToV1_0(const V1_2::Model& model) {
if (!compliantWithV1_0(model)) {
LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
@@ -2300,6 +2745,33 @@
.pools = model.pools};
}
+V1_0::Model convertToV1_0(const V1_3::Model& model) {
+ if (!compliantWithV1_0(model)) {
+ LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
+ << " from V1_3::Model to V1_0::Model";
+ }
+ return {.operands = convertToV1_0(model.operands),
+ .operations = uncheckedConvertToV1_0(model.operations),
+ .inputIndexes = model.inputIndexes,
+ .outputIndexes = model.outputIndexes,
+ .operandValues = model.operandValues,
+ .pools = model.pools};
+}
+
+V1_1::Model convertToV1_1(const V1_0::Model& model) {
+ return {.operands = model.operands,
+ .operations = convertToV1_1(model.operations),
+ .inputIndexes = model.inputIndexes,
+ .outputIndexes = model.outputIndexes,
+ .operandValues = model.operandValues,
+ .pools = model.pools,
+ .relaxComputationFloat32toFloat16 = false};
+}
+
+V1_1::Model convertToV1_1(const V1_1::Model& model) {
+ return model;
+}
+
V1_1::Model convertToV1_1(const V1_2::Model& model) {
if (!compliantWithV1_1(model)) {
LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
@@ -2314,6 +2786,42 @@
.relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16};
}
+V1_1::Model convertToV1_1(const V1_3::Model& model) {
+ if (!compliantWithV1_1(model)) {
+ LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
+ << " from V1_3::Model to V1_1::Model";
+ }
+ return {.operands = convertToV1_0(model.operands), // Operands in 1.1 and 1.0 are identical.
+ .operations = uncheckedConvertToV1_1(model.operations),
+ .inputIndexes = model.inputIndexes,
+ .outputIndexes = model.outputIndexes,
+ .operandValues = model.operandValues,
+ .pools = model.pools,
+ .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16};
+}
+
+static hidl_vec<V1_2::Model::ExtensionNameAndPrefix> convertToV1_2(
+ const hidl_vec<V1_3::Model::ExtensionNameAndPrefix>& extensionNameAndPrefix) {
+ hidl_vec<V1_2::Model::ExtensionNameAndPrefix> result(extensionNameAndPrefix.size());
+ std::transform(extensionNameAndPrefix.begin(), extensionNameAndPrefix.end(), result.begin(),
+ [](const V1_3::Model::ExtensionNameAndPrefix& nameAndPrefix)
+ -> V1_2::Model::ExtensionNameAndPrefix {
+ return {.name = nameAndPrefix.name, .prefix = nameAndPrefix.prefix};
+ });
+ return result;
+}
+
+static hidl_vec<V1_3::Model::ExtensionNameAndPrefix> convertToV1_3(
+ const hidl_vec<V1_2::Model::ExtensionNameAndPrefix>& extensionNameAndPrefix) {
+ hidl_vec<V1_3::Model::ExtensionNameAndPrefix> result(extensionNameAndPrefix.size());
+ std::transform(extensionNameAndPrefix.begin(), extensionNameAndPrefix.end(), result.begin(),
+ [](const V1_2::Model::ExtensionNameAndPrefix& nameAndPrefix)
+ -> V1_3::Model::ExtensionNameAndPrefix {
+ return {.name = nameAndPrefix.name, .prefix = nameAndPrefix.prefix};
+ });
+ return result;
+}
+
V1_2::Model convertToV1_2(const V1_0::Model& model) {
return {.operands = convertToV1_2(model.operands),
.operations = convertToV1_2(model.operations),
@@ -2338,6 +2846,56 @@
return model;
}
+V1_2::Model convertToV1_2(const V1_3::Model& model) {
+ if (!compliantWithV1_2(model)) {
+ LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
+ << " from V1_3::Model to V1_2::Model";
+ }
+ return {.operands = convertToV1_2(model.operands),
+ .operations = uncheckedConvertToV1_2(model.operations),
+ .inputIndexes = model.inputIndexes,
+ .outputIndexes = model.outputIndexes,
+ .operandValues = model.operandValues,
+ .pools = model.pools,
+ .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
+ .extensionNameToPrefix = convertToV1_2(model.extensionNameToPrefix)};
+}
+
+V1_3::Model convertToV1_3(const V1_0::Model& model) {
+ return {.operands = convertToV1_3(model.operands),
+ .operations = convertToV1_3(model.operations),
+ .inputIndexes = model.inputIndexes,
+ .outputIndexes = model.outputIndexes,
+ .operandValues = model.operandValues,
+ .pools = model.pools,
+ .relaxComputationFloat32toFloat16 = false};
+}
+
+V1_3::Model convertToV1_3(const V1_1::Model& model) {
+ return {.operands = convertToV1_3(model.operands),
+ .operations = convertToV1_3(model.operations),
+ .inputIndexes = model.inputIndexes,
+ .outputIndexes = model.outputIndexes,
+ .operandValues = model.operandValues,
+ .pools = model.pools,
+ .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16};
+}
+
+V1_3::Model convertToV1_3(const V1_2::Model& model) {
+ return {.operands = convertToV1_3(model.operands),
+ .operations = convertToV1_3(model.operations),
+ .inputIndexes = model.inputIndexes,
+ .outputIndexes = model.outputIndexes,
+ .operandValues = model.operandValues,
+ .pools = model.pools,
+ .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
+ .extensionNameToPrefix = convertToV1_3(model.extensionNameToPrefix)};
+}
+
+V1_3::Model convertToV1_3(const V1_3::Model& model) {
+ return model;
+}
+
#ifdef NN_DEBUGGABLE
uint32_t getProp(const char* str, uint32_t defaultValue) {
const std::string propStr = android::base::GetProperty(str, "");
diff --git a/common/ValidateHal.cpp b/common/ValidateHal.cpp
index a74b656..8ec6320 100644
--- a/common/ValidateHal.cpp
+++ b/common/ValidateHal.cpp
@@ -17,13 +17,17 @@
#define LOG_TAG "ValidateHal"
#include "ValidateHal.h"
+
+#include <android-base/logging.h>
+
+#include <algorithm>
+#include <vector>
+
#include "NeuralNetworks.h"
#include "OperationsUtils.h"
#include "Tracing.h"
#include "Utils.h"
-#include <android-base/logging.h>
-
namespace android {
namespace nn {
@@ -43,6 +47,10 @@
struct ModelToHalVersion<V1_2::Model> {
static constexpr HalVersion version = HalVersion::V1_2;
};
+template <>
+struct ModelToHalVersion<V1_3::Model> {
+ static constexpr HalVersion version = HalVersion::V1_3;
+};
class MemoryAccessVerifier {
public:
@@ -73,7 +81,7 @@
std::vector<size_t> mPoolSizes;
};
-static bool validateOperandExtraParams(const V1_2::Operand& operand, uint32_t index) {
+static bool validateOperandExtraParams(const V1_3::Operand& operand, uint32_t index) {
switch (operand.type) {
case OperandType::FLOAT32:
case OperandType::INT32:
@@ -83,19 +91,20 @@
case OperandType::TENSOR_FLOAT16:
case OperandType::TENSOR_INT32:
case OperandType::TENSOR_QUANT8_ASYMM:
+ case OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
case OperandType::TENSOR_QUANT8_SYMM:
case OperandType::TENSOR_QUANT16_ASYMM:
case OperandType::TENSOR_QUANT16_SYMM:
case OperandType::TENSOR_BOOL8: {
NN_RET_CHECK(operand.extraParams.getDiscriminator() ==
- V1_2::Operand::ExtraParams::hidl_discriminator::none)
+ V1_3::Operand::ExtraParams::hidl_discriminator::none)
<< "Operand " << index << ": Operand of type "
<< getOperandTypeName(operand.type)
<< " has incorrect extraParams: " << toString(operand.extraParams);
} break;
case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: {
NN_RET_CHECK(operand.extraParams.getDiscriminator() ==
- V1_2::Operand::ExtraParams::hidl_discriminator::channelQuant)
+ V1_3::Operand::ExtraParams::hidl_discriminator::channelQuant)
<< "Operand " << index << ": Operand of type "
<< getOperandTypeName(operand.type) << " without a Channel Quantization params";
auto& channelQuant = operand.extraParams.channelQuant();
@@ -125,9 +134,9 @@
default: {
if (isExtensionOperandType(operand.type)) {
NN_RET_CHECK(operand.extraParams.getDiscriminator() ==
- V1_2::Operand::ExtraParams::hidl_discriminator::extension ||
+ V1_3::Operand::ExtraParams::hidl_discriminator::extension ||
operand.extraParams.getDiscriminator() ==
- V1_2::Operand::ExtraParams::hidl_discriminator::none)
+ V1_3::Operand::ExtraParams::hidl_discriminator::none)
<< "Operand " << index << ": Extension operand of type "
<< getOperandTypeName(operand.type)
<< " has incorrect extraParams: " << toString(operand.extraParams);
@@ -152,7 +161,7 @@
}
// Once we are sure the operand is supported by its version, it is safe
// to convert it to the latest version for the rest of the validations.
- V1_2::Operand operand = convertToV1_2(versionedOperand);
+ V1_3::Operand operand = convertToV1_3(versionedOperand);
// Validate type and dimensions.
switch (operand.type) {
case OperandType::FLOAT16:
@@ -173,6 +182,7 @@
case OperandType::TENSOR_FLOAT32:
case OperandType::TENSOR_INT32:
case OperandType::TENSOR_QUANT8_ASYMM:
+ case OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
case OperandType::TENSOR_QUANT8_SYMM:
case OperandType::TENSOR_QUANT16_ASYMM:
case OperandType::TENSOR_QUANT16_SYMM:
@@ -228,6 +238,7 @@
}
break;
case OperandType::TENSOR_QUANT8_ASYMM:
+ case OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
case OperandType::TENSOR_QUANT8_SYMM:
case OperandType::TENSOR_QUANT16_ASYMM:
case OperandType::TENSOR_QUANT16_SYMM:
@@ -277,6 +288,14 @@
return false;
}
break;
+ case OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
+ if (operand.zeroPoint < -128 || operand.zeroPoint > 127) {
+ LOG(ERROR) << "Operand " << index << ": Operand of type "
+ << getOperandTypeName(operand.type) << " with an invalid zeroPoint "
+ << operand.zeroPoint << ", must be in range [-128, 127]";
+ return false;
+ }
+ break;
case OperandType::TENSOR_QUANT16_ASYMM:
if (operand.zeroPoint < 0 || operand.zeroPoint > 65535) {
LOG(ERROR) << "Operand " << index << ": Operand of type "
@@ -381,6 +400,10 @@
return HalVersion::V1_2;
}
+static HalVersion getHalVersion(const V1_3::Operation&) {
+ return HalVersion::V1_3;
+}
+
template <typename VersionedOperation>
static bool validateOperations(const hidl_vec<VersionedOperation>& operations,
const hidl_vec<Operand>& operands) {
@@ -491,7 +514,7 @@
}
// We only need versioned operands for their validation. For all the other
// validations we can use operands upcasted to the latest version.
- const hidl_vec<Operand> latestVersionOperands = convertToV1_2(model.operands);
+ const hidl_vec<Operand> latestVersionOperands = convertToV1_3(model.operands);
return (validateOperands(model.operands, model.operandValues, model.pools,
/*allowUnspecifiedRank=*/version >= HalVersion::V1_2) &&
validateOperations(model.operations, latestVersionOperands) &&
@@ -505,6 +528,7 @@
template bool validateModel<V1_0::Model>(const V1_0::Model& model);
template bool validateModel<V1_1::Model>(const V1_1::Model& model);
template bool validateModel<V1_2::Model>(const V1_2::Model& model);
+template bool validateModel<V1_3::Model>(const V1_3::Model& model);
// Validates the arguments of a request. type is either "input" or "output" and is used
// for printing error messages. The operandIndexes is the appropriate array of input
@@ -589,10 +613,10 @@
bool validateRequest(const Request& request, const T_Model& model) {
HalVersion version = ModelToHalVersion<T_Model>::version;
return (validateRequestArguments(request.inputs, model.inputIndexes,
- convertToV1_2(model.operands), request.pools,
+ convertToV1_3(model.operands), request.pools,
/*allowUnspecified=*/false, "input") &&
validateRequestArguments(request.outputs, model.outputIndexes,
- convertToV1_2(model.operands), request.pools,
+ convertToV1_3(model.operands), request.pools,
/*allowUnspecified=*/version >= HalVersion::V1_2, "output") &&
validatePools(request.pools, version));
}
@@ -600,6 +624,7 @@
template bool validateRequest<V1_0::Model>(const Request& request, const V1_0::Model& model);
template bool validateRequest<V1_1::Model>(const Request& request, const V1_1::Model& model);
template bool validateRequest<V1_2::Model>(const Request& request, const V1_2::Model& model);
+template bool validateRequest<V1_3::Model>(const Request& request, const V1_3::Model& model);
bool validateExecutionPreference(ExecutionPreference preference) {
return preference == ExecutionPreference::LOW_POWER ||
@@ -643,6 +668,31 @@
case V1_2::OperandType::TENSOR_OEM_BYTE:
return true;
default:
+ return isExtensionOperandType(static_cast<V1_3::OperandType>(operandType));
+ }
+}
+
+bool validOperandType(V1_3::OperandType operandType) {
+ switch (operandType) {
+ case V1_3::OperandType::FLOAT16:
+ case V1_3::OperandType::FLOAT32:
+ case V1_3::OperandType::INT32:
+ case V1_3::OperandType::UINT32:
+ case V1_3::OperandType::BOOL:
+ case V1_3::OperandType::TENSOR_FLOAT16:
+ case V1_3::OperandType::TENSOR_FLOAT32:
+ case V1_3::OperandType::TENSOR_INT32:
+ case V1_3::OperandType::TENSOR_QUANT8_ASYMM:
+ case V1_3::OperandType::TENSOR_QUANT8_SYMM:
+ case V1_3::OperandType::TENSOR_QUANT16_ASYMM:
+ case V1_3::OperandType::TENSOR_QUANT16_SYMM:
+ case V1_3::OperandType::TENSOR_BOOL8:
+ case V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
+ case V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
+ case V1_3::OperandType::OEM:
+ case V1_3::OperandType::TENSOR_OEM_BYTE:
+ return true;
+ default:
return isExtensionOperandType(operandType);
}
}
diff --git a/common/include/GraphDump.h b/common/include/GraphDump.h
index bee994b..207afe5 100644
--- a/common/include/GraphDump.h
+++ b/common/include/GraphDump.h
@@ -17,7 +17,7 @@
#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_GRAPH_DUMP_H
#define ANDROID_FRAMEWORKS_ML_NN_COMMON_GRAPH_DUMP_H
-#include <android/hardware/neuralnetworks/1.2/types.h>
+#include <android/hardware/neuralnetworks/1.3/types.h>
#include <iostream>
@@ -45,7 +45,7 @@
// A model input or output (operand) is shown in "reverse colors" --
// white text on a black background.
//
-void graphDump(const char* name, const ::android::hardware::neuralnetworks::V1_2::Model& model,
+void graphDump(const char* name, const ::android::hardware::neuralnetworks::V1_3::Model& model,
std::ostream* outStream = nullptr);
} // namespace nn
diff --git a/common/include/HalInterfaces.h b/common/include/HalInterfaces.h
index f6ca648..50297fb 100644
--- a/common/include/HalInterfaces.h
+++ b/common/include/HalInterfaces.h
@@ -29,6 +29,8 @@
#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.2/types.h>
+#include <android/hardware/neuralnetworks/1.3/IDevice.h>
+#include <android/hardware/neuralnetworks/1.3/types.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
@@ -50,6 +52,7 @@
namespace V1_0 = hardware::neuralnetworks::V1_0;
namespace V1_1 = hardware::neuralnetworks::V1_1;
namespace V1_2 = hardware::neuralnetworks::V1_2;
+namespace V1_3 = hardware::neuralnetworks::V1_3;
using V1_0::DataLocation;
using V1_0::DeviceStatus;
@@ -60,7 +63,6 @@
using V1_0::Request;
using V1_0::RequestArgument;
using V1_1::ExecutionPreference;
-using V1_2::Capabilities;
using V1_2::Constant;
using V1_2::DeviceType;
using V1_2::Extension;
@@ -68,21 +70,22 @@
using V1_2::FmqResultDatum;
using V1_2::IBurstCallback;
using V1_2::IBurstContext;
-using V1_2::IDevice;
using V1_2::IExecutionCallback;
using V1_2::IPreparedModel;
using V1_2::IPreparedModelCallback;
using V1_2::MeasureTiming;
-using V1_2::Model;
-using V1_2::Operand;
-using V1_2::OperandType;
-using V1_2::OperandTypeRange;
-using V1_2::Operation;
using V1_2::OperationType;
using V1_2::OperationTypeRange;
using V1_2::OutputShape;
using V1_2::SymmPerChannelQuantParams;
using V1_2::Timing;
+using V1_3::Capabilities;
+using V1_3::IDevice;
+using V1_3::Model;
+using V1_3::Operand;
+using V1_3::OperandType;
+using V1_3::OperandTypeRange;
+using V1_3::Operation;
using CacheToken =
hardware::hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
diff --git a/common/include/MetaModel.h b/common/include/MetaModel.h
index 89aee9f..c22ee1b 100644
--- a/common/include/MetaModel.h
+++ b/common/include/MetaModel.h
@@ -74,6 +74,7 @@
ReturnedSlice<hal::V1_0::Model> getSliceV1_0() const { return getSlice(&mSliceV1_0); }
ReturnedSlice<hal::V1_1::Model> getSliceV1_1() const { return getSlice(&mSliceV1_1); }
+ ReturnedSlice<hal::V1_2::Model> getSliceV1_2() const { return getSlice(&mSliceV1_2); }
// Disallowing copy constructor and assignment operator is for efficiency,
// not for correctness. The default copy constructor and assignment
@@ -115,6 +116,7 @@
};
mutable Slice<hal::V1_0::Model> mSliceV1_0;
mutable Slice<hal::V1_1::Model> mSliceV1_1;
+ mutable Slice<hal::V1_2::Model> mSliceV1_2;
template <class T_SlicedModel>
ReturnedSlice<T_SlicedModel> getSlice(Slice<T_SlicedModel>* slice) const;
diff --git a/common/include/Utils.h b/common/include/Utils.h
index 730ed58..156c1ad 100644
--- a/common/include/Utils.h
+++ b/common/include/Utils.h
@@ -17,19 +17,22 @@
#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_UTILS_H
#define ANDROID_FRAMEWORKS_ML_NN_COMMON_UTILS_H
+#include <android-base/logging.h>
+
+#include <set>
+#include <string>
+#include <tuple>
+#include <vector>
+
#include "HalInterfaces.h"
#include "NeuralNetworks.h"
#include "ValidateHal.h"
-#include <android-base/logging.h>
-#include <set>
-#include <vector>
-
namespace android {
namespace nn {
// The number of data types (OperandCode) defined in NeuralNetworks.h.
-const int kNumberOfDataTypes = 14;
+const int kNumberOfDataTypes = 15;
// The number of operation types (OperationCode) defined in NeuralNetworks.h.
const int kNumberOfOperationTypes = 95;
@@ -153,25 +156,51 @@
std::ostringstream mBuffer;
};
+template <HalVersion version>
+struct VersionedType {};
+
+template <>
+struct VersionedType<HalVersion::V1_2> {
+ using OperandPerformance = hal::V1_2::Capabilities::OperandPerformance;
+ using OperandType = hal::V1_2::OperandType;
+};
+
+template <>
+struct VersionedType<HalVersion::V1_3> {
+ using OperandPerformance = hal::V1_3::Capabilities::OperandPerformance;
+ using OperandType = hal::V1_3::OperandType;
+};
+
+template <HalVersion version>
+using VersionedOperandPerformance = typename VersionedType<version>::OperandPerformance;
+template <HalVersion version>
+using VersionedOperandType = typename VersionedType<version>::OperandType;
+
} // namespace
// Return a vector with one entry for each non extension OperandType, set to the
// specified PerformanceInfo value. The vector will be sorted by OperandType.
-hal::hidl_vec<hal::Capabilities::OperandPerformance> nonExtensionOperandPerformance(
+template <HalVersion version>
+hal::hidl_vec<VersionedOperandPerformance<version>> nonExtensionOperandPerformance(
hal::PerformanceInfo perf);
// Update the vector entry corresponding to the specified OperandType with the
// specified PerformanceInfo value. The vector must already have an entry for
// that OperandType, and must be sorted by OperandType.
-void update(hal::hidl_vec<hal::Capabilities::OperandPerformance>* operandPerformance,
- hal::OperandType type, hal::PerformanceInfo perf);
+void update(hal::hidl_vec<hal::V1_2::Capabilities::OperandPerformance>* operandPerformance,
+ hal::V1_2::OperandType type, hal::PerformanceInfo perf);
+void update(hal::hidl_vec<hal::V1_3::Capabilities::OperandPerformance>* operandPerformance,
+ hal::V1_3::OperandType type, hal::PerformanceInfo perf);
// Look for a vector entry corresponding to the specified OperandType. If
// found, return the associated PerformanceInfo. If not, return a pessimistic
// PerformanceInfo (FLT_MAX). The vector must be sorted by OperandType.
hal::PerformanceInfo lookup(
- const hal::hidl_vec<hal::Capabilities::OperandPerformance>& operandPerformance,
- hal::OperandType type);
+ const hal::hidl_vec<hal::V1_2::Capabilities::OperandPerformance>& operandPerformance,
+ hal::V1_2::OperandType type);
+hal::PerformanceInfo lookup(
+ const hal::hidl_vec<hal::V1_3::Capabilities::OperandPerformance>& operandPerformance,
+ hal::V1_3::OperandType type);
// Returns true if an operand type is an extension type.
bool isExtensionOperandType(hal::OperandType type);
@@ -233,6 +262,7 @@
void logModelToInfo(const hal::V1_0::Model& model);
void logModelToInfo(const hal::V1_1::Model& model);
void logModelToInfo(const hal::V1_2::Model& model);
+void logModelToInfo(const hal::V1_3::Model& model);
inline std::string toString(uint32_t obj) {
return std::to_string(obj);
@@ -257,6 +287,8 @@
return "HAL version 1.1";
case HalVersion::V1_2:
return "HAL version 1.2";
+ case HalVersion::V1_3:
+ return "HAL version 1.3";
}
}
@@ -310,14 +342,19 @@
bool compliantWithV1_0(const hal::V1_0::Capabilities& capabilities);
bool compliantWithV1_0(const hal::V1_1::Capabilities& capabilities);
bool compliantWithV1_0(const hal::V1_2::Capabilities& capabilities);
+bool compliantWithV1_0(const hal::V1_3::Capabilities& capabilities);
bool compliantWithV1_1(const hal::V1_0::Capabilities& capabilities);
bool compliantWithV1_1(const hal::V1_1::Capabilities& capabilities);
bool compliantWithV1_1(const hal::V1_2::Capabilities& capabilities);
+bool compliantWithV1_1(const hal::V1_3::Capabilities& capabilities);
bool compliantWithV1_2(const hal::V1_0::Capabilities& capabilities);
bool compliantWithV1_2(const hal::V1_1::Capabilities& capabilities);
bool compliantWithV1_2(const hal::V1_2::Capabilities& capabilities);
-
-bool compliantWithV1_0(const hal::V1_2::Operand& operand);
+bool compliantWithV1_2(const hal::V1_3::Capabilities& capabilities);
+bool compliantWithV1_3(const hal::V1_0::Capabilities& capabilities);
+bool compliantWithV1_3(const hal::V1_1::Capabilities& capabilities);
+bool compliantWithV1_3(const hal::V1_2::Capabilities& capabilities);
+bool compliantWithV1_3(const hal::V1_3::Capabilities& capabilities);
// If noncompliantOperations != nullptr, then
// precondition: noncompliantOperations->empty()
@@ -329,41 +366,75 @@
bool compliantWithV1_0(const hal::V1_1::Model& model);
bool compliantWithV1_0(const hal::V1_2::Model& model,
std::set<uint32_t>* noncompliantOperations = nullptr);
+bool compliantWithV1_0(const hal::V1_3::Model& model,
+ std::set<uint32_t>* noncompliantOperations = nullptr);
bool compliantWithV1_1(const hal::V1_0::Model& model);
bool compliantWithV1_1(const hal::V1_1::Model& model);
bool compliantWithV1_1(const hal::V1_2::Model& model,
std::set<uint32_t>* noncompliantOperations = nullptr);
+bool compliantWithV1_1(const hal::V1_3::Model& model,
+ std::set<uint32_t>* noncompliantOperations = nullptr);
+bool compliantWithV1_2(const hal::V1_0::Model& model);
+bool compliantWithV1_2(const hal::V1_1::Model& model);
+bool compliantWithV1_2(const hal::V1_2::Model& model,
+ std::set<uint32_t>* noncompliantOperations = nullptr);
+bool compliantWithV1_2(const hal::V1_3::Model& model,
+ std::set<uint32_t>* noncompliantOperations = nullptr);
hal::V1_0::Capabilities convertToV1_0(const hal::V1_0::Capabilities& capabilities);
hal::V1_0::Capabilities convertToV1_0(const hal::V1_1::Capabilities& capabilities);
hal::V1_0::Capabilities convertToV1_0(const hal::V1_2::Capabilities& capabilities);
+hal::V1_0::Capabilities convertToV1_0(const hal::V1_3::Capabilities& capabilities);
hal::V1_1::Capabilities convertToV1_1(const hal::V1_0::Capabilities& capabilities);
hal::V1_1::Capabilities convertToV1_1(const hal::V1_1::Capabilities& capabilities);
hal::V1_1::Capabilities convertToV1_1(const hal::V1_2::Capabilities& capabilities);
+hal::V1_1::Capabilities convertToV1_1(const hal::V1_3::Capabilities& capabilities);
hal::V1_2::Capabilities convertToV1_2(const hal::V1_0::Capabilities& capabilities);
hal::V1_2::Capabilities convertToV1_2(const hal::V1_1::Capabilities& capabilities);
hal::V1_2::Capabilities convertToV1_2(const hal::V1_2::Capabilities& capabilities);
+hal::V1_2::Capabilities convertToV1_2(const hal::V1_3::Capabilities& capabilities);
+hal::V1_3::Capabilities convertToV1_3(const hal::V1_0::Capabilities& capabilities);
+hal::V1_3::Capabilities convertToV1_3(const hal::V1_1::Capabilities& capabilities);
+hal::V1_3::Capabilities convertToV1_3(const hal::V1_2::Capabilities& capabilities);
+hal::V1_3::Capabilities convertToV1_3(const hal::V1_3::Capabilities& capabilities);
hal::V1_0::Model convertToV1_0(const hal::V1_0::Model& model);
hal::V1_0::Model convertToV1_0(const hal::V1_1::Model& model);
hal::V1_0::Model convertToV1_0(const hal::V1_2::Model& model);
+hal::V1_0::Model convertToV1_0(const hal::V1_3::Model& model);
hal::V1_1::Model convertToV1_1(const hal::V1_0::Model& model);
hal::V1_1::Model convertToV1_1(const hal::V1_1::Model& model);
hal::V1_1::Model convertToV1_1(const hal::V1_2::Model& model);
+hal::V1_1::Model convertToV1_1(const hal::V1_3::Model& model);
hal::V1_2::Model convertToV1_2(const hal::V1_0::Model& model);
hal::V1_2::Model convertToV1_2(const hal::V1_1::Model& model);
hal::V1_2::Model convertToV1_2(const hal::V1_2::Model& model);
+hal::V1_2::Model convertToV1_2(const hal::V1_3::Model& model);
+hal::V1_3::Model convertToV1_3(const hal::V1_0::Model& model);
+hal::V1_3::Model convertToV1_3(const hal::V1_1::Model& model);
+hal::V1_3::Model convertToV1_3(const hal::V1_2::Model& model);
+hal::V1_3::Model convertToV1_3(const hal::V1_3::Model& model);
hal::V1_0::OperationType uncheckedConvertToV1_0(hal::V1_2::OperationType type);
hal::V1_1::OperationType uncheckedConvertToV1_1(hal::V1_2::OperationType type);
hal::V1_0::Operand convertToV1_0(const hal::V1_2::Operand& operand);
-
+hal::V1_0::Operand convertToV1_0(const hal::V1_3::Operand& operand);
hal::V1_2::Operand convertToV1_2(const hal::V1_0::Operand& operand);
-hal::V1_2::Operand convertToV1_2(const hal::V1_2::Operand& operand);
+hal::V1_2::Operand convertToV1_2(const hal::V1_3::Operand& operand);
+hal::V1_3::Operand convertToV1_3(const hal::V1_0::Operand& operand);
+hal::V1_3::Operand convertToV1_3(const hal::V1_2::Operand& operand);
+hal::V1_3::Operand convertToV1_3(const hal::V1_3::Operand& operand);
+hal::hidl_vec<hal::V1_0::Operand> convertToV1_0(const hal::hidl_vec<hal::V1_0::Operand>& operands);
+hal::hidl_vec<hal::V1_0::Operand> convertToV1_0(const hal::hidl_vec<hal::V1_2::Operand>& operands);
+hal::hidl_vec<hal::V1_0::Operand> convertToV1_0(const hal::hidl_vec<hal::V1_3::Operand>& operands);
hal::hidl_vec<hal::V1_2::Operand> convertToV1_2(const hal::hidl_vec<hal::V1_0::Operand>& operands);
hal::hidl_vec<hal::V1_2::Operand> convertToV1_2(const hal::hidl_vec<hal::V1_2::Operand>& operands);
+hal::hidl_vec<hal::V1_2::Operand> convertToV1_2(const hal::hidl_vec<hal::V1_3::Operand>& operands);
+hal::hidl_vec<hal::V1_3::Operand> convertToV1_3(const hal::hidl_vec<hal::V1_0::Operand>& operands);
+hal::hidl_vec<hal::V1_3::Operand> convertToV1_3(const hal::hidl_vec<hal::V1_2::Operand>& operands);
+hal::hidl_vec<hal::V1_3::Operand> convertToV1_3(const hal::hidl_vec<hal::V1_3::Operand>& operands);
#ifdef NN_DEBUGGABLE
uint32_t getProp(const char* str, uint32_t defaultValue = 0);
diff --git a/common/include/ValidateHal.h b/common/include/ValidateHal.h
index 40733bb..bfe1483 100644
--- a/common/include/ValidateHal.h
+++ b/common/include/ValidateHal.h
@@ -27,7 +27,8 @@
V1_0,
V1_1,
V1_2,
- LATEST = V1_2,
+ V1_3,
+ LATEST = V1_3,
};
// Verifies that the model is valid, i.e. it is consistent, takes
@@ -55,6 +56,7 @@
bool validOperandType(hal::V1_0::OperandType operand);
bool validOperandType(hal::V1_2::OperandType operand);
+bool validOperandType(hal::V1_3::OperandType operand);
// Verfies that the memory pool is valid in the specified HAL version.
bool validatePool(const hal::hidl_memory& pool, HalVersion ver = HalVersion::LATEST);
diff --git a/common/operations/BidirectionalSequenceLSTM.h b/common/operations/BidirectionalSequenceLSTM.h
index 77859fc..fdf21d8 100644
--- a/common/operations/BidirectionalSequenceLSTM.h
+++ b/common/operations/BidirectionalSequenceLSTM.h
@@ -24,7 +24,6 @@
#include <vector>
#include "ActivationFunctor.h"
-#include "HalOperation.h"
#include "LSTM.h"
#include "OperationsUtils.h"
@@ -35,12 +34,11 @@
class BidirectionalSequenceLSTM {
public:
- BidirectionalSequenceLSTM(const hardware::neuralnetworks::V1_2::Operation& operation,
+ BidirectionalSequenceLSTM(const hal::Operation& operation,
std::vector<RunTimeOperandInfo>& operands);
- bool Prepare(const hardware::neuralnetworks::V1_2::Operation& operation,
- std::vector<RunTimeOperandInfo>& operands, Shape* fwOutputShape,
- Shape* bwOutputShape);
+ bool Prepare(const hal::Operation& operation, std::vector<RunTimeOperandInfo>& operands,
+ Shape* fwOutputShape, Shape* bwOutputShape);
bool Eval();
// Input Tensors of size {max_time, n_batch, n_input}
diff --git a/common/operations/EmbeddingLookup.h b/common/operations/EmbeddingLookup.h
index 9109ddf..203ab5f 100644
--- a/common/operations/EmbeddingLookup.h
+++ b/common/operations/EmbeddingLookup.h
@@ -17,10 +17,10 @@
#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATIONS_EMBEDDING_LOOKUP_H
#define ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATIONS_EMBEDDING_LOOKUP_H
-#include "HalOperation.h"
-
#include <vector>
+#include "HalInterfaces.h"
+
namespace android {
namespace nn {
@@ -28,8 +28,7 @@
class EmbeddingLookup {
public:
- EmbeddingLookup(const hardware::neuralnetworks::V1_2::Operation& operation,
- std::vector<RunTimeOperandInfo>& operands);
+ EmbeddingLookup(const hal::Operation& operation, std::vector<RunTimeOperandInfo>& operands);
bool Eval();
diff --git a/common/operations/HalOperation.h b/common/operations/HalOperation.h
deleted file mode 100644
index d89e83a..0000000
--- a/common/operations/HalOperation.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATIONS_HAL_OPERATION_H
-#define ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATIONS_HAL_OPERATION_H
-
-namespace android::hardware::neuralnetworks::V1_2 {
-
-// Individual operation implementations should not depend on the HAL interface,
-// but we have some that do. We use a forward declaration instead of an explicit
-// blueprint dependency to hide this fact.
-struct Operation;
-
-} // namespace android::hardware::neuralnetworks::V1_2
-
-#endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATIONS_HAL_OPERATION_H
diff --git a/common/operations/HashtableLookup.h b/common/operations/HashtableLookup.h
index 854e7df..9e119ec 100644
--- a/common/operations/HashtableLookup.h
+++ b/common/operations/HashtableLookup.h
@@ -17,10 +17,10 @@
#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATIONS_HASHTABLE_LOOKUP_H
#define ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATIONS_HASHTABLE_LOOKUP_H
-#include "HalOperation.h"
-
#include <vector>
+#include "HalInterfaces.h"
+
namespace android {
namespace nn {
@@ -28,8 +28,7 @@
class HashtableLookup {
public:
- HashtableLookup(const hardware::neuralnetworks::V1_2::Operation& operation,
- std::vector<RunTimeOperandInfo>& operands);
+ HashtableLookup(const hal::Operation& operation, std::vector<RunTimeOperandInfo>& operands);
bool Eval();
diff --git a/common/operations/LSHProjection.h b/common/operations/LSHProjection.h
index 8919425..5f18dfd 100644
--- a/common/operations/LSHProjection.h
+++ b/common/operations/LSHProjection.h
@@ -17,10 +17,10 @@
#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATIONS_LSHPROJECTION_H
#define ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATIONS_LSHPROJECTION_H
-#include "HalOperation.h"
-
#include <vector>
+#include "HalInterfaces.h"
+
namespace android {
namespace nn {
@@ -36,11 +36,10 @@
class LSHProjection {
public:
- LSHProjection(const hardware::neuralnetworks::V1_2::Operation& operation,
- std::vector<RunTimeOperandInfo>& operands);
+ LSHProjection(const hal::Operation& operation, std::vector<RunTimeOperandInfo>& operands);
- static bool Prepare(const hardware::neuralnetworks::V1_2::Operation& operation,
- std::vector<RunTimeOperandInfo>& operands, Shape* outputShape);
+ static bool Prepare(const hal::Operation& operation, std::vector<RunTimeOperandInfo>& operands,
+ Shape* outputShape);
template <typename T>
bool Eval();
diff --git a/common/operations/LSTM.h b/common/operations/LSTM.h
index d0545c4..f91619c 100644
--- a/common/operations/LSTM.h
+++ b/common/operations/LSTM.h
@@ -17,12 +17,14 @@
#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATIONS_LSTM_H
#define ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATIONS_LSTM_H
-#include "ActivationFunctor.h"
-#include "HalOperation.h"
-
#include <tensorflow/lite/kernels/internal/tensor_utils.h>
+
#include <algorithm>
#include <cmath>
+#include <vector>
+
+#include "ActivationFunctor.h"
+#include "HalInterfaces.h"
namespace android {
namespace nn {
@@ -45,12 +47,11 @@
class LSTMCell {
public:
- LSTMCell(const hardware::neuralnetworks::V1_2::Operation& operation,
- std::vector<RunTimeOperandInfo>& operands);
+ LSTMCell(const hal::Operation& operation, std::vector<RunTimeOperandInfo>& operands);
- bool Prepare(const hardware::neuralnetworks::V1_2::Operation& operation,
- std::vector<RunTimeOperandInfo>& operands, Shape* scratchShape,
- Shape* outputStateShape, Shape* cellStateShape, Shape* outputShape);
+ bool Prepare(const hal::Operation& operation, std::vector<RunTimeOperandInfo>& operands,
+ Shape* scratchShape, Shape* outputStateShape, Shape* cellStateShape,
+ Shape* outputShape);
bool Eval();
// Input Tensors of size {n_batch, n_input}
diff --git a/common/operations/Multinomial.h b/common/operations/Multinomial.h
index 8eb6ea3..54072e9 100644
--- a/common/operations/Multinomial.h
+++ b/common/operations/Multinomial.h
@@ -17,12 +17,13 @@
#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATIONS_MULTINOMIAL_H
#define ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATIONS_MULTINOMIAL_H
-#include "HalOperation.h"
-
#include <tensorflow/lite/kernels/internal/tensor_utils.h>
#include <algorithm>
#include <cmath>
+#include <vector>
+
+#include "HalInterfaces.h"
namespace android {
namespace nn {
@@ -32,11 +33,10 @@
class Multinomial {
public:
- Multinomial(const hardware::neuralnetworks::V1_2::Operation& operation,
- std::vector<RunTimeOperandInfo>& operands);
+ Multinomial(const hal::Operation& operation, std::vector<RunTimeOperandInfo>& operands);
- static bool Prepare(const hardware::neuralnetworks::V1_2::Operation& operation,
- std::vector<RunTimeOperandInfo>& operands, Shape* outputShape);
+ static bool Prepare(const hal::Operation& operation, std::vector<RunTimeOperandInfo>& operands,
+ Shape* outputShape);
bool Eval();
static constexpr int kInputTensor = 0;
diff --git a/common/operations/QuantizedLSTM.h b/common/operations/QuantizedLSTM.h
index 007ae23..b5d2b25 100644
--- a/common/operations/QuantizedLSTM.h
+++ b/common/operations/QuantizedLSTM.h
@@ -17,11 +17,10 @@
#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATIONS_QUANTIZED_LSTM_H
#define ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATIONS_QUANTIZED_LSTM_H
-#include "HalOperation.h"
-#include "OperationsUtils.h"
-
#include <vector>
+#include "OperationsUtils.h"
+
namespace android {
namespace nn {
@@ -29,12 +28,10 @@
class QuantizedLSTMCell {
public:
- QuantizedLSTMCell(const hardware::neuralnetworks::V1_2::Operation& operation,
- std::vector<RunTimeOperandInfo>& operands);
+ QuantizedLSTMCell(const hal::Operation& operation, std::vector<RunTimeOperandInfo>& operands);
- static bool prepare(const hardware::neuralnetworks::V1_2::Operation& operation,
- std::vector<RunTimeOperandInfo>& operands, Shape* cellStateShape,
- Shape* outputShape);
+ static bool prepare(const hal::Operation& operation, std::vector<RunTimeOperandInfo>& operands,
+ Shape* cellStateShape, Shape* outputShape);
bool eval();
// Inputs:
diff --git a/common/operations/RNN.h b/common/operations/RNN.h
index f4affe0..dc55932 100644
--- a/common/operations/RNN.h
+++ b/common/operations/RNN.h
@@ -17,8 +17,10 @@
#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATIONS_RNN_H
#define ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATIONS_RNN_H
+#include <vector>
+
#include "ActivationFunctor.h"
-#include "HalOperation.h"
+#include "HalInterfaces.h"
namespace android {
namespace nn {
@@ -28,12 +30,10 @@
class RNN {
public:
- RNN(const hardware::neuralnetworks::V1_2::Operation& operation,
- std::vector<RunTimeOperandInfo>& operands);
+ RNN(const hal::Operation& operation, std::vector<RunTimeOperandInfo>& operands);
- static bool Prepare(const hardware::neuralnetworks::V1_2::Operation& operation,
- std::vector<RunTimeOperandInfo>& operands, Shape* hiddenStateShape,
- Shape* outputShape);
+ static bool Prepare(const hal::Operation& operation, std::vector<RunTimeOperandInfo>& operands,
+ Shape* hiddenStateShape, Shape* outputShape);
bool Eval();
static constexpr int kInputTensor = 0;
diff --git a/common/operations/SVDF.h b/common/operations/SVDF.h
index 2073f48..a8a9d56 100644
--- a/common/operations/SVDF.h
+++ b/common/operations/SVDF.h
@@ -17,11 +17,13 @@
#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATIONS_SVDF_H
#define ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATIONS_SVDF_H
-#include "HalOperation.h"
-
#include <tensorflow/lite/kernels/internal/tensor_utils.h>
+
#include <algorithm>
#include <cmath>
+#include <vector>
+
+#include "HalInterfaces.h"
namespace android {
namespace nn {
@@ -36,12 +38,10 @@
class SVDF {
public:
- SVDF(const hardware::neuralnetworks::V1_2::Operation& operation,
- std::vector<RunTimeOperandInfo>& operands);
+ SVDF(const hal::Operation& operation, std::vector<RunTimeOperandInfo>& operands);
- static bool Prepare(const hardware::neuralnetworks::V1_2::Operation& operation,
- std::vector<RunTimeOperandInfo>& operands, Shape* stateShape,
- Shape* outputShape);
+ static bool Prepare(const hal::Operation& operation, std::vector<RunTimeOperandInfo>& operands,
+ Shape* stateShape, Shape* outputShape);
bool Eval();
static constexpr int kInputTensor = 0;
diff --git a/driver/sample/Android.bp b/driver/sample/Android.bp
index b94e72b..4c95f36 100644
--- a/driver/sample/Android.bp
+++ b/driver/sample/Android.bp
@@ -30,6 +30,7 @@
"android.hardware.neuralnetworks@1.0",
"android.hardware.neuralnetworks@1.1",
"android.hardware.neuralnetworks@1.2",
+ "android.hardware.neuralnetworks@1.3",
"android.hidl.allocator@1.0",
"android.hidl.memory@1.0",
"libbase",
@@ -58,43 +59,43 @@
}
cc_binary {
- name: "android.hardware.neuralnetworks@1.2-service-sample-all",
+ name: "android.hardware.neuralnetworks@1.3-service-sample-all",
srcs: ["SampleDriverAll.cpp"],
defaults: ["NeuralNetworksSampleDriver_server_defaults"],
- init_rc: ["config/android.hardware.neuralnetworks@1.2-service-sample-all.rc"],
- vintf_fragments: ["config/android.hardware.neuralnetworks@1.2-service-sample-all.xml"],
+ init_rc: ["config/android.hardware.neuralnetworks@1.3-service-sample-all.rc"],
+ vintf_fragments: ["config/android.hardware.neuralnetworks@1.3-service-sample-all.xml"],
}
cc_binary {
- name: "android.hardware.neuralnetworks@1.2-service-sample-float-fast",
+ name: "android.hardware.neuralnetworks@1.3-service-sample-float-fast",
srcs: ["SampleDriverFloatFast.cpp"],
defaults: ["NeuralNetworksSampleDriver_server_defaults"],
- init_rc: ["config/android.hardware.neuralnetworks@1.2-service-sample-float-fast.rc"],
- vintf_fragments: ["config/android.hardware.neuralnetworks@1.2-service-sample-float-fast.xml"],
+ init_rc: ["config/android.hardware.neuralnetworks@1.3-service-sample-float-fast.rc"],
+ vintf_fragments: ["config/android.hardware.neuralnetworks@1.3-service-sample-float-fast.xml"],
}
cc_binary {
- name: "android.hardware.neuralnetworks@1.2-service-sample-float-slow",
+ name: "android.hardware.neuralnetworks@1.3-service-sample-float-slow",
srcs: ["SampleDriverFloatSlow.cpp"],
defaults: ["NeuralNetworksSampleDriver_server_defaults"],
- init_rc: ["config/android.hardware.neuralnetworks@1.2-service-sample-float-slow.rc"],
- vintf_fragments: ["config/android.hardware.neuralnetworks@1.2-service-sample-float-slow.xml"],
+ init_rc: ["config/android.hardware.neuralnetworks@1.3-service-sample-float-slow.rc"],
+ vintf_fragments: ["config/android.hardware.neuralnetworks@1.3-service-sample-float-slow.xml"],
}
cc_binary {
- name: "android.hardware.neuralnetworks@1.2-service-sample-quant",
+ name: "android.hardware.neuralnetworks@1.3-service-sample-quant",
srcs: ["SampleDriverQuant.cpp"],
defaults: ["NeuralNetworksSampleDriver_server_defaults"],
- init_rc: ["config/android.hardware.neuralnetworks@1.2-service-sample-quant.rc"],
- vintf_fragments: ["config/android.hardware.neuralnetworks@1.2-service-sample-quant.xml"],
+ init_rc: ["config/android.hardware.neuralnetworks@1.3-service-sample-quant.rc"],
+ vintf_fragments: ["config/android.hardware.neuralnetworks@1.3-service-sample-quant.xml"],
}
cc_binary {
- name: "android.hardware.neuralnetworks@1.2-service-sample-minimal",
+ name: "android.hardware.neuralnetworks@1.3-service-sample-minimal",
srcs: ["SampleDriverMinimal.cpp"],
defaults: ["NeuralNetworksSampleDriver_server_defaults"],
- init_rc: ["config/android.hardware.neuralnetworks@1.2-service-sample-minimal.rc"],
- vintf_fragments: ["config/android.hardware.neuralnetworks@1.2-service-sample-minimal.xml"],
+ init_rc: ["config/android.hardware.neuralnetworks@1.3-service-sample-minimal.rc"],
+ vintf_fragments: ["config/android.hardware.neuralnetworks@1.3-service-sample-minimal.xml"],
}
cc_library_static {
diff --git a/driver/sample/SampleDriver.cpp b/driver/sample/SampleDriver.cpp
index 701eab6..0448c2d 100644
--- a/driver/sample/SampleDriver.cpp
+++ b/driver/sample/SampleDriver.cpp
@@ -18,18 +18,25 @@
#include "SampleDriver.h"
+#include <android-base/logging.h>
+#include <hidl/LegacySupport.h>
+
+#include <algorithm>
+#include <chrono>
+#include <map>
+#include <memory>
+#include <optional>
+#include <thread>
+#include <tuple>
+#include <utility>
+#include <vector>
+
#include "CpuExecutor.h"
#include "ExecutionBurstServer.h"
#include "HalInterfaces.h"
#include "Tracing.h"
#include "ValidateHal.h"
-#include <android-base/logging.h>
-#include <hidl/LegacySupport.h>
-#include <chrono>
-#include <optional>
-#include <thread>
-
namespace android {
namespace nn {
namespace sample_driver {
@@ -55,7 +62,7 @@
Return<void> SampleDriver::getCapabilities(getCapabilities_cb cb) {
NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INITIALIZATION,
"SampleDriver::getCapabilities");
- return getCapabilities_1_2([&](ErrorStatus error, const V1_2::Capabilities& capabilities) {
+ return getCapabilities_1_3([&](ErrorStatus error, const V1_3::Capabilities& capabilities) {
// TODO(dgross): Do we need to check compliantWithV1_0(capabilities)?
cb(error, convertToV1_0(capabilities));
});
@@ -64,12 +71,21 @@
Return<void> SampleDriver::getCapabilities_1_1(getCapabilities_1_1_cb cb) {
NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INITIALIZATION,
"SampleDriver::getCapabilities_1_1");
- return getCapabilities_1_2([&](ErrorStatus error, const V1_2::Capabilities& capabilities) {
+ return getCapabilities_1_3([&](ErrorStatus error, const V1_3::Capabilities& capabilities) {
// TODO(dgross): Do we need to check compliantWithV1_1(capabilities)?
cb(error, convertToV1_1(capabilities));
});
}
+Return<void> SampleDriver::getCapabilities_1_2(getCapabilities_1_2_cb cb) {
+ NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INITIALIZATION,
+ "SampleDriver::getCapabilities_1_2");
+ return getCapabilities_1_3([&](ErrorStatus error, const V1_3::Capabilities& capabilities) {
+ // TODO(dgross): Do we need to check compliantWithV1_2(capabilities)?
+ cb(error, convertToV1_2(capabilities));
+ });
+}
+
Return<void> SampleDriver::getVersionString(getVersionString_cb cb) {
NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INITIALIZATION,
"SampleDriver::getVersionString");
@@ -96,11 +112,10 @@
"SampleDriver::getSupportedOperations");
if (!validateModel(model)) {
VLOG(DRIVER) << "getSupportedOperations";
- std::vector<bool> supported;
- cb(ErrorStatus::INVALID_ARGUMENT, supported);
+ cb(ErrorStatus::INVALID_ARGUMENT, {});
return Void();
}
- return getSupportedOperations_1_2(convertToV1_2(model), cb);
+ return getSupportedOperations_1_3(convertToV1_3(model), cb);
}
Return<void> SampleDriver::getSupportedOperations_1_1(const V1_1::Model& model,
@@ -109,11 +124,22 @@
"SampleDriver::getSupportedOperations_1_1");
if (!validateModel(model)) {
VLOG(DRIVER) << "getSupportedOperations_1_1";
- std::vector<bool> supported;
- cb(ErrorStatus::INVALID_ARGUMENT, supported);
+ cb(ErrorStatus::INVALID_ARGUMENT, {});
return Void();
}
- return getSupportedOperations_1_2(convertToV1_2(model), cb);
+ return getSupportedOperations_1_3(convertToV1_3(model), cb);
+}
+
+Return<void> SampleDriver::getSupportedOperations_1_2(const V1_2::Model& model,
+ getSupportedOperations_1_2_cb cb) {
+ NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_COMPILATION,
+ "SampleDriver::getSupportedOperations_1_2");
+ if (!validateModel(model)) {
+ VLOG(DRIVER) << "getSupportedOperations_1_2";
+ cb(ErrorStatus::INVALID_ARGUMENT, {});
+ return Void();
+ }
+ return getSupportedOperations_1_3(convertToV1_3(model), cb);
}
Return<void> SampleDriver::getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb cb) {
@@ -161,7 +187,7 @@
// asynchronously prepare the model from a new, detached thread
std::thread([model, driver, callback] {
sp<SamplePreparedModel> preparedModel =
- new SamplePreparedModel(convertToV1_2(model), driver);
+ new SamplePreparedModel(convertToV1_3(model), driver);
if (!preparedModel->initialize()) {
notify(callback, ErrorStatus::INVALID_ARGUMENT, nullptr);
return;
@@ -193,6 +219,14 @@
return prepareModelBase(model, this, preference, callback);
}
+Return<ErrorStatus> SampleDriver::prepareModel_1_3(
+ const V1_3::Model& model, ExecutionPreference preference, const hidl_vec<hidl_handle>&,
+ const hidl_vec<hidl_handle>&, const CacheToken&,
+ const sp<V1_2::IPreparedModelCallback>& callback) {
+ NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_COMPILATION, "SampleDriver::prepareModel_1_3");
+ return prepareModelBase(model, this, preference, callback);
+}
+
Return<ErrorStatus> SampleDriver::prepareModelFromCache(
const hidl_vec<hidl_handle>&, const hidl_vec<hidl_handle>&, const CacheToken&,
const sp<V1_2::IPreparedModelCallback>& callback) {
diff --git a/driver/sample/SampleDriver.h b/driver/sample/SampleDriver.h
index eb7b468..a85dcd5 100644
--- a/driver/sample/SampleDriver.h
+++ b/driver/sample/SampleDriver.h
@@ -17,12 +17,13 @@
#ifndef ANDROID_FRAMEWORKS_ML_NN_DRIVER_SAMPLE_SAMPLE_DRIVER_H
#define ANDROID_FRAMEWORKS_ML_NN_DRIVER_SAMPLE_SAMPLE_DRIVER_H
+#include <string>
+#include <vector>
+
#include "CpuExecutor.h"
#include "HalInterfaces.h"
#include "NeuralNetworks.h"
-#include <string>
-
namespace android {
namespace nn {
namespace sample_driver {
@@ -44,6 +45,7 @@
~SampleDriver() override {}
hal::Return<void> getCapabilities(getCapabilities_cb cb) override;
hal::Return<void> getCapabilities_1_1(getCapabilities_1_1_cb cb) override;
+ hal::Return<void> getCapabilities_1_2(getCapabilities_1_2_cb cb) override;
hal::Return<void> getVersionString(getVersionString_cb cb) override;
hal::Return<void> getType(getType_cb cb) override;
hal::Return<void> getSupportedExtensions(getSupportedExtensions_cb) override;
@@ -51,6 +53,8 @@
getSupportedOperations_cb cb) override;
hal::Return<void> getSupportedOperations_1_1(const hal::V1_1::Model& model,
getSupportedOperations_1_1_cb cb) override;
+ hal::Return<void> getSupportedOperations_1_2(const hal::V1_2::Model& model,
+ getSupportedOperations_1_2_cb cb) override;
hal::Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb cb) override;
hal::Return<hal::ErrorStatus> prepareModel(
const hal::V1_0::Model& model,
@@ -63,6 +67,11 @@
const hal::hidl_vec<hal::hidl_handle>& modelCache,
const hal::hidl_vec<hal::hidl_handle>& dataCache, const hal::CacheToken& token,
const sp<hal::V1_2::IPreparedModelCallback>& callback) override;
+ hal::Return<hal::ErrorStatus> prepareModel_1_3(
+ const hal::V1_3::Model& model, hal::ExecutionPreference preference,
+ const hal::hidl_vec<hal::hidl_handle>& modelCache,
+ const hal::hidl_vec<hal::hidl_handle>& dataCache, const hal::CacheToken& token,
+ const sp<hal::V1_2::IPreparedModelCallback>& callback) override;
hal::Return<hal::ErrorStatus> prepareModelFromCache(
const hal::hidl_vec<hal::hidl_handle>& modelCache,
const hal::hidl_vec<hal::hidl_handle>& dataCache, const hal::CacheToken& token,
diff --git a/driver/sample/SampleDriverFloatFast.cpp b/driver/sample/SampleDriverFloatFast.cpp
index 3611bba..701eb3b 100644
--- a/driver/sample/SampleDriverFloatFast.cpp
+++ b/driver/sample/SampleDriverFloatFast.cpp
@@ -16,15 +16,16 @@
#define LOG_TAG "SampleDriverFloatFast"
-#include "SampleDriver.h"
-
-#include "HalInterfaces.h"
-#include "Utils.h"
-#include "ValidateHal.h"
-
#include <android-base/logging.h>
#include <hidl/LegacySupport.h>
+
#include <thread>
+#include <vector>
+
+#include "HalInterfaces.h"
+#include "SampleDriver.h"
+#include "Utils.h"
+#include "ValidateHal.h"
namespace android {
namespace nn {
@@ -35,19 +36,19 @@
class SampleDriverFloatFast : public SampleDriver {
public:
SampleDriverFloatFast() : SampleDriver("sample-float-fast") {}
- Return<void> getCapabilities_1_2(getCapabilities_1_2_cb cb) override;
- Return<void> getSupportedOperations_1_2(const V1_2::Model& model,
- getSupportedOperations_1_2_cb cb) override;
+ Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override;
+ Return<void> getSupportedOperations_1_3(const V1_3::Model& model,
+ getSupportedOperations_1_3_cb cb) override;
};
-Return<void> SampleDriverFloatFast::getCapabilities_1_2(getCapabilities_1_2_cb cb) {
+Return<void> SampleDriverFloatFast::getCapabilities_1_3(getCapabilities_1_3_cb cb) {
android::nn::initVLogMask();
VLOG(DRIVER) << "getCapabilities()";
Capabilities capabilities = {
.relaxedFloat32toFloat16PerformanceScalar = {.execTime = 0.7f, .powerUsage = 1.1f},
.relaxedFloat32toFloat16PerformanceTensor = {.execTime = 0.7f, .powerUsage = 1.1f},
- .operandPerformance = nonExtensionOperandPerformance({1.0f, 1.0f})};
+ .operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>({1.0f, 1.0f})};
update(&capabilities.operandPerformance, OperandType::TENSOR_FLOAT32,
{.execTime = 0.8f, .powerUsage = 1.2f});
update(&capabilities.operandPerformance, OperandType::FLOAT32,
@@ -57,8 +58,8 @@
return Void();
}
-Return<void> SampleDriverFloatFast::getSupportedOperations_1_2(const V1_2::Model& model,
- getSupportedOperations_1_2_cb cb) {
+Return<void> SampleDriverFloatFast::getSupportedOperations_1_3(const V1_3::Model& model,
+ getSupportedOperations_1_3_cb cb) {
VLOG(DRIVER) << "getSupportedOperations()";
if (validateModel(model)) {
const size_t count = model.operations.size();
diff --git a/driver/sample/SampleDriverFloatSlow.cpp b/driver/sample/SampleDriverFloatSlow.cpp
index af49837..1966fd1 100644
--- a/driver/sample/SampleDriverFloatSlow.cpp
+++ b/driver/sample/SampleDriverFloatSlow.cpp
@@ -16,15 +16,16 @@
#define LOG_TAG "SampleDriverFloatSlow"
-#include "SampleDriver.h"
-
-#include "HalInterfaces.h"
-#include "Utils.h"
-#include "ValidateHal.h"
-
#include <android-base/logging.h>
#include <hidl/LegacySupport.h>
+
#include <thread>
+#include <vector>
+
+#include "HalInterfaces.h"
+#include "SampleDriver.h"
+#include "Utils.h"
+#include "ValidateHal.h"
namespace android {
namespace nn {
@@ -35,19 +36,19 @@
class SampleDriverFloatSlow : public SampleDriver {
public:
SampleDriverFloatSlow() : SampleDriver("sample-float-slow") {}
- Return<void> getCapabilities_1_2(getCapabilities_1_2_cb cb) override;
- Return<void> getSupportedOperations_1_2(const V1_2::Model& model,
- getSupportedOperations_1_2_cb cb) override;
+ Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override;
+ Return<void> getSupportedOperations_1_3(const V1_3::Model& model,
+ getSupportedOperations_1_3_cb cb) override;
};
-Return<void> SampleDriverFloatSlow::getCapabilities_1_2(getCapabilities_1_2_cb cb) {
+Return<void> SampleDriverFloatSlow::getCapabilities_1_3(getCapabilities_1_3_cb cb) {
android::nn::initVLogMask();
VLOG(DRIVER) << "getCapabilities()";
Capabilities capabilities = {
.relaxedFloat32toFloat16PerformanceScalar = {.execTime = 1.2f, .powerUsage = 0.6f},
.relaxedFloat32toFloat16PerformanceTensor = {.execTime = 1.2f, .powerUsage = 0.6f},
- .operandPerformance = nonExtensionOperandPerformance({1.0f, 1.0f})};
+ .operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>({1.0f, 1.0f})};
update(&capabilities.operandPerformance, OperandType::TENSOR_FLOAT32,
{.execTime = 1.3f, .powerUsage = 0.7f});
update(&capabilities.operandPerformance, OperandType::FLOAT32,
@@ -57,8 +58,8 @@
return Void();
}
-Return<void> SampleDriverFloatSlow::getSupportedOperations_1_2(const V1_2::Model& model,
- getSupportedOperations_1_2_cb cb) {
+Return<void> SampleDriverFloatSlow::getSupportedOperations_1_3(const V1_3::Model& model,
+ getSupportedOperations_1_3_cb cb) {
VLOG(DRIVER) << "getSupportedOperations()";
if (validateModel(model)) {
const size_t count = model.operations.size();
diff --git a/driver/sample/SampleDriverFull.cpp b/driver/sample/SampleDriverFull.cpp
index eb7fa8d..acbf9f3 100644
--- a/driver/sample/SampleDriverFull.cpp
+++ b/driver/sample/SampleDriverFull.cpp
@@ -18,6 +18,8 @@
#include "SampleDriverFull.h"
+#include <vector>
+
#include "Utils.h"
#include "ValidateHal.h"
@@ -27,19 +29,20 @@
using namespace hal;
-Return<void> SampleDriverFull::getCapabilities_1_2(getCapabilities_1_2_cb cb) {
+Return<void> SampleDriverFull::getCapabilities_1_3(getCapabilities_1_3_cb cb) {
android::nn::initVLogMask();
- VLOG(DRIVER) << "getCapabilities_1_2()";
- Capabilities capabilities = {.relaxedFloat32toFloat16PerformanceScalar = mPerf,
- .relaxedFloat32toFloat16PerformanceTensor = mPerf,
- .operandPerformance = nonExtensionOperandPerformance(mPerf)};
+ VLOG(DRIVER) << "getCapabilities_1_3()";
+ Capabilities capabilities = {
+ .relaxedFloat32toFloat16PerformanceScalar = mPerf,
+ .relaxedFloat32toFloat16PerformanceTensor = mPerf,
+ .operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>(mPerf)};
cb(ErrorStatus::NONE, capabilities);
return Void();
}
-Return<void> SampleDriverFull::getSupportedOperations_1_2(const V1_2::Model& model,
- getSupportedOperations_1_2_cb cb) {
- VLOG(DRIVER) << "getSupportedOperations_1_2()";
+Return<void> SampleDriverFull::getSupportedOperations_1_3(const V1_3::Model& model,
+ getSupportedOperations_1_3_cb cb) {
+ VLOG(DRIVER) << "getSupportedOperations_1_3()";
if (validateModel(model)) {
const size_t count = model.operations.size();
std::vector<bool> supported(count, true);
diff --git a/driver/sample/SampleDriverFull.h b/driver/sample/SampleDriverFull.h
index 66f4aa7..155463a 100644
--- a/driver/sample/SampleDriverFull.h
+++ b/driver/sample/SampleDriverFull.h
@@ -28,9 +28,9 @@
public:
SampleDriverFull(const char* name, hal::PerformanceInfo perf)
: SampleDriver(name), mPerf(perf) {}
- hal::Return<void> getCapabilities_1_2(getCapabilities_1_2_cb cb) override;
- hal::Return<void> getSupportedOperations_1_2(const hal::V1_2::Model& model,
- getSupportedOperations_1_2_cb cb) override;
+ hal::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override;
+ hal::Return<void> getSupportedOperations_1_3(const hal::V1_3::Model& model,
+ getSupportedOperations_1_3_cb cb) override;
private:
hal::PerformanceInfo mPerf;
diff --git a/driver/sample/SampleDriverMinimal.cpp b/driver/sample/SampleDriverMinimal.cpp
index e042034..a6cddd7 100644
--- a/driver/sample/SampleDriverMinimal.cpp
+++ b/driver/sample/SampleDriverMinimal.cpp
@@ -16,17 +16,18 @@
#define LOG_TAG "SampleDriverMinimal"
-#include "SampleDriver.h"
+#include <android-base/logging.h>
+#include <hidl/LegacySupport.h>
+
+#include <thread>
+#include <vector>
#include "HalInterfaces.h"
#include "NeuralNetworksOEM.h"
+#include "SampleDriver.h"
#include "Utils.h"
#include "ValidateHal.h"
-#include <android-base/logging.h>
-#include <hidl/LegacySupport.h>
-#include <thread>
-
namespace android {
namespace nn {
namespace sample_driver {
@@ -36,19 +37,19 @@
class SampleDriverMinimal : public SampleDriver {
public:
SampleDriverMinimal() : SampleDriver("sample-minimal") {}
- Return<void> getCapabilities_1_2(getCapabilities_1_2_cb cb) override;
- Return<void> getSupportedOperations_1_2(const V1_2::Model& model,
- getSupportedOperations_1_2_cb cb) override;
+ Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override;
+ Return<void> getSupportedOperations_1_3(const V1_3::Model& model,
+ getSupportedOperations_1_3_cb cb) override;
};
-Return<void> SampleDriverMinimal::getCapabilities_1_2(getCapabilities_1_2_cb cb) {
+Return<void> SampleDriverMinimal::getCapabilities_1_3(getCapabilities_1_3_cb cb) {
android::nn::initVLogMask();
VLOG(DRIVER) << "getCapabilities()";
Capabilities capabilities = {
.relaxedFloat32toFloat16PerformanceScalar = {.execTime = 0.4f, .powerUsage = 0.5f},
.relaxedFloat32toFloat16PerformanceTensor = {.execTime = 0.4f, .powerUsage = 0.5f},
- .operandPerformance = nonExtensionOperandPerformance({1.0f, 1.0f})};
+ .operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>({1.0f, 1.0f})};
update(&capabilities.operandPerformance, OperandType::TENSOR_FLOAT32,
{.execTime = 0.4f, .powerUsage = 0.5f});
update(&capabilities.operandPerformance, OperandType::FLOAT32,
@@ -58,8 +59,8 @@
return Void();
}
-Return<void> SampleDriverMinimal::getSupportedOperations_1_2(const V1_2::Model& model,
- getSupportedOperations_1_2_cb cb) {
+Return<void> SampleDriverMinimal::getSupportedOperations_1_3(const V1_3::Model& model,
+ getSupportedOperations_1_3_cb cb) {
VLOG(DRIVER) << "getSupportedOperations()";
if (validateModel(model)) {
const size_t count = model.operations.size();
diff --git a/driver/sample/SampleDriverQuant.cpp b/driver/sample/SampleDriverQuant.cpp
index 83fc550..87c257b 100644
--- a/driver/sample/SampleDriverQuant.cpp
+++ b/driver/sample/SampleDriverQuant.cpp
@@ -16,15 +16,16 @@
#define LOG_TAG "SampleDriverQuant"
-#include "SampleDriver.h"
-
-#include "HalInterfaces.h"
-#include "Utils.h"
-#include "ValidateHal.h"
-
#include <android-base/logging.h>
#include <hidl/LegacySupport.h>
+
#include <thread>
+#include <vector>
+
+#include "HalInterfaces.h"
+#include "SampleDriver.h"
+#include "Utils.h"
+#include "ValidateHal.h"
namespace android {
namespace nn {
@@ -35,26 +36,26 @@
class SampleDriverQuant : public SampleDriver {
public:
SampleDriverQuant() : SampleDriver("sample-quant") {}
- Return<void> getCapabilities_1_2(getCapabilities_1_2_cb cb) override;
- Return<void> getSupportedOperations_1_2(const V1_2::Model& model,
- getSupportedOperations_1_2_cb cb) override;
+ Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override;
+ Return<void> getSupportedOperations_1_3(const V1_3::Model& model,
+ getSupportedOperations_1_3_cb cb) override;
};
-Return<void> SampleDriverQuant::getCapabilities_1_2(getCapabilities_1_2_cb cb) {
+Return<void> SampleDriverQuant::getCapabilities_1_3(getCapabilities_1_3_cb cb) {
android::nn::initVLogMask();
VLOG(DRIVER) << "getCapabilities()";
Capabilities capabilities = {
.relaxedFloat32toFloat16PerformanceScalar = {.execTime = 50.0f, .powerUsage = 1.0f},
.relaxedFloat32toFloat16PerformanceTensor = {.execTime = 50.0f, .powerUsage = 1.0f},
- .operandPerformance = nonExtensionOperandPerformance({50.0f, 1.0f})};
+ .operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>({50.0f, 1.0f})};
cb(ErrorStatus::NONE, capabilities);
return Void();
}
-Return<void> SampleDriverQuant::getSupportedOperations_1_2(const V1_2::Model& model,
- getSupportedOperations_1_2_cb cb) {
+Return<void> SampleDriverQuant::getSupportedOperations_1_3(const V1_3::Model& model,
+ getSupportedOperations_1_3_cb cb) {
VLOG(DRIVER) << "getSupportedOperations()";
if (validateModel(model)) {
const size_t count = model.operations.size();
diff --git a/driver/sample/config/android.hardware.neuralnetworks@1.2-service-sample-float-fast.xml b/driver/sample/config/android.hardware.neuralnetworks@1.2-service-sample-float-fast.xml
deleted file mode 100644
index 42e860d..0000000
--- a/driver/sample/config/android.hardware.neuralnetworks@1.2-service-sample-float-fast.xml
+++ /dev/null
@@ -1,7 +0,0 @@
-<manifest version="1.0" type="device">
- <hal format="hidl">
- <name>android.hardware.neuralnetworks</name>
- <transport>hwbinder</transport>
- <fqname>@1.2::IDevice/sample-float-fast</fqname>
- </hal>
-</manifest>
diff --git a/driver/sample/config/android.hardware.neuralnetworks@1.2-service-sample-float-slow.xml b/driver/sample/config/android.hardware.neuralnetworks@1.2-service-sample-float-slow.xml
deleted file mode 100644
index b4128ab..0000000
--- a/driver/sample/config/android.hardware.neuralnetworks@1.2-service-sample-float-slow.xml
+++ /dev/null
@@ -1,7 +0,0 @@
-<manifest version="1.0" type="device">
- <hal format="hidl">
- <name>android.hardware.neuralnetworks</name>
- <transport>hwbinder</transport>
- <fqname>@1.2::IDevice/sample-float-slow</fqname>
- </hal>
-</manifest>
diff --git a/driver/sample/config/android.hardware.neuralnetworks@1.2-service-sample-quant.xml b/driver/sample/config/android.hardware.neuralnetworks@1.2-service-sample-quant.xml
deleted file mode 100644
index 3b5c6fb..0000000
--- a/driver/sample/config/android.hardware.neuralnetworks@1.2-service-sample-quant.xml
+++ /dev/null
@@ -1,7 +0,0 @@
-<manifest version="1.0" type="device">
- <hal format="hidl">
- <name>android.hardware.neuralnetworks</name>
- <transport>hwbinder</transport>
- <fqname>@1.2::IDevice/sample-quant</fqname>
- </hal>
-</manifest>
diff --git a/driver/sample/config/android.hardware.neuralnetworks@1.2-service-sample-all.rc b/driver/sample/config/android.hardware.neuralnetworks@1.3-service-sample-all.rc
similarity index 68%
rename from driver/sample/config/android.hardware.neuralnetworks@1.2-service-sample-all.rc
rename to driver/sample/config/android.hardware.neuralnetworks@1.3-service-sample-all.rc
index c5a0c70..5120eb4 100644
--- a/driver/sample/config/android.hardware.neuralnetworks@1.2-service-sample-all.rc
+++ b/driver/sample/config/android.hardware.neuralnetworks@1.3-service-sample-all.rc
@@ -1,4 +1,4 @@
-service neuralnetworks_hal_service_sample_all /vendor/bin/hw/android.hardware.neuralnetworks@1.2-service-sample-all
+service neuralnetworks_hal_service_sample_all /vendor/bin/hw/android.hardware.neuralnetworks@1.3-service-sample-all
class hal
user system
group system
diff --git a/driver/sample/config/android.hardware.neuralnetworks@1.2-service-sample-all.xml b/driver/sample/config/android.hardware.neuralnetworks@1.3-service-sample-all.xml
similarity index 78%
rename from driver/sample/config/android.hardware.neuralnetworks@1.2-service-sample-all.xml
rename to driver/sample/config/android.hardware.neuralnetworks@1.3-service-sample-all.xml
index db02de0..6638ed5 100644
--- a/driver/sample/config/android.hardware.neuralnetworks@1.2-service-sample-all.xml
+++ b/driver/sample/config/android.hardware.neuralnetworks@1.3-service-sample-all.xml
@@ -2,6 +2,6 @@
<hal format="hidl">
<name>android.hardware.neuralnetworks</name>
<transport>hwbinder</transport>
- <fqname>@1.2::IDevice/sample-all</fqname>
+ <fqname>@1.3::IDevice/sample-all</fqname>
</hal>
</manifest>
diff --git a/driver/sample/config/android.hardware.neuralnetworks@1.2-service-sample-float-fast.rc b/driver/sample/config/android.hardware.neuralnetworks@1.3-service-sample-float-fast.rc
similarity index 63%
rename from driver/sample/config/android.hardware.neuralnetworks@1.2-service-sample-float-fast.rc
rename to driver/sample/config/android.hardware.neuralnetworks@1.3-service-sample-float-fast.rc
index 89f8bf4..7271d39 100644
--- a/driver/sample/config/android.hardware.neuralnetworks@1.2-service-sample-float-fast.rc
+++ b/driver/sample/config/android.hardware.neuralnetworks@1.3-service-sample-float-fast.rc
@@ -1,4 +1,4 @@
-service neuralnetworks_hal_service_sample_float_fast /vendor/bin/hw/android.hardware.neuralnetworks@1.2-service-sample-float-fast
+service neuralnetworks_hal_service_sample_float_fast /vendor/bin/hw/android.hardware.neuralnetworks@1.3-service-sample-float-fast
class hal
user system
group system
diff --git a/driver/sample/config/android.hardware.neuralnetworks@1.2-service-sample-minimal.xml b/driver/sample/config/android.hardware.neuralnetworks@1.3-service-sample-float-fast.xml
similarity index 75%
copy from driver/sample/config/android.hardware.neuralnetworks@1.2-service-sample-minimal.xml
copy to driver/sample/config/android.hardware.neuralnetworks@1.3-service-sample-float-fast.xml
index 8603298..bb2e59d 100644
--- a/driver/sample/config/android.hardware.neuralnetworks@1.2-service-sample-minimal.xml
+++ b/driver/sample/config/android.hardware.neuralnetworks@1.3-service-sample-float-fast.xml
@@ -2,6 +2,6 @@
<hal format="hidl">
<name>android.hardware.neuralnetworks</name>
<transport>hwbinder</transport>
- <fqname>@1.2::IDevice/sample-minimal</fqname>
+ <fqname>@1.3::IDevice/sample-float-fast</fqname>
</hal>
</manifest>
diff --git a/driver/sample/config/android.hardware.neuralnetworks@1.2-service-sample-float-slow.rc b/driver/sample/config/android.hardware.neuralnetworks@1.3-service-sample-float-slow.rc
similarity index 63%
rename from driver/sample/config/android.hardware.neuralnetworks@1.2-service-sample-float-slow.rc
rename to driver/sample/config/android.hardware.neuralnetworks@1.3-service-sample-float-slow.rc
index 79d8f71..b11c61a 100644
--- a/driver/sample/config/android.hardware.neuralnetworks@1.2-service-sample-float-slow.rc
+++ b/driver/sample/config/android.hardware.neuralnetworks@1.3-service-sample-float-slow.rc
@@ -1,4 +1,4 @@
-service neuralnetworks_hal_service_sample_float_slow /vendor/bin/hw/android.hardware.neuralnetworks@1.2-service-sample-float-slow
+service neuralnetworks_hal_service_sample_float_slow /vendor/bin/hw/android.hardware.neuralnetworks@1.3-service-sample-float-slow
class hal
user system
group system
diff --git a/driver/sample/config/android.hardware.neuralnetworks@1.2-service-sample-minimal.xml b/driver/sample/config/android.hardware.neuralnetworks@1.3-service-sample-float-slow.xml
similarity index 75%
copy from driver/sample/config/android.hardware.neuralnetworks@1.2-service-sample-minimal.xml
copy to driver/sample/config/android.hardware.neuralnetworks@1.3-service-sample-float-slow.xml
index 8603298..aa38ba8 100644
--- a/driver/sample/config/android.hardware.neuralnetworks@1.2-service-sample-minimal.xml
+++ b/driver/sample/config/android.hardware.neuralnetworks@1.3-service-sample-float-slow.xml
@@ -2,6 +2,6 @@
<hal format="hidl">
<name>android.hardware.neuralnetworks</name>
<transport>hwbinder</transport>
- <fqname>@1.2::IDevice/sample-minimal</fqname>
+ <fqname>@1.3::IDevice/sample-float-slow</fqname>
</hal>
</manifest>
diff --git a/driver/sample/config/android.hardware.neuralnetworks@1.2-service-sample-minimal.rc b/driver/sample/config/android.hardware.neuralnetworks@1.3-service-sample-minimal.rc
similarity index 64%
rename from driver/sample/config/android.hardware.neuralnetworks@1.2-service-sample-minimal.rc
rename to driver/sample/config/android.hardware.neuralnetworks@1.3-service-sample-minimal.rc
index c68d72c..aa5df86 100644
--- a/driver/sample/config/android.hardware.neuralnetworks@1.2-service-sample-minimal.rc
+++ b/driver/sample/config/android.hardware.neuralnetworks@1.3-service-sample-minimal.rc
@@ -1,4 +1,4 @@
-service neuralnetworks_hal_service_sample_minimal /vendor/bin/hw/android.hardware.neuralnetworks@1.2-service-sample-minimal
+service neuralnetworks_hal_service_sample_minimal /vendor/bin/hw/android.hardware.neuralnetworks@1.3-service-sample-minimal
class hal
user system
group system
diff --git a/driver/sample/config/android.hardware.neuralnetworks@1.2-service-sample-minimal.xml b/driver/sample/config/android.hardware.neuralnetworks@1.3-service-sample-minimal.xml
similarity index 76%
rename from driver/sample/config/android.hardware.neuralnetworks@1.2-service-sample-minimal.xml
rename to driver/sample/config/android.hardware.neuralnetworks@1.3-service-sample-minimal.xml
index 8603298..982e1d4 100644
--- a/driver/sample/config/android.hardware.neuralnetworks@1.2-service-sample-minimal.xml
+++ b/driver/sample/config/android.hardware.neuralnetworks@1.3-service-sample-minimal.xml
@@ -2,6 +2,6 @@
<hal format="hidl">
<name>android.hardware.neuralnetworks</name>
<transport>hwbinder</transport>
- <fqname>@1.2::IDevice/sample-minimal</fqname>
+ <fqname>@1.3::IDevice/sample-minimal</fqname>
</hal>
</manifest>
diff --git a/driver/sample/config/android.hardware.neuralnetworks@1.2-service-sample-quant.rc b/driver/sample/config/android.hardware.neuralnetworks@1.3-service-sample-quant.rc
similarity index 66%
rename from driver/sample/config/android.hardware.neuralnetworks@1.2-service-sample-quant.rc
rename to driver/sample/config/android.hardware.neuralnetworks@1.3-service-sample-quant.rc
index 37ae4bc..96fbf79 100644
--- a/driver/sample/config/android.hardware.neuralnetworks@1.2-service-sample-quant.rc
+++ b/driver/sample/config/android.hardware.neuralnetworks@1.3-service-sample-quant.rc
@@ -1,4 +1,4 @@
-service neuralnetworks_hal_service_sample_quant /vendor/bin/hw/android.hardware.neuralnetworks@1.2-service-sample-quant
+service neuralnetworks_hal_service_sample_quant /vendor/bin/hw/android.hardware.neuralnetworks@1.3-service-sample-quant
class hal
user system
group system
diff --git a/driver/sample/config/android.hardware.neuralnetworks@1.2-service-sample-all.xml b/driver/sample/config/android.hardware.neuralnetworks@1.3-service-sample-quant.xml
similarity index 77%
copy from driver/sample/config/android.hardware.neuralnetworks@1.2-service-sample-all.xml
copy to driver/sample/config/android.hardware.neuralnetworks@1.3-service-sample-quant.xml
index db02de0..c5e6b94 100644
--- a/driver/sample/config/android.hardware.neuralnetworks@1.2-service-sample-all.xml
+++ b/driver/sample/config/android.hardware.neuralnetworks@1.3-service-sample-quant.xml
@@ -2,6 +2,6 @@
<hal format="hidl">
<name>android.hardware.neuralnetworks</name>
<transport>hwbinder</transport>
- <fqname>@1.2::IDevice/sample-all</fqname>
+ <fqname>@1.3::IDevice/sample-quant</fqname>
</hal>
</manifest>
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 49f4411..672a116 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -57,6 +57,7 @@
"android.hardware.neuralnetworks@1.0",
"android.hardware.neuralnetworks@1.1",
"android.hardware.neuralnetworks@1.2",
+ "android.hardware.neuralnetworks@1.3",
"android.hidl.allocator@1.0",
"android.hidl.memory@1.0",
"libbase",
diff --git a/runtime/Manager.cpp b/runtime/Manager.cpp
index c081572..4bc4612 100644
--- a/runtime/Manager.cpp
+++ b/runtime/Manager.cpp
@@ -620,21 +620,21 @@
}
}
// Create as many pools as there are input / output.
- auto fixPointerArguments = [&requestPoolInfos](
- const std::vector<ModelArgumentInfo>& argumentInfos) {
- std::vector<DataLocation> ptrArgsLocations;
- for (const ModelArgumentInfo& argumentInfo : argumentInfos) {
- if (argumentInfo.state == ModelArgumentInfo::POINTER) {
- ptrArgsLocations.push_back(
- {.poolIndex = static_cast<uint32_t>(requestPoolInfos.size()),
- .offset = 0,
- .length = argumentInfo.locationAndLength.length});
- requestPoolInfos.emplace_back(RunTimePoolInfo::createFromExistingBuffer(
- static_cast<uint8_t*>(argumentInfo.buffer)));
- }
- }
- return ptrArgsLocations;
- };
+ auto fixPointerArguments =
+ [&requestPoolInfos](const std::vector<ModelArgumentInfo>& argumentInfos) {
+ std::vector<DataLocation> ptrArgsLocations;
+ for (const ModelArgumentInfo& argumentInfo : argumentInfos) {
+ if (argumentInfo.state == ModelArgumentInfo::POINTER) {
+ ptrArgsLocations.push_back(
+ {.poolIndex = static_cast<uint32_t>(requestPoolInfos.size()),
+ .offset = 0,
+ .length = argumentInfo.locationAndLength.length});
+ requestPoolInfos.emplace_back(RunTimePoolInfo::createFromExistingBuffer(
+ static_cast<uint8_t*>(argumentInfo.buffer)));
+ }
+ }
+ return ptrArgsLocations;
+ };
const std::vector<DataLocation> inputPtrArgsLocations = fixPointerArguments(inputs);
const std::vector<DataLocation> outputPtrArgsLocations = fixPointerArguments(outputs);
diff --git a/runtime/VersionedInterfaces.cpp b/runtime/VersionedInterfaces.cpp
index 078ab95..325b75c 100644
--- a/runtime/VersionedInterfaces.cpp
+++ b/runtime/VersionedInterfaces.cpp
@@ -326,6 +326,7 @@
: mDeviceV1_0(std::move(device)),
mDeviceV1_1(V1_1::IDevice::castFrom(mDeviceV1_0).withDefault(nullptr)),
mDeviceV1_2(V1_2::IDevice::castFrom(mDeviceV1_0).withDefault(nullptr)),
+ mDeviceV1_3(V1_3::IDevice::castFrom(mDeviceV1_0).withDefault(nullptr)),
mDeathHandler(std::move(deathHandler)) {}
VersionedIDevice::Core::~Core() {
@@ -342,6 +343,7 @@
: mDeviceV1_0(std::move(other.mDeviceV1_0)),
mDeviceV1_1(std::move(other.mDeviceV1_1)),
mDeviceV1_2(std::move(other.mDeviceV1_2)),
+ mDeviceV1_3(std::move(other.mDeviceV1_3)),
mDeathHandler(std::move(other.mDeathHandler)) {
other.mDeathHandler = nullptr;
}
@@ -351,6 +353,7 @@
mDeviceV1_0 = std::move(other.mDeviceV1_0);
mDeviceV1_1 = std::move(other.mDeviceV1_1);
mDeviceV1_2 = std::move(other.mDeviceV1_2);
+ mDeviceV1_3 = std::move(other.mDeviceV1_3);
mDeathHandler = std::move(other.mDeathHandler);
other.mDeathHandler = nullptr;
}
@@ -461,14 +464,31 @@
const std::pair<ErrorStatus, Capabilities> kFailure = {ErrorStatus::GENERAL_FAILURE, {}};
std::pair<ErrorStatus, Capabilities> result;
- // version 1.2+ HAL
+ // version 1.3+ HAL
+ if (getDevice<V1_3::IDevice>() != nullptr) {
+ NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_INITIALIZATION, "getCapabilities_1_3");
+ Return<void> ret = recoverable<void, V1_3::IDevice>(
+ __FUNCTION__, [&result](const sp<V1_3::IDevice>& device) {
+ return device->getCapabilities_1_3(
+ [&result](ErrorStatus error, const Capabilities& capabilities) {
+ result = std::make_pair(error, capabilities);
+ });
+ });
+ if (!ret.isOk()) {
+ LOG(ERROR) << "getCapabilities_1_3 failure: " << ret.description();
+ return {ErrorStatus::GENERAL_FAILURE, {}};
+ }
+ return result;
+ }
+
+ // version 1.2 HAL
if (getDevice<V1_2::IDevice>() != nullptr) {
NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_INITIALIZATION, "getCapabilities_1_2");
Return<void> ret = recoverable<void, V1_2::IDevice>(
__FUNCTION__, [&result](const sp<V1_2::IDevice>& device) {
return device->getCapabilities_1_2(
- [&result](ErrorStatus error, const Capabilities& capabilities) {
- result = std::make_pair(error, capabilities);
+ [&result](ErrorStatus error, const V1_2::Capabilities& capabilities) {
+ result = std::make_pair(error, convertToV1_3(capabilities));
});
});
if (!ret.isOk()) {
@@ -486,7 +506,7 @@
return device->getCapabilities_1_1(
[&result](ErrorStatus error, const V1_1::Capabilities& capabilities) {
// Time taken to convert capabilities is trivial
- result = std::make_pair(error, convertToV1_2(capabilities));
+ result = std::make_pair(error, convertToV1_3(capabilities));
});
});
if (!ret.isOk()) {
@@ -504,7 +524,7 @@
return device->getCapabilities(
[&result](ErrorStatus error, const V1_0::Capabilities& capabilities) {
// Time taken to convert capabilities is trivial
- result = std::make_pair(error, convertToV1_2(capabilities));
+ result = std::make_pair(error, convertToV1_3(capabilities));
});
});
if (!ret.isOk()) {
@@ -578,13 +598,42 @@
return std::make_pair(status, std::move(remappedSupported));
};
- // version 1.2+ HAL
+ // version 1.3+ HAL
+ if (getDevice<V1_3::IDevice>() != nullptr) {
+ NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_COMPILATION, "getSupportedOperations_1_3");
+ Return<void> ret = recoverable<void, V1_3::IDevice>(
+ __FUNCTION__, [&model, &result](const sp<V1_3::IDevice>& device) {
+ return device->getSupportedOperations_1_3(
+ model, [&result](ErrorStatus error, const hidl_vec<bool>& supported) {
+ result = std::make_pair(error, supported);
+ });
+ });
+ if (!ret.isOk()) {
+ LOG(ERROR) << "getSupportedOperations_1_3 failure: " << ret.description();
+ return kFailure;
+ }
+ return result;
+ }
+
+ // version 1.2 HAL
if (getDevice<V1_2::IDevice>() != nullptr) {
+ const bool compliant = compliantWithV1_2(model);
+ V1_2::Model model12;
+ std::function<uint32_t(uint32_t)> submodelOperationIndexToModelOperationIndex;
+ if (compliant) {
+ model12 = convertToV1_2(model);
+ } else {
+ const auto slice12 = metaModel.getSliceV1_2();
+ if (!slice12.has_value()) {
+ return noneSupported();
+ }
+ std::tie(model12, submodelOperationIndexToModelOperationIndex) = *slice12;
+ }
NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_COMPILATION, "getSupportedOperations_1_2");
Return<void> ret = recoverable<void, V1_2::IDevice>(
- __FUNCTION__, [&model, &result](const sp<V1_2::IDevice>& device) {
+ __FUNCTION__, [&model12, &result](const sp<V1_2::IDevice>& device) {
return device->getSupportedOperations_1_2(
- model, [&result](ErrorStatus error, const hidl_vec<bool>& supported) {
+ model12, [&result](ErrorStatus error, const hidl_vec<bool>& supported) {
result = std::make_pair(error, supported);
});
});
@@ -592,6 +641,9 @@
LOG(ERROR) << "getSupportedOperations_1_2 failure: " << ret.description();
return kFailure;
}
+ if (!compliant) {
+ return remappedResult(result, submodelOperationIndexToModelOperationIndex);
+ }
return result;
}
@@ -672,28 +724,70 @@
const sp<PreparedModelCallback> callback = new PreparedModelCallback();
- // If 1.2 device, try preparing model
- if (getDevice<V1_2::IDevice>() != nullptr) {
- const Return<ErrorStatus> ret = recoverable<ErrorStatus, V1_2::IDevice>(
+ // If 1.3 device, try preparing model
+ if (getDevice<V1_3::IDevice>() != nullptr) {
+ const Return<ErrorStatus> ret = recoverable<ErrorStatus, V1_3::IDevice>(
__FUNCTION__,
[&model, &preference, &modelCache, &dataCache, &token,
- &callback](const sp<V1_2::IDevice>& device) {
- return device->prepareModel_1_2(model, preference, modelCache, dataCache, token,
+ &callback](const sp<V1_3::IDevice>& device) {
+ return device->prepareModel_1_3(model, preference, modelCache, dataCache, token,
callback);
},
callback);
if (!ret.isOk()) {
- LOG(ERROR) << "prepareModel_1_2 failure: " << ret.description();
+ LOG(ERROR) << "prepareModel_1_3 failure: " << ret.description();
return kFailure;
}
if (ret != ErrorStatus::NONE) {
- LOG(ERROR) << "prepareModel_1_2 returned " << toString(static_cast<ErrorStatus>(ret));
+ LOG(ERROR) << "prepareModel_1_3 returned " << toString(static_cast<ErrorStatus>(ret));
return kFailure;
}
callback->wait();
return {callback->getStatus(), makeVersionedIPreparedModel(callback->getPreparedModel())};
}
+ // If 1.2 device, try preparing model (requires conversion)
+ if (getDevice<V1_2::IDevice>() != nullptr) {
+ bool compliant = false;
+ V1_2::Model model12;
+ {
+ // Attribute time spent in model inspection and conversion to
+ // Runtime, as the time may be substantial (0.03ms for mobilenet,
+ // but could be larger for other models).
+ NNTRACE_FULL_SUBTRACT(NNTRACE_LAYER_RUNTIME, NNTRACE_PHASE_COMPILATION,
+ "VersionedIDevice::prepareModel_1_2");
+ compliant = compliantWithV1_2(model);
+ if (compliant) {
+ model12 = convertToV1_2(model); // copy is elided
+ }
+ }
+ if (compliant) {
+ const Return<ErrorStatus> ret = recoverable<ErrorStatus, V1_2::IDevice>(
+ __FUNCTION__,
+ [&model12, &preference, &modelCache, &dataCache, &token,
+ &callback](const sp<V1_2::IDevice>& device) {
+ return device->prepareModel_1_2(model12, preference, modelCache, dataCache,
+ token, callback);
+ },
+ callback);
+ if (!ret.isOk()) {
+ LOG(ERROR) << "prepareModel_1_2 failure: " << ret.description();
+ return kFailure;
+ }
+ if (ret != ErrorStatus::NONE) {
+ LOG(ERROR) << "prepareModel_1_2 returned "
+ << toString(static_cast<ErrorStatus>(ret));
+ return kFailure;
+ }
+ callback->wait();
+ return {callback->getStatus(),
+ makeVersionedIPreparedModel(callback->getPreparedModel())};
+ }
+
+ LOG(ERROR) << "Could not handle prepareModel_1_2!";
+ return kFailure;
+ }
+
// If 1.1 device, try preparing model (requires conversion)
if (getDevice<V1_1::IDevice>() != nullptr) {
bool compliant = false;
@@ -839,7 +933,9 @@
int64_t VersionedIDevice::getFeatureLevel() const {
constexpr int64_t kFailure = -1;
- if (getDevice<V1_2::IDevice>() != nullptr) {
+ if (getDevice<V1_3::IDevice>() != nullptr) {
+ return __ANDROID_API_R__;
+ } else if (getDevice<V1_2::IDevice>() != nullptr) {
return __ANDROID_API_Q__;
} else if (getDevice<V1_1::IDevice>() != nullptr) {
return __ANDROID_API_P__;
diff --git a/runtime/VersionedInterfaces.h b/runtime/VersionedInterfaces.h
index 7bbc2fb..8665745 100644
--- a/runtime/VersionedInterfaces.h
+++ b/runtime/VersionedInterfaces.h
@@ -502,6 +502,10 @@
sp<hal::V1_2::IDevice> getDevice() const {
return mDeviceV1_2;
}
+ template <>
+ sp<hal::V1_3::IDevice> getDevice() const {
+ return mDeviceV1_3;
+ }
/**
* Returns sp<*::IDevice> (as per getDevice()) and the
@@ -535,6 +539,7 @@
sp<hal::V1_0::IDevice> mDeviceV1_0;
sp<hal::V1_1::IDevice> mDeviceV1_1;
sp<hal::V1_2::IDevice> mDeviceV1_2;
+ sp<hal::V1_3::IDevice> mDeviceV1_3;
/**
* HIDL callback to be invoked if the service for mDeviceV1_0 crashes.
diff --git a/runtime/include/NeuralNetworks.h b/runtime/include/NeuralNetworks.h
index 3e5aed1..75caa55 100644
--- a/runtime/include/NeuralNetworks.h
+++ b/runtime/include/NeuralNetworks.h
@@ -181,6 +181,22 @@
*/
ANEURALNETWORKS_TENSOR_QUANT8_SYMM = 13,
#endif // __ANDROID_API__ >= __ANDROID_API_Q__
+#if __ANDROID_API__ >= __ANDROID_API_R__
+ /**
+ * A tensor of 8 bit signed integers that represent real numbers.
+ *
+ * Attached to this tensor are two numbers that can be used to convert the
+ * 8 bit integer to the real value and vice versa. These two numbers are:
+ * - scale: a 32 bit floating point value greater than zero.
+ * - zeroPoint: a 32 bit integer, in range [-128, 127].
+ *
+ * The formula is:
+ * real_value = (integer_value - zeroPoint) * scale.
+ *
+ * Available since API level 30.
+ */
+ ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED = 14,
+#endif // __ANDROID_API__ >= __ANDROID_API_R__
} OperandCode;
/**
diff --git a/runtime/test/Android.bp b/runtime/test/Android.bp
index 12ac2ff..608172e 100644
--- a/runtime/test/Android.bp
+++ b/runtime/test/Android.bp
@@ -40,6 +40,7 @@
"android.hardware.neuralnetworks@1.0",
"android.hardware.neuralnetworks@1.1",
"android.hardware.neuralnetworks@1.2",
+ "android.hardware.neuralnetworks@1.3",
"libc++fs",
"libneuralnetworks_generated_test_harness",
],
@@ -77,6 +78,7 @@
"neuralnetworks_generated_V1_0_example",
"neuralnetworks_generated_V1_1_example",
"neuralnetworks_generated_V1_2_example",
+ "neuralnetworks_generated_V1_3_example",
],
cflags: [
"-UNDEBUG",
@@ -326,3 +328,9 @@
srcs: ["generated/spec_V1_2/*.example.cpp"],
static_libs: ["libneuralnetworks_generated_test_harness"],
}
+
+cc_library_static {
+ name: "neuralnetworks_generated_V1_3_example",
+ srcs: ["generated/spec_V1_3/*.example.cpp"],
+ static_libs: ["libneuralnetworks_generated_test_harness"],
+}
diff --git a/runtime/test/TestAssertions.cpp b/runtime/test/TestAssertions.cpp
index 64f60fa..a36c6f3 100644
--- a/runtime/test/TestAssertions.cpp
+++ b/runtime/test/TestAssertions.cpp
@@ -38,6 +38,7 @@
CHECK_TEST_ENUM(TestOperandType, TENSOR_QUANT8_SYMM_PER_CHANNEL);
CHECK_TEST_ENUM(TestOperandType, TENSOR_QUANT16_ASYMM);
CHECK_TEST_ENUM(TestOperandType, TENSOR_QUANT8_SYMM);
+CHECK_TEST_ENUM(TestOperandType, TENSOR_QUANT8_ASYMM_SIGNED);
CHECK_TEST_ENUM(TestOperationType, ADD);
CHECK_TEST_ENUM(TestOperationType, AVERAGE_POOL_2D);
diff --git a/runtime/test/TestCompilationCaching.cpp b/runtime/test/TestCompilationCaching.cpp
index e0e2987..c061e85 100644
--- a/runtime/test/TestCompilationCaching.cpp
+++ b/runtime/test/TestCompilationCaching.cpp
@@ -14,16 +14,20 @@
* limitations under the License.
*/
+#include <gtest/gtest.h>
+
+#include <cstdlib>
+#include <filesystem>
+#include <numeric>
+#include <string>
+#include <tuple>
+#include <vector>
+
#include "HalInterfaces.h"
#include "Manager.h"
#include "SampleDriver.h"
#include "TestNeuralNetworksWrapper.h"
-#include <gtest/gtest.h>
-#include <cstdlib>
-#include <filesystem>
-#include <numeric>
-
using namespace android::nn;
using namespace hal;
using Result = test_wrapper::Result;
@@ -110,19 +114,19 @@
~CachingDriver() override {}
// Reports faster than cpu.
- Return<void> getCapabilities_1_2(getCapabilities_1_2_cb cb) override {
+ Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
android::nn::initVLogMask();
const PerformanceInfo kPerf = {.execTime = 0.1, .powerUsage = 0.1};
Capabilities capabilities = {
.relaxedFloat32toFloat16PerformanceScalar = kPerf,
.relaxedFloat32toFloat16PerformanceTensor = kPerf,
- .operandPerformance = android::nn::nonExtensionOperandPerformance(kPerf)};
+ .operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>(kPerf)};
cb(ErrorStatus::NONE, capabilities);
return Void();
}
// Reports supporting all operations.
- Return<void> getSupportedOperations_1_2(const Model& model,
+ Return<void> getSupportedOperations_1_3(const Model& model,
getSupportedOperations_cb cb) override {
std::vector<bool> supported(model.operations.size(), true);
cb(ErrorStatus::NONE, supported);
@@ -137,7 +141,7 @@
// Generates CachingPreparedModel.
// Writes the cache entry per mCacheXData and sets mHasCalledPrepareModel.
- Return<ErrorStatus> prepareModel_1_2(const Model&, ExecutionPreference,
+ Return<ErrorStatus> prepareModel_1_3(const Model&, ExecutionPreference,
const hidl_vec<hidl_handle>& modelCacheHandle,
const hidl_vec<hidl_handle>& dataCacheHandle,
const CacheToken&,
diff --git a/runtime/test/TestCompliance.cpp b/runtime/test/TestCompliance.cpp
index 461024a..678f16b 100644
--- a/runtime/test/TestCompliance.cpp
+++ b/runtime/test/TestCompliance.cpp
@@ -26,7 +26,7 @@
using namespace hal;
using namespace test_helper;
-using HidlModel = V1_2::Model;
+using HidlModel = V1_3::Model;
using WrapperModel = test_wrapper::Model;
using WrapperOperandType = test_wrapper::OperandType;
using WrapperType = test_wrapper::Type;
@@ -43,20 +43,30 @@
return modelBuilder->makeHidlModel();
}
+void testAvailableSinceV1_3(std::function<void(WrapperModel*)> createModel) {
+ HidlModel model = createHidlModel(createModel);
+ ASSERT_FALSE(compliantWithV1_2(model));
+ ASSERT_FALSE(compliantWithV1_1(model));
+ ASSERT_FALSE(compliantWithV1_0(model));
+}
+
void testAvailableSinceV1_2(std::function<void(WrapperModel*)> createModel) {
HidlModel model = createHidlModel(createModel);
+ ASSERT_TRUE(compliantWithV1_2(model));
ASSERT_FALSE(compliantWithV1_1(model));
ASSERT_FALSE(compliantWithV1_0(model));
}
void testAvailableSinceV1_1(std::function<void(WrapperModel*)> createModel) {
HidlModel model = createHidlModel(createModel);
+ ASSERT_TRUE(compliantWithV1_2(model));
ASSERT_TRUE(compliantWithV1_1(model));
ASSERT_FALSE(compliantWithV1_0(model));
}
void testAvailableSinceV1_0(std::function<void(WrapperModel*)> createModel) {
HidlModel model = createHidlModel(createModel);
+ ASSERT_TRUE(compliantWithV1_2(model));
ASSERT_TRUE(compliantWithV1_1(model));
ASSERT_TRUE(compliantWithV1_0(model));
}
@@ -162,6 +172,9 @@
case TestHalVersion::V1_2:
testAvailableSinceV1_2(createModel);
break;
+ case TestHalVersion::V1_3:
+ testAvailableSinceV1_3(createModel);
+ break;
case TestHalVersion::UNKNOWN:
FAIL();
}
diff --git a/runtime/test/TestExecution.cpp b/runtime/test/TestExecution.cpp
index e4c8d21..de5020a 100644
--- a/runtime/test/TestExecution.cpp
+++ b/runtime/test/TestExecution.cpp
@@ -16,6 +16,15 @@
#undef NDEBUG
+#include <gtest/gtest.h>
+
+#include <algorithm>
+#include <cassert>
+#include <memory>
+#include <string>
+#include <tuple>
+#include <vector>
+
#include "Callbacks.h"
#include "CompilationBuilder.h"
#include "HalInterfaces.h"
@@ -26,19 +35,13 @@
#include "TestNeuralNetworksWrapper.h"
#include "ValidateHal.h"
-#include <algorithm>
-#include <cassert>
-#include <vector>
-
-#include <gtest/gtest.h>
-
namespace android {
using namespace nn::hal;
using CompilationBuilder = nn::CompilationBuilder;
using Device = nn::Device;
using DeviceManager = nn::DeviceManager;
-using HidlModel = V1_2::Model;
+using HidlModel = V1_3::Model;
using PreparedModelCallback = nn::PreparedModelCallback;
using Result = nn::test_wrapper::Result;
using SampleDriver = nn::sample_driver::SampleDriver;
@@ -148,30 +151,31 @@
};
// Behaves like SampleDriver, except that it produces wrapped IPreparedModel.
-class TestDriver12 : public SampleDriver {
+class TestDriver13 : public SampleDriver {
public:
// Allow dummying up the error status for execution of all models
// prepared from this driver. If errorStatus is NONE, then
// execute behaves normally (and sends back the actual execution
- // status). Otherwise, don't bother to execute, and just send
+ // status). Otherwise, don't bother to execute, and just send
// back errorStatus (as the execution status, not the launch
// status).
- TestDriver12(const std::string& name, ErrorStatus errorStatus)
+ TestDriver13(const std::string& name, ErrorStatus errorStatus)
: SampleDriver(name.c_str()), mErrorStatus(errorStatus) {}
- Return<void> getCapabilities_1_2(getCapabilities_1_2_cb _hidl_cb) override {
+ Return<void> getCapabilities_1_3(getCapabilities_1_3_cb _hidl_cb) override {
android::nn::initVLogMask();
const PerformanceInfo kPerf = {.execTime = 0.75f, .powerUsage = 0.75f};
Capabilities capabilities = {
.relaxedFloat32toFloat16PerformanceScalar = kPerf,
.relaxedFloat32toFloat16PerformanceTensor = kPerf,
- .operandPerformance = nn::nonExtensionOperandPerformance(kPerf)};
+ .operandPerformance =
+ nn::nonExtensionOperandPerformance<nn::HalVersion::V1_3>(kPerf)};
_hidl_cb(ErrorStatus::NONE, capabilities);
return Void();
}
- Return<void> getSupportedOperations_1_2(const HidlModel& model,
- getSupportedOperations_1_2_cb cb) override {
+ Return<void> getSupportedOperations_1_3(const HidlModel& model,
+ getSupportedOperations_1_3_cb cb) override {
if (nn::validateModel(model)) {
std::vector<bool> supported(model.operations.size(), true);
cb(ErrorStatus::NONE, supported);
@@ -182,11 +186,40 @@
return Void();
}
- Return<ErrorStatus> prepareModel_1_2(
+ Return<ErrorStatus> prepareModel_1_3(
const HidlModel& model, ExecutionPreference preference,
const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
const CacheToken& token, const sp<IPreparedModelCallback>& actualCallback) override {
sp<PreparedModelCallback> localCallback = new PreparedModelCallback;
+ Return<ErrorStatus> prepareModelReturn = SampleDriver::prepareModel_1_3(
+ model, preference, modelCache, dataCache, token, localCallback);
+ if (!prepareModelReturn.isOkUnchecked()) {
+ return prepareModelReturn;
+ }
+ if (prepareModelReturn != ErrorStatus::NONE) {
+ actualCallback->notify_1_2(
+ localCallback->getStatus(),
+ V1_2::IPreparedModel::castFrom(localCallback->getPreparedModel()));
+ return prepareModelReturn;
+ }
+ localCallback->wait();
+ if (localCallback->getStatus() != ErrorStatus::NONE) {
+ actualCallback->notify_1_2(
+ localCallback->getStatus(),
+ V1_2::IPreparedModel::castFrom(localCallback->getPreparedModel()));
+ } else {
+ actualCallback->notify_1_2(
+ ErrorStatus::NONE,
+ new TestPreparedModel12(localCallback->getPreparedModel(), mErrorStatus));
+ }
+ return prepareModelReturn;
+ }
+
+ Return<ErrorStatus> prepareModel_1_2(
+ const V1_2::Model& model, ExecutionPreference preference,
+ const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
+ const CacheToken& token, const sp<IPreparedModelCallback>& actualCallback) override {
+ sp<PreparedModelCallback> localCallback = new PreparedModelCallback;
Return<ErrorStatus> prepareModelReturn = SampleDriver::prepareModel_1_2(
model, preference, modelCache, dataCache, token, localCallback);
if (!prepareModelReturn.isOkUnchecked()) {
@@ -246,62 +279,127 @@
ErrorStatus mErrorStatus;
};
-// Like TestDriver, but implementing 1.1
-class TestDriver11 : public V1_1::IDevice {
+// Like TestDriver, but implementing 1.2
+class TestDriver12 : public V1_2::IDevice {
public:
- TestDriver11(const std::string& name, ErrorStatus errorStatus)
- : m12Driver(new TestDriver12(name, errorStatus)) {}
+ TestDriver12(const std::string& name, ErrorStatus errorStatus)
+ : mLatestDriver(new TestDriver13(name, errorStatus)) {}
+ Return<void> getCapabilities_1_2(getCapabilities_1_2_cb _hidl_cb) override {
+ return mLatestDriver->getCapabilities_1_2(_hidl_cb);
+ }
Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
- return m12Driver->getCapabilities_1_1(_hidl_cb);
+ return mLatestDriver->getCapabilities_1_1(_hidl_cb);
+ }
+ Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ return mLatestDriver->getCapabilities(_hidl_cb);
+ }
+ Return<void> getSupportedOperations_1_2(const V1_2::Model& model,
+ getSupportedOperations_1_2_cb _hidl_cb) override {
+ return mLatestDriver->getSupportedOperations_1_2(model, _hidl_cb);
}
Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
getSupportedOperations_1_1_cb _hidl_cb) override {
- return m12Driver->getSupportedOperations_1_1(model, _hidl_cb);
+ return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb);
+ }
+ Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
+ return mLatestDriver->getSupportedOperations(model, _hidl_cb);
+ }
+ Return<ErrorStatus> prepareModel_1_2(
+ const V1_2::Model& model, ExecutionPreference preference,
+ const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
+ const CacheToken& token, const sp<IPreparedModelCallback>& actualCallback) override {
+ return mLatestDriver->prepareModel_1_2(model, preference, modelCache, dataCache, token,
+ actualCallback);
}
Return<ErrorStatus> prepareModel_1_1(
const V1_1::Model& model, ExecutionPreference preference,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
- return m12Driver->prepareModel_1_1(model, preference, actualCallback);
- }
- Return<DeviceStatus> getStatus() override { return m12Driver->getStatus(); }
- Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
- return m12Driver->getCapabilities(_hidl_cb);
- }
- Return<void> getSupportedOperations(const V1_0::Model& model,
- getSupportedOperations_cb _hidl_cb) override {
- return m12Driver->getSupportedOperations(model, _hidl_cb);
+ return mLatestDriver->prepareModel_1_1(model, preference, actualCallback);
}
Return<ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
- return m12Driver->prepareModel(model, actualCallback);
+ return mLatestDriver->prepareModel(model, actualCallback);
+ }
+ Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
+ Return<void> getVersionString(getVersionString_cb _hidl_cb) override {
+ return mLatestDriver->getVersionString(_hidl_cb);
+ }
+ Return<void> getType(getType_cb _hidl_cb) override { return mLatestDriver->getType(_hidl_cb); }
+ Return<void> getSupportedExtensions(getSupportedExtensions_cb _hidl_cb) {
+ return mLatestDriver->getSupportedExtensions(_hidl_cb);
+ }
+ Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb _hidl_cb) {
+ return mLatestDriver->getNumberOfCacheFilesNeeded(_hidl_cb);
+ }
+ Return<ErrorStatus> prepareModelFromCache(const hidl_vec<hidl_handle>& modelCache,
+ const hidl_vec<hidl_handle>& dataCache,
+ const CacheToken& token,
+ const sp<V1_2::IPreparedModelCallback>& callback) {
+ return mLatestDriver->prepareModelFromCache(modelCache, dataCache, token, callback);
}
private:
- const sp<V1_2::IDevice> m12Driver;
+ const sp<V1_3::IDevice> mLatestDriver;
+};
+
+// Like TestDriver, but implementing 1.1
+class TestDriver11 : public V1_1::IDevice {
+ public:
+ TestDriver11(const std::string& name, ErrorStatus errorStatus)
+ : mLatestDriver(new TestDriver13(name, errorStatus)) {}
+ Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
+ return mLatestDriver->getCapabilities_1_1(_hidl_cb);
+ }
+ Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
+ getSupportedOperations_1_1_cb _hidl_cb) override {
+ return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb);
+ }
+ Return<ErrorStatus> prepareModel_1_1(
+ const V1_1::Model& model, ExecutionPreference preference,
+ const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
+ return mLatestDriver->prepareModel_1_1(model, preference, actualCallback);
+ }
+ Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
+ Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ return mLatestDriver->getCapabilities(_hidl_cb);
+ }
+ Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
+ return mLatestDriver->getSupportedOperations(model, _hidl_cb);
+ }
+ Return<ErrorStatus> prepareModel(
+ const V1_0::Model& model,
+ const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
+ return mLatestDriver->prepareModel(model, actualCallback);
+ }
+
+ private:
+ const sp<V1_3::IDevice> mLatestDriver;
};
// Like TestDriver, but implementing 1.0
class TestDriver10 : public V1_0::IDevice {
public:
TestDriver10(const std::string& name, ErrorStatus errorStatus)
- : m12Driver(new TestDriver12(name, errorStatus)) {}
+ : mLatestDriver(new TestDriver13(name, errorStatus)) {}
Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
- return m12Driver->getCapabilities(_hidl_cb);
+ return mLatestDriver->getCapabilities(_hidl_cb);
}
Return<void> getSupportedOperations(const V1_0::Model& model,
getSupportedOperations_cb _hidl_cb) override {
- return m12Driver->getSupportedOperations(model, _hidl_cb);
+ return mLatestDriver->getSupportedOperations(model, _hidl_cb);
}
Return<ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
- return m12Driver->prepareModel(model, actualCallback);
+ return mLatestDriver->prepareModel(model, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return m12Driver->getStatus(); }
+ Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
private:
- const sp<V1_2::IDevice> m12Driver;
+ const sp<V1_3::IDevice> mLatestDriver;
};
// This class adds some simple utilities on top of WrapperCompilation in order
@@ -512,6 +610,12 @@
std::make_tuple(ErrorStatus::INVALID_ARGUMENT, Result::BAD_DATA,
/* kUseIntrospectionAPI */ false));
+class ExecutionTest13 : public ExecutionTestTemplate<TestDriver13> {};
+TEST_P(ExecutionTest13, Wait) {
+ TestWait();
+}
+INSTANTIATE_TEST_CASE_P(Flavor, ExecutionTest13, kTestValues);
+
class ExecutionTest12 : public ExecutionTestTemplate<TestDriver12> {};
TEST_P(ExecutionTest12, Wait) {
TestWait();
@@ -543,7 +647,7 @@
std::make_tuple(ErrorStatus::INVALID_ARGUMENT, Result::BAD_DATA,
/* kUseIntrospectionAPI */ true));
-INSTANTIATE_TEST_CASE_P(IntrospectionFlavor, ExecutionTest12, kIntrospectionTestValues);
+INSTANTIATE_TEST_CASE_P(IntrospectionFlavor, ExecutionTest13, kIntrospectionTestValues);
} // namespace
} // namespace android
diff --git a/runtime/test/TestExtensions.cpp b/runtime/test/TestExtensions.cpp
index 6fe5927..fd9f17d 100644
--- a/runtime/test/TestExtensions.cpp
+++ b/runtime/test/TestExtensions.cpp
@@ -16,6 +16,9 @@
#include <gtest/gtest.h>
+#include <string>
+#include <vector>
+
#include "HalInterfaces.h"
#include "Manager.h"
#include "NeuralNetworks.h"
@@ -50,12 +53,12 @@
return Void();
}
- Return<void> getCapabilities_1_2(getCapabilities_1_2_cb cb) override {
+ Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
cb(ErrorStatus::NONE, {/* Dummy zero-filled capabilities. */});
return Void();
}
- Return<void> getSupportedOperations_1_2(const Model&, getSupportedOperations_cb) override {
+ Return<void> getSupportedOperations_1_3(const Model&, getSupportedOperations_cb) override {
CHECK(false) << "not implemented";
return Void();
}
diff --git a/runtime/test/TestIntrospectionControl.cpp b/runtime/test/TestIntrospectionControl.cpp
index e25769a..84617d9 100644
--- a/runtime/test/TestIntrospectionControl.cpp
+++ b/runtime/test/TestIntrospectionControl.cpp
@@ -14,6 +14,16 @@
* limitations under the License.
*/
+#include <gtest/gtest.h>
+
+#include <iterator>
+#include <map>
+#include <queue>
+#include <set>
+#include <string>
+#include <tuple>
+#include <vector>
+
#include "CompilationBuilder.h"
#include "ExecutionBurstServer.h"
#include "HalInterfaces.h"
@@ -25,13 +35,6 @@
#include "Utils.h"
#include "ValidateHal.h"
-#include <gtest/gtest.h>
-
-#include <iterator>
-#include <map>
-#include <queue>
-#include <set>
-
namespace {
using namespace ::android;
@@ -42,7 +45,7 @@
using DeviceManager = nn::DeviceManager;
using ExecutePreference = nn::test_wrapper::ExecutePreference;
using ExecutionBurstServer = nn::ExecutionBurstServer;
-using HidlModel = V1_2::Model;
+using HidlModel = V1_3::Model;
using PreparedModelCallback = nn::PreparedModelCallback;
using Result = nn::test_wrapper::Result;
using SampleDriver = nn::sample_driver::SampleDriver;
@@ -58,19 +61,19 @@
constexpr Timing kGoodTiming = {.timeOnDevice = 123, .timeInDriver = 456};
// This is an IDevice for testing purposes. The test driver has customized
-// getCapabilities_1_1 and getSupportedOperations_1_2.
+// getCapabilities_1_3 and getSupportedOperations_1_3.
class TestDriver : public SampleDriver {
public:
TestDriver(const char* name, Capabilities capabilities, const std::vector<bool>& supportedOps)
: SampleDriver(name), mCapabilities(capabilities), mSupportedOps(supportedOps) {}
~TestDriver() override {}
- Return<void> getCapabilities_1_2(getCapabilities_1_2_cb cb) override {
+ Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
cb(ErrorStatus::NONE, mCapabilities);
return Void();
}
- Return<void> getSupportedOperations_1_2(const Model& model,
+ Return<void> getSupportedOperations_1_3(const Model& model,
getSupportedOperations_cb cb) override {
if (!android::nn::validateModel(model)) {
cb(ErrorStatus::INVALID_ARGUMENT, std::vector<bool>());
@@ -110,9 +113,11 @@
DeviceSpecification(const std::string& name, float perf, std::vector<bool>& supportedOps)
: mName(name), mSupportedOps(supportedOps) {
PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf};
- mCapabilities = {.relaxedFloat32toFloat16PerformanceScalar = perfInfo,
- .relaxedFloat32toFloat16PerformanceTensor = perfInfo,
- .operandPerformance = nn::nonExtensionOperandPerformance(perfInfo)};
+ mCapabilities = {
+ .relaxedFloat32toFloat16PerformanceScalar = perfInfo,
+ .relaxedFloat32toFloat16PerformanceTensor = perfInfo,
+ .operandPerformance =
+ nn::nonExtensionOperandPerformance<nn::HalVersion::V1_3>(perfInfo)};
}
std::string mName;
Capabilities mCapabilities;
@@ -406,23 +411,36 @@
};
// Behaves like SampleDriver, except that it produces customized IPrepareModel.
-class TestDriver12 : public SampleDriver {
+class TestDriver13 : public SampleDriver {
public:
- TestDriver12(const std::string& name, Success success)
+ TestDriver13(const std::string& name, Success success)
: SampleDriver(name.c_str()), mSuccess(success) {}
- Return<void> getCapabilities_1_2(getCapabilities_1_2_cb _hidl_cb) override {
+ Return<void> getCapabilities_1_3(getCapabilities_1_3_cb _hidl_cb) override {
android::nn::initVLogMask();
const PerformanceInfo kPerf = {.execTime = 0.75f, .powerUsage = 0.75f};
Capabilities capabilities = {
.relaxedFloat32toFloat16PerformanceScalar = kPerf,
.relaxedFloat32toFloat16PerformanceTensor = kPerf,
- .operandPerformance = nn::nonExtensionOperandPerformance(kPerf)};
+ .operandPerformance =
+ nn::nonExtensionOperandPerformance<nn::HalVersion::V1_3>(kPerf)};
_hidl_cb(ErrorStatus::NONE, capabilities);
return Void();
}
- Return<void> getSupportedOperations_1_2(const HidlModel& model,
+ Return<void> getSupportedOperations_1_3(const HidlModel& model,
+ getSupportedOperations_1_3_cb cb) override {
+ if (nn::validateModel(model)) {
+ std::vector<bool> supported(model.operations.size(), true);
+ cb(ErrorStatus::NONE, supported);
+ } else {
+ std::vector<bool> supported;
+ cb(ErrorStatus::INVALID_ARGUMENT, supported);
+ }
+ return Void();
+ }
+
+ Return<void> getSupportedOperations_1_2(const V1_2::Model& model,
getSupportedOperations_1_2_cb cb) override {
if (nn::validateModel(model)) {
std::vector<bool> supported(model.operations.size(), true);
@@ -434,7 +452,7 @@
return Void();
}
- Return<ErrorStatus> prepareModel_1_2(const HidlModel& model, ExecutionPreference,
+ Return<ErrorStatus> prepareModel_1_3(const HidlModel& model, ExecutionPreference,
const hidl_vec<hidl_handle>&, const hidl_vec<hidl_handle>&,
const CacheToken&,
const sp<IPreparedModelCallback>& callback) override {
@@ -442,11 +460,20 @@
return ErrorStatus::NONE;
}
+ Return<ErrorStatus> prepareModel_1_2(const V1_2::Model& model, ExecutionPreference,
+ const hidl_vec<hidl_handle>&, const hidl_vec<hidl_handle>&,
+ const CacheToken&,
+ const sp<IPreparedModelCallback>& callback) override {
+ callback->notify_1_2(ErrorStatus::NONE,
+ new TestPreparedModel12(nn::convertToV1_3(model), this, mSuccess));
+ return ErrorStatus::NONE;
+ }
+
Return<ErrorStatus> prepareModel_1_1(
const V1_1::Model& model, ExecutionPreference,
const sp<V1_0::IPreparedModelCallback>& callback) override {
callback->notify(ErrorStatus::NONE,
- new TestPreparedModel10(nn::convertToV1_2(model), this, mSuccess));
+ new TestPreparedModel10(nn::convertToV1_3(model), this, mSuccess));
return ErrorStatus::NONE;
}
@@ -460,39 +487,105 @@
Success mSuccess;
};
-// Like TestDriver, but implementing 1.1
-class TestDriver11 : public V1_1::IDevice {
+// Like TestDriver, but implementing 1.2
+class TestDriver12 : public V1_2::IDevice {
public:
- TestDriver11(const std::string& name, Success success)
- : m12Driver(new TestDriver12(name, success)) {}
+ TestDriver12(const std::string& name, Success success)
+ : mLatestDriver(new TestDriver13(name, success)) {}
+ Return<void> getCapabilities_1_2(getCapabilities_1_2_cb _hidl_cb) override {
+ return mLatestDriver->getCapabilities_1_2(_hidl_cb);
+ }
Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
- return m12Driver->getCapabilities_1_1(_hidl_cb);
+ return mLatestDriver->getCapabilities_1_1(_hidl_cb);
+ }
+ Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ return mLatestDriver->getCapabilities(_hidl_cb);
+ }
+ Return<void> getSupportedOperations_1_2(const V1_2::Model& model,
+ getSupportedOperations_1_2_cb _hidl_cb) override {
+ return mLatestDriver->getSupportedOperations_1_2(model, _hidl_cb);
}
Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
getSupportedOperations_1_1_cb _hidl_cb) override {
- return m12Driver->getSupportedOperations_1_1(model, _hidl_cb);
+ return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb);
+ }
+ Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
+ return mLatestDriver->getSupportedOperations(model, _hidl_cb);
+ }
+ Return<ErrorStatus> prepareModel_1_2(const V1_2::Model& model, ExecutionPreference preference,
+ const hidl_vec<hidl_handle>& modelCache,
+ const hidl_vec<hidl_handle>& dataCache,
+ const CacheToken& token,
+ const sp<IPreparedModelCallback>& callback) override {
+ return mLatestDriver->prepareModel_1_2(model, preference, modelCache, dataCache, token,
+ callback);
}
Return<ErrorStatus> prepareModel_1_1(
const V1_1::Model& model, ExecutionPreference preference,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
- return m12Driver->prepareModel_1_1(model, preference, actualCallback);
- }
- Return<DeviceStatus> getStatus() override { return m12Driver->getStatus(); }
- Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
- return m12Driver->getCapabilities(_hidl_cb);
- }
- Return<void> getSupportedOperations(const V1_0::Model& model,
- getSupportedOperations_cb _hidl_cb) override {
- return m12Driver->getSupportedOperations(model, _hidl_cb);
+ return mLatestDriver->prepareModel_1_1(model, preference, actualCallback);
}
Return<ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
- return m12Driver->prepareModel(model, actualCallback);
+ return mLatestDriver->prepareModel(model, actualCallback);
+ }
+ Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
+ Return<void> getVersionString(getVersionString_cb _hidl_cb) override {
+ return mLatestDriver->getVersionString(_hidl_cb);
+ }
+ Return<void> getType(getType_cb _hidl_cb) override { return mLatestDriver->getType(_hidl_cb); }
+ Return<void> getSupportedExtensions(getSupportedExtensions_cb _hidl_cb) {
+ return mLatestDriver->getSupportedExtensions(_hidl_cb);
+ }
+ Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb _hidl_cb) {
+ return mLatestDriver->getNumberOfCacheFilesNeeded(_hidl_cb);
+ }
+ Return<ErrorStatus> prepareModelFromCache(const hidl_vec<hidl_handle>& modelCache,
+ const hidl_vec<hidl_handle>& dataCache,
+ const CacheToken& token,
+ const sp<V1_2::IPreparedModelCallback>& callback) {
+ return mLatestDriver->prepareModelFromCache(modelCache, dataCache, token, callback);
}
private:
- const sp<V1_2::IDevice> m12Driver;
+ const sp<V1_3::IDevice> mLatestDriver;
+};
+
+// Like TestDriver, but implementing 1.1
+class TestDriver11 : public V1_1::IDevice {
+ public:
+ TestDriver11(const std::string& name, Success success)
+ : mLatestDriver(new TestDriver13(name, success)) {}
+ Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
+ return mLatestDriver->getCapabilities_1_1(_hidl_cb);
+ }
+ Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
+ getSupportedOperations_1_1_cb _hidl_cb) override {
+ return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb);
+ }
+ Return<ErrorStatus> prepareModel_1_1(
+ const V1_1::Model& model, ExecutionPreference preference,
+ const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
+ return mLatestDriver->prepareModel_1_1(model, preference, actualCallback);
+ }
+ Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
+ Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ return mLatestDriver->getCapabilities(_hidl_cb);
+ }
+ Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
+ return mLatestDriver->getSupportedOperations(model, _hidl_cb);
+ }
+ Return<ErrorStatus> prepareModel(
+ const V1_0::Model& model,
+ const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
+ return mLatestDriver->prepareModel(model, actualCallback);
+ }
+
+ private:
+ const sp<V1_3::IDevice> mLatestDriver;
};
} // namespace test_drivers
diff --git a/runtime/test/TestOperandExtraParams.cpp b/runtime/test/TestOperandExtraParams.cpp
index cc20cce..7a4f4ff 100644
--- a/runtime/test/TestOperandExtraParams.cpp
+++ b/runtime/test/TestOperandExtraParams.cpp
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "NeuralNetworks.h"
#include "NeuralNetworksOEM.h"
#include "NeuralNetworksWrapper.h"
#ifndef NNTEST_ONLY_PUBLIC_API
@@ -89,6 +90,12 @@
.dimensions = dims,
.scale = 1.0,
.zeroPoint = 32768};
+ case ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED:
+ return {.type = dataType,
+ .dimensionCount = 4,
+ .dimensions = dims,
+ .scale = 1.0,
+ .zeroPoint = 1};
default:
ADD_FAILURE();
return {};
@@ -135,6 +142,7 @@
ANEURALNETWORKS_TENSOR_FLOAT16,
ANEURALNETWORKS_TENSOR_BOOL8,
ANEURALNETWORKS_TENSOR_QUANT8_SYMM,
+ ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED,
};
#ifndef NNTEST_ONLY_PUBLIC_API
diff --git a/runtime/test/TestPartitioning.cpp b/runtime/test/TestPartitioning.cpp
index 11ec944..b01e580 100644
--- a/runtime/test/TestPartitioning.cpp
+++ b/runtime/test/TestPartitioning.cpp
@@ -14,6 +14,19 @@
* limitations under the License.
*/
+#include <gtest/gtest.h>
+
+#include <algorithm>
+#include <filesystem>
+#include <functional>
+#include <map>
+#include <memory>
+#include <queue>
+#include <string>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
#include "CompilationBuilder.h"
#include "ExecutionPlan.h"
#include "HalInterfaces.h"
@@ -26,14 +39,6 @@
#include "Utils.h"
#include "ValidateHal.h"
-#include <gtest/gtest.h>
-
-#include <filesystem>
-#include <functional>
-#include <map>
-#include <queue>
-#include <type_traits>
-
// Uncomment the following line to generate some debugging output that
// may be useful when analyzing failures:
//
@@ -138,7 +143,7 @@
using ExecutionPlan = ::android::nn::ExecutionPlan;
using ExecutionStep = ::android::nn::ExecutionStep;
using HalVersion = ::android::nn::HalVersion;
-using HidlModel = V1_2::Model;
+using HidlModel = V1_3::Model;
using ModelBuilder = ::android::nn::ModelBuilder;
using Result = ::android::nn::test_wrapper::Result;
using SampleDriver = ::android::nn::sample_driver::SampleDriver;
@@ -157,7 +162,8 @@
PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf};
return {.relaxedFloat32toFloat16PerformanceScalar = perfInfo,
.relaxedFloat32toFloat16PerformanceTensor = perfInfo,
- .operandPerformance = ::android::nn::nonExtensionOperandPerformance(perfInfo)};
+ .operandPerformance =
+ ::android::nn::nonExtensionOperandPerformance<HalVersion::V1_3>(perfInfo)};
};
void update(Capabilities* capabilities, OperandType type, float perf) {
@@ -326,7 +332,7 @@
return Void();
}
- Return<ErrorStatus> prepareModel_1_2(const Model& model, ExecutionPreference,
+ Return<ErrorStatus> prepareModel_1_3(const Model& model, ExecutionPreference,
const hidl_vec<hidl_handle>&, const hidl_vec<hidl_handle>&,
const CacheToken&,
const sp<IPreparedModelCallback>& cb) override {
@@ -345,12 +351,12 @@
Return<DeviceStatus> getStatus() override { return DeviceStatus::AVAILABLE; }
- Return<void> getCapabilities_1_2(getCapabilities_1_2_cb cb) override {
+ Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
cb(ErrorStatus::NONE, mCapabilities);
return Void();
}
- Return<void> getSupportedOperations_1_2(const Model& model,
+ Return<void> getSupportedOperations_1_3(const Model& model,
getSupportedOperations_cb cb) override {
if (!android::nn::validateModel(model)) {
cb(ErrorStatus::INVALID_ARGUMENT, std::vector<bool>());
@@ -393,41 +399,108 @@
OEM mOEM;
};
+// Like PartitioningDriver, but implementing 1.2
+class PartitioningDriverV1_2 : public V1_2::IDevice {
+ public:
+ PartitioningDriverV1_2(const char* name, const char* version, Capabilities capabilities,
+ uint32_t operationMask,
+ PartitioningDriver::OEM oem = PartitioningDriver::OEMNo)
+ : mLatestDriver(new PartitioningDriver(name, version, capabilities, operationMask, oem)) {}
+ Return<void> getCapabilities_1_2(getCapabilities_1_2_cb _hidl_cb) override {
+ return mLatestDriver->getCapabilities_1_2(_hidl_cb);
+ }
+ Return<void> getSupportedOperations_1_2(const V1_2::Model& model,
+ getSupportedOperations_1_2_cb _hidl_cb) override {
+ return mLatestDriver->getSupportedOperations_1_2(model, _hidl_cb);
+ }
+ Return<ErrorStatus> prepareModel_1_2(
+ const V1_2::Model& model, ExecutionPreference preference,
+ const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
+ const CacheToken& token, const sp<IPreparedModelCallback>& actualCallback) override {
+ return mLatestDriver->prepareModel_1_2(model, preference, modelCache, dataCache, token,
+ actualCallback);
+ }
+ Return<void> getVersionString(getVersionString_cb _hidl_cb) override {
+ return mLatestDriver->getVersionString(_hidl_cb);
+ }
+ Return<void> getType(getType_cb _hidl_cb) override { return mLatestDriver->getType(_hidl_cb); }
+ Return<void> getSupportedExtensions(getSupportedExtensions_cb _hidl_cb) {
+ return mLatestDriver->getSupportedExtensions(_hidl_cb);
+ }
+ Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb _hidl_cb) {
+ return mLatestDriver->getNumberOfCacheFilesNeeded(_hidl_cb);
+ }
+ Return<ErrorStatus> prepareModelFromCache(const hidl_vec<hidl_handle>& modelCache,
+ const hidl_vec<hidl_handle>& dataCache,
+ const CacheToken& token,
+ const sp<V1_2::IPreparedModelCallback>& callback) {
+ return mLatestDriver->prepareModelFromCache(modelCache, dataCache, token, callback);
+ }
+ Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
+ return mLatestDriver->getCapabilities_1_1(_hidl_cb);
+ }
+ Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
+ getSupportedOperations_1_1_cb _hidl_cb) override {
+ return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb);
+ }
+ Return<ErrorStatus> prepareModel_1_1(
+ const V1_1::Model& model, ExecutionPreference preference,
+ const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
+ return mLatestDriver->prepareModel_1_1(model, preference, actualCallback);
+ }
+ Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
+ Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ return mLatestDriver->getCapabilities(_hidl_cb);
+ }
+ Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
+ return mLatestDriver->getSupportedOperations(model, _hidl_cb);
+ }
+ Return<ErrorStatus> prepareModel(
+ const V1_0::Model& model,
+ const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
+ return mLatestDriver->prepareModel(model, actualCallback);
+ }
+
+ private:
+ const sp<V1_3::IDevice> mLatestDriver;
+};
+
// Like PartitioningDriver, but implementing 1.1
class PartitioningDriverV1_1 : public V1_1::IDevice {
public:
PartitioningDriverV1_1(const char* name, const char* version, Capabilities capabilities,
uint32_t operationMask,
PartitioningDriver::OEM oem = PartitioningDriver::OEMNo)
- : mDriverV1_2(new PartitioningDriver(name, version, capabilities, operationMask, oem)) {}
+ : mLatestDriver(new PartitioningDriver(name, version, capabilities, operationMask, oem)) {}
Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
- return mDriverV1_2->getCapabilities_1_1(_hidl_cb);
+ return mLatestDriver->getCapabilities_1_1(_hidl_cb);
}
Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
getSupportedOperations_1_1_cb _hidl_cb) override {
- return mDriverV1_2->getSupportedOperations_1_1(model, _hidl_cb);
+ return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb);
}
Return<ErrorStatus> prepareModel_1_1(
const V1_1::Model& model, ExecutionPreference preference,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
- return mDriverV1_2->prepareModel_1_1(model, preference, actualCallback);
+ return mLatestDriver->prepareModel_1_1(model, preference, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mDriverV1_2->getStatus(); }
+ Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
- return mDriverV1_2->getCapabilities(_hidl_cb);
+ return mLatestDriver->getCapabilities(_hidl_cb);
}
Return<void> getSupportedOperations(const V1_0::Model& model,
getSupportedOperations_cb _hidl_cb) override {
- return mDriverV1_2->getSupportedOperations(model, _hidl_cb);
+ return mLatestDriver->getSupportedOperations(model, _hidl_cb);
}
Return<ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
- return mDriverV1_2->prepareModel(model, actualCallback);
+ return mLatestDriver->prepareModel(model, actualCallback);
}
private:
- const sp<V1_2::IDevice> mDriverV1_2;
+ const sp<V1_3::IDevice> mLatestDriver;
};
// Like PartitioningDriver, but implementing 1.0
@@ -436,23 +509,23 @@
PartitioningDriverV1_0(const char* name, const char* version, Capabilities capabilities,
uint32_t operationMask,
PartitioningDriver::OEM oem = PartitioningDriver::OEMNo)
- : mDriverV1_2(new PartitioningDriver(name, version, capabilities, operationMask, oem)) {}
+ : mLatestDriver(new PartitioningDriver(name, version, capabilities, operationMask, oem)) {}
Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
- return mDriverV1_2->getCapabilities(_hidl_cb);
+ return mLatestDriver->getCapabilities(_hidl_cb);
}
Return<void> getSupportedOperations(const V1_0::Model& model,
getSupportedOperations_cb _hidl_cb) override {
- return mDriverV1_2->getSupportedOperations(model, _hidl_cb);
+ return mLatestDriver->getSupportedOperations(model, _hidl_cb);
}
Return<ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
- return mDriverV1_2->prepareModel(model, actualCallback);
+ return mLatestDriver->prepareModel(model, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mDriverV1_2->getStatus(); }
+ Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
private:
- const sp<V1_2::IDevice> mDriverV1_2;
+ const sp<V1_3::IDevice> mLatestDriver;
};
// This class adds some simple abstractions and utilities on top of
@@ -499,6 +572,7 @@
case ANEURALNETWORKS_TENSOR_INT32:
case ANEURALNETWORKS_TENSOR_QUANT8_ASYMM:
+ case ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED:
case ANEURALNETWORKS_TENSOR_QUANT8_SYMM:
case ANEURALNETWORKS_TENSOR_QUANT16_ASYMM:
case ANEURALNETWORKS_TENSOR_QUANT16_SYMM: {
@@ -705,10 +779,12 @@
PartitioningDriver::OEM oem = PartitioningDriver::OEMNo)
: mName(name), mVersionString(version), mOperationMask(operationMask), mOEM(oem) {
PerformanceInfo perfRelaxedInfo = {.execTime = perfRelaxed, .powerUsage = perfRelaxed};
- mCapabilities = {.relaxedFloat32toFloat16PerformanceScalar = perfRelaxedInfo,
- .relaxedFloat32toFloat16PerformanceTensor = perfRelaxedInfo,
- .operandPerformance = ::android::nn::nonExtensionOperandPerformance(
- {.execTime = perf, .powerUsage = perf})};
+ mCapabilities = {
+ .relaxedFloat32toFloat16PerformanceScalar = perfRelaxedInfo,
+ .relaxedFloat32toFloat16PerformanceTensor = perfRelaxedInfo,
+ .operandPerformance =
+ ::android::nn::nonExtensionOperandPerformance<HalVersion::V1_3>(
+ {.execTime = perf, .powerUsage = perf})};
}
DeviceSpecification(const std::string& name, float perf, HalVersion halVersion,
uint32_t operationMaskV1_0, uint32_t operationMaskV1_1 = 0,
@@ -773,12 +849,18 @@
for (const auto& specification : specifications) {
V1_0::IDevice* halDriver = nullptr;
switch (specification.mHalVersion) {
- case HalVersion::V1_2:
+ case HalVersion::V1_3:
halDriver = new PartitioningDriver(
specification.mName.c_str(), specification.mVersionString.c_str(),
specification.mCapabilities, specification.mOperationMask,
specification.mOEM);
break;
+ case HalVersion::V1_2:
+ halDriver = new PartitioningDriverV1_2(
+ specification.mName.c_str(), specification.mVersionString.c_str(),
+ specification.mCapabilities, specification.mOperationMask,
+ specification.mOEM);
+ break;
case HalVersion::V1_1:
halDriver = new PartitioningDriverV1_1(
specification.mName.c_str(), specification.mVersionString.c_str(),
diff --git a/runtime/test/TestPartitioningRandom.cpp b/runtime/test/TestPartitioningRandom.cpp
index faf28d7..c6b8208 100644
--- a/runtime/test/TestPartitioningRandom.cpp
+++ b/runtime/test/TestPartitioningRandom.cpp
@@ -16,6 +16,23 @@
#undef NDEBUG
+#include <android-base/logging.h>
+#include <gtest/gtest.h>
+#include <unistd.h>
+
+#include <algorithm>
+#include <cassert>
+#include <cstdio>
+#include <iterator>
+#include <map>
+#include <memory>
+#include <random>
+#include <set>
+#include <string>
+#include <tuple>
+#include <utility>
+#include <vector>
+
#include "CompilationBuilder.h"
#include "HalInterfaces.h"
#include "Manager.h"
@@ -26,22 +43,6 @@
#include "Utils.h"
#include "ValidateHal.h"
-#include <algorithm>
-#include <cassert>
-#include <cstdio>
-#include <iterator>
-#include <memory>
-#include <random>
-#include <set>
-#include <tuple>
-#include <utility>
-#include <vector>
-
-#include <unistd.h>
-
-#include <android-base/logging.h>
-#include <gtest/gtest.h>
-
// Uncomment the following line to generate some debugging output that
// may be useful when analyzing failures:
//
@@ -96,7 +97,7 @@
using DeviceManager = nn::DeviceManager;
using ExecutionPlan = nn::ExecutionPlan;
using HalVersion = nn::HalVersion;
-using HidlModel = V1_2::Model;
+using HidlModel = V1_3::Model;
using ModelBuilder = nn::ModelBuilder;
using Result = nn::test_wrapper::Result;
using SampleDriver = nn::sample_driver::SampleDriver;
@@ -509,6 +510,8 @@
return "V1_1";
case HalVersion::V1_2:
return "V1_2";
+ case HalVersion::V1_3:
+ return "V1_3";
default:
return "V_UNKNOWN";
}
@@ -521,18 +524,18 @@
TestDriver(const char* name, std::set<Signature> signatures)
: SampleDriver(name), mSignatures(std::move(signatures)) {}
- Return<void> getCapabilities_1_2(getCapabilities_1_2_cb _hidl_cb) override {
+ Return<void> getCapabilities_1_3(getCapabilities_1_3_cb _hidl_cb) override {
android::nn::initVLogMask();
const PerformanceInfo kPerf = {.execTime = 0.75f, .powerUsage = 0.75f};
Capabilities capabilities = {
.relaxedFloat32toFloat16PerformanceScalar = kPerf,
.relaxedFloat32toFloat16PerformanceTensor = kPerf,
- .operandPerformance = nn::nonExtensionOperandPerformance(kPerf)};
+ .operandPerformance = nn::nonExtensionOperandPerformance<HalVersion::V1_3>(kPerf)};
_hidl_cb(ErrorStatus::NONE, capabilities);
return Void();
}
- Return<void> getSupportedOperations_1_2(const HidlModel& model,
+ Return<void> getSupportedOperations_1_3(const HidlModel& model,
getSupportedOperations_cb cb) override {
if (nn::validateModel(model)) {
const size_t count = model.operations.size();
@@ -549,14 +552,14 @@
return Void();
}
- Return<ErrorStatus> prepareModel_1_2(const HidlModel& model, ExecutionPreference preference,
+ Return<ErrorStatus> prepareModel_1_3(const HidlModel& model, ExecutionPreference preference,
const hidl_vec<hidl_handle>& modelCache,
const hidl_vec<hidl_handle>& dataCache,
const CacheToken& token,
const sp<IPreparedModelCallback>& callback) override {
// NOTE: We verify that all operations in the model are supported.
ErrorStatus outStatus = ErrorStatus::INVALID_ARGUMENT;
- auto ret = getSupportedOperations_1_2(
+ auto ret = getSupportedOperations_1_3(
model,
[&outStatus](ErrorStatus inStatus, const hidl_vec<bool>& supportedOperations) {
if (inStatus == ErrorStatus::NONE) {
@@ -567,7 +570,7 @@
}
});
if (ret.isOk() && (outStatus == ErrorStatus::NONE)) {
- return SampleDriver::prepareModel_1_2(model, preference, modelCache, dataCache, token,
+ return SampleDriver::prepareModel_1_3(model, preference, modelCache, dataCache, token,
callback);
} else {
callback->notify_1_2(ErrorStatus::INVALID_ARGUMENT, nullptr);
@@ -579,62 +582,127 @@
const std::set<Signature> mSignatures;
};
-// Like TestDriver, but implementing 1.1
-class TestDriverV1_1 : public V1_1::IDevice {
+// Like TestDriver, but implementing 1.2
+class TestDriverV1_2 : public V1_2::IDevice {
public:
- TestDriverV1_1(const char* name, std::set<Signature> signatures)
- : mDriverV1_2(new TestDriver(name, std::move(signatures))) {}
+ TestDriverV1_2(const char* name, std::set<Signature> signatures)
+ : mLatestDriver(new TestDriver(name, std::move(signatures))) {}
+ Return<void> getCapabilities_1_2(getCapabilities_1_2_cb _hidl_cb) override {
+ return mLatestDriver->getCapabilities_1_2(_hidl_cb);
+ }
+ Return<void> getSupportedOperations_1_2(const V1_2::Model& model,
+ getSupportedOperations_1_2_cb _hidl_cb) override {
+ return mLatestDriver->getSupportedOperations_1_2(model, _hidl_cb);
+ }
+ Return<ErrorStatus> prepareModel_1_2(
+ const V1_2::Model& model, ExecutionPreference preference,
+ const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
+ const CacheToken& token, const sp<IPreparedModelCallback>& actualCallback) override {
+ return mLatestDriver->prepareModel_1_2(model, preference, modelCache, dataCache, token,
+ actualCallback);
+ }
+ Return<void> getVersionString(getVersionString_cb _hidl_cb) override {
+ return mLatestDriver->getVersionString(_hidl_cb);
+ }
+ Return<void> getType(getType_cb _hidl_cb) override { return mLatestDriver->getType(_hidl_cb); }
+ Return<void> getSupportedExtensions(getSupportedExtensions_cb _hidl_cb) override {
+ return mLatestDriver->getSupportedExtensions(_hidl_cb);
+ }
+ Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb _hidl_cb) override {
+ return mLatestDriver->getNumberOfCacheFilesNeeded(_hidl_cb);
+ }
+ Return<ErrorStatus> prepareModelFromCache(const hidl_vec<hidl_handle>& modelCache,
+ const hidl_vec<hidl_handle>& dataCache,
+ const CacheToken& token,
+ const sp<V1_2::IPreparedModelCallback>& callback) override {
+ return mLatestDriver->prepareModelFromCache(modelCache, dataCache, token, callback);
+ }
Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
- return mDriverV1_2->getCapabilities_1_1(_hidl_cb);
+ return mLatestDriver->getCapabilities_1_1(_hidl_cb);
}
Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
getSupportedOperations_1_1_cb _hidl_cb) override {
- return mDriverV1_2->getSupportedOperations_1_1(model, _hidl_cb);
+ return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb);
}
Return<ErrorStatus> prepareModel_1_1(
const V1_1::Model& model, ExecutionPreference preference,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
- return mDriverV1_2->prepareModel_1_1(model, preference, actualCallback);
+ return mLatestDriver->prepareModel_1_1(model, preference, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mDriverV1_2->getStatus(); }
+ Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
- return mDriverV1_2->getCapabilities(_hidl_cb);
+ return mLatestDriver->getCapabilities(_hidl_cb);
}
Return<void> getSupportedOperations(const V1_0::Model& model,
getSupportedOperations_cb _hidl_cb) override {
- return mDriverV1_2->getSupportedOperations(model, _hidl_cb);
+ return mLatestDriver->getSupportedOperations(model, _hidl_cb);
}
Return<ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
- return mDriverV1_2->prepareModel(model, actualCallback);
+ return mLatestDriver->prepareModel(model, actualCallback);
}
private:
- const sp<V1_2::IDevice> mDriverV1_2;
+ const sp<V1_3::IDevice> mLatestDriver;
+};
+
+// Like TestDriver, but implementing 1.1
+class TestDriverV1_1 : public V1_1::IDevice {
+ public:
+ TestDriverV1_1(const char* name, std::set<Signature> signatures)
+ : mLatestDriver(new TestDriver(name, std::move(signatures))) {}
+ Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override {
+ return mLatestDriver->getCapabilities_1_1(_hidl_cb);
+ }
+ Return<void> getSupportedOperations_1_1(const V1_1::Model& model,
+ getSupportedOperations_1_1_cb _hidl_cb) override {
+ return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb);
+ }
+ Return<ErrorStatus> prepareModel_1_1(
+ const V1_1::Model& model, ExecutionPreference preference,
+ const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
+ return mLatestDriver->prepareModel_1_1(model, preference, actualCallback);
+ }
+ Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
+ Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
+ return mLatestDriver->getCapabilities(_hidl_cb);
+ }
+ Return<void> getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb _hidl_cb) override {
+ return mLatestDriver->getSupportedOperations(model, _hidl_cb);
+ }
+ Return<ErrorStatus> prepareModel(
+ const V1_0::Model& model,
+ const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
+ return mLatestDriver->prepareModel(model, actualCallback);
+ }
+
+ private:
+ const sp<V1_3::IDevice> mLatestDriver;
};
// Like TestDriver, but implementing 1.0
class TestDriverV1_0 : public V1_0::IDevice {
public:
TestDriverV1_0(const char* name, std::set<Signature> signatures)
- : mDriverV1_2(new TestDriver(name, std::move(signatures))) {}
+ : mLatestDriver(new TestDriver(name, std::move(signatures))) {}
Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override {
- return mDriverV1_2->getCapabilities(_hidl_cb);
+ return mLatestDriver->getCapabilities(_hidl_cb);
}
Return<void> getSupportedOperations(const V1_0::Model& model,
getSupportedOperations_cb _hidl_cb) override {
- return mDriverV1_2->getSupportedOperations(model, _hidl_cb);
+ return mLatestDriver->getSupportedOperations(model, _hidl_cb);
}
Return<ErrorStatus> prepareModel(
const V1_0::Model& model,
const sp<V1_0::IPreparedModelCallback>& actualCallback) override {
- return mDriverV1_2->prepareModel(model, actualCallback);
+ return mLatestDriver->prepareModel(model, actualCallback);
}
- Return<DeviceStatus> getStatus() override { return mDriverV1_2->getStatus(); }
+ Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); }
private:
- const sp<V1_2::IDevice> mDriverV1_2;
+ const sp<V1_3::IDevice> mLatestDriver;
};
V1_0::IDevice* RandomPartitioningTest::makeTestDriver(HalVersion version, const char* name,
@@ -645,6 +713,8 @@
case HalVersion::V1_1:
return new TestDriverV1_1(name, std::move(signatures));
case HalVersion::V1_2:
+ return new TestDriverV1_2(name, std::move(signatures));
+ case HalVersion::V1_3:
return new TestDriver(name, std::move(signatures));
default:
ADD_FAILURE() << "Unexpected HalVersion " << static_cast<int32_t>(version);
diff --git a/runtime/test/fibonacci_extension/FibonacciDriver.cpp b/runtime/test/fibonacci_extension/FibonacciDriver.cpp
index cd1edb6..36d2552 100644
--- a/runtime/test/fibonacci_extension/FibonacciDriver.cpp
+++ b/runtime/test/fibonacci_extension/FibonacciDriver.cpp
@@ -18,6 +18,9 @@
#include "FibonacciDriver.h"
+#include <vector>
+
+#include "FibonacciExtension.h"
#include "HalInterfaces.h"
#include "NeuralNetworksExtensions.h"
#include "OperationResolver.h"
@@ -25,10 +28,6 @@
#include "Utils.h"
#include "ValidateHal.h"
-#include "FibonacciExtension.h"
-
-#include <vector>
-
namespace android {
namespace nn {
namespace sample_driver {
@@ -173,19 +172,20 @@
return Void();
}
-Return<void> FibonacciDriver::getCapabilities_1_2(getCapabilities_1_2_cb cb) {
+Return<void> FibonacciDriver::getCapabilities_1_3(getCapabilities_1_3_cb cb) {
android::nn::initVLogMask();
VLOG(DRIVER) << "getCapabilities()";
static const PerformanceInfo kPerf = {.execTime = 1.0f, .powerUsage = 1.0f};
- Capabilities capabilities = {.relaxedFloat32toFloat16PerformanceScalar = kPerf,
- .relaxedFloat32toFloat16PerformanceTensor = kPerf,
- .operandPerformance = nonExtensionOperandPerformance(kPerf)};
+ Capabilities capabilities = {
+ .relaxedFloat32toFloat16PerformanceScalar = kPerf,
+ .relaxedFloat32toFloat16PerformanceTensor = kPerf,
+ .operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>(kPerf)};
cb(ErrorStatus::NONE, capabilities);
return Void();
}
-Return<void> FibonacciDriver::getSupportedOperations_1_2(const V1_2::Model& model,
- getSupportedOperations_1_2_cb cb) {
+Return<void> FibonacciDriver::getSupportedOperations_1_3(const V1_3::Model& model,
+ getSupportedOperations_1_3_cb cb) {
VLOG(DRIVER) << "getSupportedOperations()";
if (!validateModel(model)) {
cb(ErrorStatus::INVALID_ARGUMENT, {});
diff --git a/runtime/test/fibonacci_extension/FibonacciDriver.h b/runtime/test/fibonacci_extension/FibonacciDriver.h
index 2ae6da0..303edd8 100644
--- a/runtime/test/fibonacci_extension/FibonacciDriver.h
+++ b/runtime/test/fibonacci_extension/FibonacciDriver.h
@@ -46,9 +46,9 @@
public:
FibonacciDriver() : SampleDriver(kDriverName, FibonacciOperationResolver::get()) {}
hal::Return<void> getSupportedExtensions(getSupportedExtensions_cb cb) override;
- hal::Return<void> getCapabilities_1_2(getCapabilities_1_2_cb cb) override;
- hal::Return<void> getSupportedOperations_1_2(const hal::V1_2::Model& model,
- getSupportedOperations_1_2_cb cb) override;
+ hal::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override;
+ hal::Return<void> getSupportedOperations_1_3(const hal::V1_3::Model& model,
+ getSupportedOperations_1_3_cb cb) override;
static constexpr char kDriverName[] = "sample-driver-fibonacci-extension";
};
diff --git a/runtime/test/fuzzing/RandomGraphGeneratorUtils.h b/runtime/test/fuzzing/RandomGraphGeneratorUtils.h
index 1611a4a..8335727 100644
--- a/runtime/test/fuzzing/RandomGraphGeneratorUtils.h
+++ b/runtime/test/fuzzing/RandomGraphGeneratorUtils.h
@@ -19,10 +19,12 @@
#include <chrono>
#include <fstream>
+#include <limits>
#include <memory>
#include <random>
#include <sstream>
#include <string>
+#include <vector>
#include "RandomGraphGenerator.h"
#include "RandomVariable.h"
@@ -251,6 +253,7 @@
"TENSOR_QUANT8_SYMM_PER_CHANNEL",
"TENSOR_QUANT16_ASYMM",
"TENSOR_QUANT8_SYMM",
+ "TENSOR_QUANT8_ASYMM_SIGNED",
};
static const char* kLifeTimeNames[6] = {
@@ -264,7 +267,7 @@
true, // ANEURALNETWORKS_UINT32
false, // ANEURALNETWORKS_TENSOR_FLOAT32
false, // ANEURALNETWORKS_TENSOR_INT32
- false, // ANEURALNETWORKS_TENSOR_SYMMETRICAL_QUANT8
+ false, // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM
true, // ANEURALNETWORKS_BOOL
false, // ANEURALNETWORKS_TENSOR_QUANT16_SYMM
false, // ANEURALNETWORKS_TENSOR_FLOAT16
@@ -273,6 +276,7 @@
false, // ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL
false, // ANEURALNETWORKS_TENSOR_QUANT16_ASYMM
false, // ANEURALNETWORKS_TENSOR_QUANT8_SYMM
+ false, // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED
};
static const uint32_t kSizeOfDataType[]{
@@ -281,7 +285,7 @@
4, // ANEURALNETWORKS_UINT32
4, // ANEURALNETWORKS_TENSOR_FLOAT32
4, // ANEURALNETWORKS_TENSOR_INT32
- 1, // ANEURALNETWORKS_TENSOR_SYMMETRICAL_QUANT8
+ 1, // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM
1, // ANEURALNETWORKS_BOOL
2, // ANEURALNETWORKS_TENSOR_QUANT16_SYMM
2, // ANEURALNETWORKS_TENSOR_FLOAT16
@@ -290,6 +294,7 @@
1, // ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL
2, // ANEURALNETWORKS_TENSOR_QUANT16_ASYMM
1, // ANEURALNETWORKS_TENSOR_QUANT8_SYMM
+ 1, // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED
};
template <>
diff --git a/tools/test_generator/test_harness/include/TestHarness.h b/tools/test_generator/test_harness/include/TestHarness.h
index 51dd78e..28f11a6 100644
--- a/tools/test_generator/test_harness/include/TestHarness.h
+++ b/tools/test_generator/test_harness/include/TestHarness.h
@@ -67,6 +67,7 @@
TENSOR_QUANT8_SYMM_PER_CHANNEL = 11,
TENSOR_QUANT16_ASYMM = 12,
TENSOR_QUANT8_SYMM = 13,
+ TENSOR_QUANT8_ASYMM_SIGNED = 14,
};
enum class TestOperandLifeTime {
@@ -176,7 +177,7 @@
RESIZE_NEAREST_NEIGHBOR = 94,
};
-enum class TestHalVersion { UNKNOWN, V1_0, V1_1, V1_2 };
+enum class TestHalVersion { UNKNOWN, V1_0, V1_1, V1_2, V1_3 };
// Manages the data buffer for a test operand.
class TestBuffer {