Merge "Improve validation of the HAL structures." into oc-mr1-nn-dev
diff --git a/1.0/Device.cpp b/1.0/Device.cpp
index 6516a53..e4b4f53 100644
--- a/1.0/Device.cpp
+++ b/1.0/Device.cpp
@@ -89,7 +89,7 @@
void Device::asyncPrepare(const Model& model, const sp<IPreparedModelCallback>& callback) {
std::shared_ptr<hexagon::Model> hexagonModel = std::make_shared<hexagon::Model>(model);
- if (hexagonModel->compile()) {
+ if (hexagonModel->prepare()) {
callback->notify(ErrorStatus::NONE, new PreparedModel(model, hexagonModel));
}
else {
@@ -114,9 +114,9 @@
return ErrorStatus::DEVICE_UNAVAILABLE;
}
- // This thread is intentionally detached because the sample driver service
- // is expected to live forever.
- std::thread([this, model, callback]{ asyncPrepare(model, callback); }).detach();
+ // TODO: once nnlib hanging issue is resolved, make this function
+ // asynchronous again
+ asyncPrepare(model, callback);
return ErrorStatus::NONE;
}
diff --git a/1.0/HexagonController.cpp b/1.0/HexagonController.cpp
index be36f40..343e8fa 100644
--- a/1.0/HexagonController.cpp
+++ b/1.0/HexagonController.cpp
@@ -21,10 +21,35 @@
#define LOAD_HEXAGON_FUNCTION(name) \
mFn_##name = loadFunction<hexagon_nn_controller_##name##_fn>("hexagon_nn_controller_"#name);
-#define CONTROLLER_CHECK(function, ...) \
- int err = mFn_##function(__VA_ARGS__); \
- if (err != 0) { \
- return err; \
+#define CLOSE_HEXAGON_FUNCTION(name) mFn_##name = nullptr;
+
+#define FOR_EACH_FUNCTION(MACRO) \
+ MACRO(init) \
+ MACRO(getlog) \
+ MACRO(snpprint) \
+ MACRO(set_debug_level) \
+ MACRO(prepare) \
+ MACRO(append_node) \
+ MACRO(append_const_node) \
+ MACRO(execute_new) \
+ MACRO(execute) \
+ MACRO(teardown) \
+ MACRO(get_perfinfo) \
+ MACRO(reset_perfinfo) \
+ MACRO(version) \
+ MACRO(last_execution_cycles) \
+ MACRO(GetHexagonBinaryVersion) \
+ MACRO(PrintLog) \
+ MACRO(op_name_to_id) \
+ MACRO(op_id_to_name) \
+ MACRO(disable_dcvs) \
+ MACRO(set_powersave_level) \
+ MACRO(config)
+
+#define CONTROLLER_CHECK(function, ...) \
+ int err = mFn_##function(__VA_ARGS__); \
+ if (err != 0) { \
+ return err; \
}
namespace android {
@@ -34,41 +59,35 @@
namespace implementation {
namespace hexagon {
+const char Controller::kFilename[] = "libhexagon_nn_controller.so";
+
Controller::Controller() {
    // All dlopen()/symbol-resolution work lives in openNnlib() so the same
    // code path can be reused by resetNnlib() to reload the library later.
    openNnlib();
}
Controller::~Controller() {
    // Null out every cached function pointer and dlclose() the library.
    closeNnlib();
}
+
+bool Controller::openNnlib() {
+ mHandle = dlopen(kFilename, RTLD_LAZY | RTLD_LOCAL);
+ HEXAGON_SOFT_ASSERT_NE(mHandle, 0, "FAILED TO LOAD LIBRARY "/* << kFilename << ": " << dlerror()*/);
+ FOR_EACH_FUNCTION(LOAD_HEXAGON_FUNCTION)
+ return true;
+}
+
+bool Controller::closeNnlib() {
+ FOR_EACH_FUNCTION(CLOSE_HEXAGON_FUNCTION)
if (mHandle != nullptr) {
- dlclose(mHandle);
+ int err = dlclose(mHandle);
+ mHandle = nullptr;
+ HEXAGON_SOFT_ASSERT_EQ(err, 0, "FAILED TO CLOSE LIBRARY " << kFilename);
}
+ return true;
+}
+
+bool Controller::resetNnlib() {
+ return closeNnlib() && openNnlib();
}
Controller& Controller::getInstance() {
diff --git a/1.0/HexagonController.h b/1.0/HexagonController.h
index 04775ec..7a31c74 100644
--- a/1.0/HexagonController.h
+++ b/1.0/HexagonController.h
@@ -49,8 +49,12 @@
return reinterpret_cast<Function>(fn);
}
+ bool openNnlib();
+ bool closeNnlib();
+
public:
static Controller& getInstance();
+ bool resetNnlib();
hexagon_nn_nn_id init();
@@ -132,7 +136,8 @@
// members
private:
- void* mHandle;
+ static const char kFilename[];
+ void* mHandle;
hexagon_nn_controller_init_fn mFn_init;
hexagon_nn_controller_getlog_fn mFn_getlog;
hexagon_nn_controller_snpprint_fn mFn_snpprint;
diff --git a/1.0/HexagonModel.cpp b/1.0/HexagonModel.cpp
index 6e7e773..7f7b1c8 100644
--- a/1.0/HexagonModel.cpp
+++ b/1.0/HexagonModel.cpp
@@ -46,10 +46,7 @@
return info;
}
-Model::Model(const NeuralnetworksModel& model) : mNodeCount(0), mCompiled(false) {
- mGraphId = hexagon::Controller::getInstance().init();
- hexagon::Controller::getInstance().set_debug_level(mGraphId, 99);
-
+Model::Model(const NeuralnetworksModel& model) : mGraphId(0), mNodeCount(0), mCompiled(false) {
mPools = mapPools(model.pools);
mOperands = getOperandsInfo(model, mPools);
std::for_each(mPools.begin(), mPools.end(), [](RunTimePoolInfo& mem) { mem.update(); });
@@ -64,41 +61,42 @@
}
Model& Model::operator=(Model&& other) {
    // Move-assign: take over other's graph and cached state, leaving other
    // in an empty (safely destructible) state.
    if (this != &other) {
        // Tear down any graph this model already owns. Without this, the
        // old mGraphId is overwritten below and that hexagon graph leaks
        // (its teardown() is never called).
        if (mGraphId != hexagon_nn_nn_id{}) {
            hexagon::Controller::getInstance().teardown(mGraphId);
        }
        mNodeCount = other.mNodeCount;
        mGraphId = other.mGraphId;
        mCompiled = other.mCompiled;
        mOperands = std::move(other.mOperands);
        mOperations = std::move(other.mOperations);
        mInputs = std::move(other.mInputs);
        mOutputs = std::move(other.mOutputs);
        mPools = std::move(other.mPools);
        // Neutralize the source so its destructor does not tear down the
        // graph we just took ownership of.
        other.mNodeCount = 0;
        other.mGraphId = {};
        other.mCompiled = false;
    }
    return *this;
}
Model::~Model() {
    // Tears down the hexagon graph (if one was created) and resets the
    // cached per-operand hexagon inputs/outputs.
    clearModel();
}
std::string Model::getLog() {
char buffer[16*1024];
- int err = hexagon::Controller::getInstance().snpprint(
+ int err = hexagon::Controller::getInstance().getlog(
mGraphId, reinterpret_cast<uint8_t*>(buffer), sizeof(buffer));
HEXAGON_SOFT_ASSERT_EQ(0, err, "failed getLog");
return buffer;
}
+std::string Model::getGraph() {
+ char buffer[16*1024];
+ int err = hexagon::Controller::getInstance().snpprint(
+ mGraphId, reinterpret_cast<uint8_t*>(buffer), sizeof(buffer));
+ HEXAGON_SOFT_ASSERT_EQ(0, err, "failed getGraph");
+ return buffer;
+}
+
uint32_t Model::getNextNode() {
    // Node ids start at 1; callers treat 0 as the "invalid node" sentinel
    // (see the HEXAGON_SOFT_ASSERT_NE(0, node, ...) checks).
    mNodeCount += 1;
    return mNodeCount;
}
@@ -167,8 +165,9 @@
const hexagon_nn_input& Model::getQuantizationMin(uint32_t operand) {
    // Lazily create (and cache) the constant node holding the real-valued
    // quantization minimum for this operand.
    OperandInfo& operandInfo = mOperands[operand];
    if (operandInfo.hexagon_input_min == hexagon_nn_input{}) {
        // QUANT8_ASYMM spans [0, 255] shifted by zeroPoint. The other case
        // is presumably a 32-bit quantized bias tensor -- TODO confirm it is
        // TENSOR_INT32; its minimum must use int32_t: the previous
        // uint32_t::min() is 0, which can never express a negative minimum.
        float real_value = operandInfo.type == OperandType::TENSOR_QUANT8_ASYMM ?
                (std::numeric_limits<uint8_t>::min() - operandInfo.zeroPoint) *
                        operandInfo.scale :
                std::numeric_limits<int32_t>::min() * operandInfo.scale;
        operandInfo.hexagon_input_min = createValues<float>({real_value});
    }
    return operandInfo.hexagon_input_min;
}
@@ -177,8 +176,9 @@
const hexagon_nn_input& Model::getQuantizationMax(uint32_t operand) {
    // Lazily create (and cache) the constant node holding the real-valued
    // quantization maximum for this operand.
    OperandInfo& operandInfo = mOperands[operand];
    if (operandInfo.hexagon_input_max == hexagon_nn_input{}) {
        // QUANT8_ASYMM spans [0, 255] shifted by zeroPoint. The other case
        // is presumably a 32-bit quantized bias tensor -- TODO confirm it is
        // TENSOR_INT32; its maximum must use int32_t: uint32_t::max() is
        // roughly twice the representable int32 range.
        float real_value = operandInfo.type == OperandType::TENSOR_QUANT8_ASYMM ?
                (std::numeric_limits<uint8_t>::max() - operandInfo.zeroPoint) *
                        operandInfo.scale :
                std::numeric_limits<int32_t>::max() * operandInfo.scale;
        operandInfo.hexagon_input_max = createValues<float>({real_value});
    }
    return operandInfo.hexagon_input_max;
}
@@ -408,7 +408,7 @@
bool Model::addFusedQuant8Operation(op_type op,
hexagon_nn_padding_type pad,
- const hexagon_nn_input& bias,
+ const std::vector<hexagon_nn_input>& bias,
op_type activation,
const std::vector<hexagon_nn_input>& inputs,
const std::vector<uint32_t>& outputs) {
@@ -430,28 +430,32 @@
// base operation
node = addOperationInternal(op, pad, inputs, out32);
HEXAGON_SOFT_ASSERT_NE(0, node, "Error adding base operation");
- const hexagon_nn_input old_min = {.src_id = node, .output_idx = 1};
- const hexagon_nn_input old_max = {.src_id = node, .output_idx = 2};
+ hexagon_nn_input previous = {.src_id = node, .output_idx = 0};
+ hexagon_nn_input previous_min = {.src_id = node, .output_idx = 1};
+ hexagon_nn_input previous_max = {.src_id = node, .output_idx = 2};
// add bias
- if (bias != hexagon_nn_input{}) {
- std::vector<hexagon_nn_input> buffer1_in = {{.src_id = node, .output_idx = 0}, bias,
- old_min, old_max, old_min, old_max};
- node = addOperationInternal(OP_QuantizedBiasAdd_32p32to32, NN_PAD_NA, buffer1_in, out32);
+ if (bias.size() == 3) {
+ node = addOperationInternal(OP_QuantizedBiasAdd_32p32to32, NN_PAD_NA,
+ {previous, bias[0], previous_min, previous_max, bias[1], bias[2]}, out32);
HEXAGON_SOFT_ASSERT_NE(0, node, "Error adding bias operation");
+ previous.src_id = node;
+ previous_min.src_id = node;
+ previous_max.src_id = node;
}
// requantize
- const hexagon_nn_input buffer2_in = {.src_id = node, .output_idx = 0};
node = addOperationInternal(OP_Requantize_32to8, NN_PAD_NA,
- {buffer2_in, old_min, old_max, new_min, new_max}, out8);
+ {previous, previous_min, previous_max, new_min, new_max}, out8);
HEXAGON_SOFT_ASSERT_NE(0, node, "Error adding requantize operation");
+ previous.src_id = node;
+ previous_min.src_id = node;
+ previous_max.src_id = node;
// activation
- std::vector<hexagon_nn_input> buffer3 = {{.src_id = node, .output_idx = 0},
- {.src_id = node, .output_idx = 1}, {.src_id = node, .output_idx = 2}};
- buffer3.insert(buffer3.end(), actArgs.begin(), actArgs.end());
- node = addOperationInternal(activation, NN_PAD_NA, buffer3, out8);
+ std::vector<hexagon_nn_input> buffer = {previous, previous_min, previous_max};
+ buffer.insert(buffer.end(), actArgs.begin(), actArgs.end());
+ node = addOperationInternal(activation, NN_PAD_NA, buffer, out8);
HEXAGON_SOFT_ASSERT_NE(0, node, "Error adding activation operation");
return registerHexagonInputs(outputs, node);
@@ -526,17 +530,17 @@
return true;
}
-void Model::resetModel() {
+void Model::clearModel() {
mCompiled = false;
for (OperandInfo& operand : mOperands) {
operand.hexagon_input = {};
+ operand.hexagon_input_min = {};
+ operand.hexagon_input_max = {};
operand.hexagon_output = {};
}
if (mGraphId != hexagon_nn_nn_id{}) {
hexagon::Controller::getInstance().teardown(mGraphId);
}
- mGraphId = hexagon::Controller::getInstance().init();
- hexagon::Controller::getInstance().set_debug_level(mGraphId, 99);
}
std::vector<bool> Model::supportedOperations() {
@@ -554,13 +558,17 @@
return supported;
}
-bool Model::compile() {
+bool Model::prepare() {
if (!verifyOperations() || !verifyOperands()) {
return false;
}
+ mGraphId = hexagon::Controller::getInstance().init();
+ HEXAGON_SOFT_ASSERT_NE(0, mGraphId, "Hexagon could not allocate new graph");
+ hexagon::Controller::getInstance().set_debug_level(mGraphId, 0);
+
if (!addInputs() || !addOperations() || !addOutputs()) {
- resetModel();
+ clearModel();
return false;
}
diff --git a/1.0/HexagonModel.h b/1.0/HexagonModel.h
index 500c074..40b3bc2 100644
--- a/1.0/HexagonModel.h
+++ b/1.0/HexagonModel.h
@@ -79,8 +79,8 @@
Model(const NeuralnetworksModel& model);
~Model();
- std::string getDebugLog();
std::string getLog();
+ std::string getGraph();
// model check
const int32_t* getPointer(uint32_t operand);
@@ -127,13 +127,13 @@
const std::vector<uint32_t>& outputs);
bool addFusedQuant8Operation(op_type op,
hexagon_nn_padding_type pad,
- const hexagon_nn_input& bias,
+ const std::vector<hexagon_nn_input>& bias,
op_type activation,
const std::vector<hexagon_nn_input>& inputs,
const std::vector<uint32_t>& outputs);
std::vector<bool> supportedOperations();
- bool compile();
+ bool prepare();
bool execute(const Request& request);
private:
@@ -155,7 +155,7 @@
bool addOperations();
bool addOutputs();
- void resetModel();
+ void clearModel();
// members
hexagon_nn_nn_id mGraphId;
diff --git a/1.0/HexagonOperationsPrepare.cpp b/1.0/HexagonOperationsPrepare.cpp
index 1c2d7cd..0a6b392 100644
--- a/1.0/HexagonOperationsPrepare.cpp
+++ b/1.0/HexagonOperationsPrepare.cpp
@@ -511,7 +511,7 @@
// add node to graph
return model->addFusedQuant8Operation(OP_QuantizedAdd_8p8to32, NN_PAD_NA, {}, act,
- {in1, in2, in1_min, in2_min, in1_max, in2_max}, outs);
+ {in1, in2, in1_min, in1_max, in2_min, in2_max}, outs);
}
bool average_pool_2d(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
@@ -638,13 +638,15 @@
const hexagon_nn_input& input_max = model->getQuantizationMax(ins[0]);
const hexagon_nn_input& filter_min = model->getQuantizationMin(ins[1]);
const hexagon_nn_input& filter_max = model->getQuantizationMax(ins[1]);
+ const hexagon_nn_input& bias_min = model->getQuantizationMin(ins[2]);
+ const hexagon_nn_input& bias_max = model->getQuantizationMax(ins[2]);
const hexagon_nn_input stride = model->createShape(1, stride_height, stride_width, 1);
// add node to graph
- return model->addFusedQuant8Operation(OP_QuantizedConv2d_8x8to32, pad, bias, act,
- {input, filter, input_min, input_max,
- filter_min, filter_max, stride}, outs);
+ return model->addFusedQuant8Operation(
+ OP_QuantizedConv2d_8x8to32, pad, {bias, bias_min, bias_max}, act,
+ {input, filter, input_min, input_max, filter_min, filter_max, stride}, outs);
}
bool depthwise_conv_2d(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
@@ -695,14 +697,16 @@
const hexagon_nn_input& input_max = model->getQuantizationMax(ins[0]);
const hexagon_nn_input& filter_min = model->getQuantizationMin(ins[1]);
const hexagon_nn_input& filter_max = model->getQuantizationMax(ins[1]);
+ const hexagon_nn_input& bias_min = model->getQuantizationMin(ins[2]);
+ const hexagon_nn_input& bias_max = model->getQuantizationMax(ins[2]);
const hexagon_nn_input filter = model->createDepthwiseFilterTensor(ins[1], depth_multiplier);
const hexagon_nn_input stride = model->createShape(1, stride_height, stride_width, 1);
// add node to graph
- return model->addFusedQuant8Operation(OP_QuantizedDepthwiseConv2d_8x8to32, pad, bias, act,
- {input, filter, input_min, input_max, filter_min,
- filter_max, stride}, outs);
+ return model->addFusedQuant8Operation(
+ OP_QuantizedDepthwiseConv2d_8x8to32, pad, {bias, bias_min, bias_max}, act,
+ {input, filter, input_min, input_max, filter_min, filter_max, stride}, outs);
}
bool dequantize(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
@@ -736,11 +740,13 @@
const hexagon_nn_input& input_max = model->getQuantizationMax(ins[0]);
const hexagon_nn_input& weights_min = model->getQuantizationMin(ins[1]);
const hexagon_nn_input& weights_max = model->getQuantizationMax(ins[1]);
+ const hexagon_nn_input& bias_min = model->getQuantizationMin(ins[2]);
+ const hexagon_nn_input& bias_max = model->getQuantizationMax(ins[2]);
// add node to graph
- return model->addFusedQuant8Operation(OP_QuantizedMatMul_8x8to32, NN_PAD_NA, bias, act,
- {input, weights, input_min, input_max,
- weights_min, weights_max}, outs);
+ return model->addFusedQuant8Operation(
+ OP_QuantizedMatMul_8x8to32, NN_PAD_NA, {bias, bias_min, bias_max}, act,
+ {input, weights, input_min, input_max, weights_min, weights_max}, outs);
}
bool logistic(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
diff --git a/1.0/HexagonUtils.cpp b/1.0/HexagonUtils.cpp
index a1c55d2..2ed6ad2 100644
--- a/1.0/HexagonUtils.cpp
+++ b/1.0/HexagonUtils.cpp
@@ -32,7 +32,12 @@
bool isHexagonAvailable() {
int version = -1;
- hexagon::Controller::getInstance().version(&version);
+ Controller::getInstance().version(&version);
+ if (version != 92) {
+ LOG(INFO) << "ATTEMPTING TO RESTART NNLIB";
+ Controller::getInstance().resetNnlib();
+ Controller::getInstance().version(&version);
+ }
return version == 92;
}
@@ -198,206 +203,6 @@
return output;
}
-namespace {
-
-const char* kOps[] = {
- "OP_INPUT",
- "OP_OUTPUT",
- "OP_Nop",
- "OP_Const",
- "OP_Check",
- "OP_Close_f",
- "OP_Close_quint8",
- "OP_Close_q_quint8",
- "OP_Close_int32",
- "OP_Close_qint32",
- "OP_PPrint_8",
- "OP_PPrint_32",
- "OP_PPrint_f",
- "OP_PreFree",
- "OP_Flatten",
- "OP_QuantizedConv2d_8x8to32",
- "OP_QuantizedConv2d_8x8to32_ref",
- "OP_QuantizedMatMul_8x8to32",
- "OP_QuantizedMatMul_8x8to32_ref",
- "OP_QuantizeDownAndShrinkRange_32to8",
- "OP_QuantizeDownAndShrinkRange_32to8_ref",
- "OP_QuantizedRelu_8",
- "OP_QuantizedRelu_8_ref",
- "OP_QuantizedReluX_8",
- "OP_QuantizedReluX_8_ref",
- "OP_QuantizedMaxPool_8",
- "OP_QuantizedMaxPool_8_ref",
- "OP_QuantizedAvgPool_8",
- "OP_QuantizedAvgPool_8_ref",
- "OP_QuantizedL2Pool_8",
- "OP_QuantizedL2Pool_8_ref",
- "OP_QuantizedConcat_8",
- "OP_QuantizedConcat_8_ref",
- "OP_QuantizedBiasAdd_8p8to32",
- "OP_QuantizedBiasAdd_8p8to32_ref",
- "OP_Min_f",
- "OP_Min_f_ref",
- "OP_Max_f",
- "OP_Max_f_ref",
- "OP_Quantize",
- "OP_Quantize_ref",
- "OP_Dequantize",
- "OP_Dequantize_ref",
- "OP_Supernode_8x8p8to8",
- "OP_Supernode_8x8p8to8_ref",
- "OP_QuantizedFlatten",
- "OP_Softmax_f",
- "OP_Conv2d_f",
- "OP_MatMul_f",
- "OP_Relu_f",
- "OP_ReluX_f",
- "OP_AvgPool_f",
- "OP_L2Pool_f",
- "OP_MaxPool_f",
- "OP_Concat_f",
- "OP_BiasAdd_f",
- "OP_LRN_f",
- "OP_Variable",
- "OP_Assign",
- "OP_Reshape",
- "OP_QuantizedReshape",
- "OP_Tanh_f",
- "OP_Sigmoid_f",
- "OP_Slice_8",
- "OP_Slice_f",
- "OP_QuantizedSlice_8",
- "OP_Add_f",
- "OP_Mul_f",
- "OP_Minimum_f",
- "OP_Maximum_f",
- "OP_Requantize_32to8",
- "OP_Requantize_32to8_ref",
- "OP_RequantizationRange_32",
- "OP_RequantizationRange_32_ref",
- "OP_Neg_f",
- "OP_Sub_f",
- "OP_AddN_f",
- "OP_Range_int32",
- "OP_Rank_int32",
- "OP_Transpose_int32",
- "OP_Transpose_f",
- "OP_InstanceNorm_f",
- "OP_QuantizedInstanceNorm_8",
- "OP_QuantizedInstanceNorm_8_ref",
- "OP_Sub_int32",
- "OP_Add_int32",
- "OP_Split_f",
- "OP_Dequantize_qint32_f",
- "OP_PRelu_f",
- "OP_QuantizedPRelu_8",
- "OP_QuantizedPRelu_8_ref",
- "OP_Sum_f",
- "OP_Prod_f",
- "OP_Mul_int32",
- "OP_LogicalAnd_int32",
- "OP_LogicalOr_int32",
- "OP_LogicalXor_int32",
- "OP_Shape_int32",
- "OP_Pack_int32",
- "OP_MirrorPad_f",
- "OP_ResizeNearestNeighbor_f",
- "OP_StridedSlice_int32",
- "OP_StridedSlice_f",
- "OP_ExpandDims_int32",
- "OP_ExpandDims_f",
- "OP_LogSoftmax_f",
- "OP_Split_int32",
- "OP_QuantizedSplit_8",
- "OP_Deconv_f",
- "OP_QuantizedDeconv_8x8to32",
- "OP_QuantizedDeconv_8x8to32_ref",
- "OP_QuantizedMul_8x8to32",
- "OP_QuantizedMul_8x8to32_ref",
- "OP_QuantizedAdd_8p8to32",
- "OP_QuantizedAdd_8p8to32_ref",
- "OP_QuantizedSigmoid_8",
- "OP_QuantizedSigmoid_8_ref",
- "OP_QuantizedTanh_8",
- "OP_QuantizedTanh_8_ref",
- "OP_QuantizedSoftmax_8",
- "OP_QuantizedSoftmax_8_ref",
- "OP_QuantizedLRN_8",
- "OP_QuantizedLRN_8_ref",
- "OP_Quantizedpad2d_frame_8p",
- "OP_Quantizedpad2d_frame_8p_ref",
- "OP_QuantizedSub_8p8to32",
- "OP_QuantizedSub_8p8to32_ref",
- "OP_QuantizedMaximum_8",
- "OP_QuantizedMaximum_8_ref",
- "OP_QuantizedMinimum_8",
- "OP_QuantizedMinimum_8_ref",
- "OP_Pad_f",
- "OP_SpaceToBatchND_f",
- "OP_BatchToSpaceND_f",
- "OP_QuantizedPad_8",
- "OP_ResizeBilinear_f",
- "OP_ConcatV2_f",
- "OP_ConcatV2_int32",
- "OP_Prod_int32",
- "OP_Slice_int32",
- "OP_QuantizedAdd_8p8to8",
- "OP_QuantizedResizeBilinear_8",
- "OP_Supernode_8x8p8to8_d32",
- "OP_Convert_to_d32",
- "OP_Convert_from_d32",
- "OP_QuantizedMaxPool_8_d32",
- "OP_QuantizedMaxPool_8_d32_ref",
- "OP_QuantizedConcat_8_d32",
- "OP_QuantizedConcat_8_d32_ref",
- "OP_QuantizedAvgPool_8_d32",
- "OP_QuantizedAvgPool_8_d32_ref",
- "OP_Sink",
- "OP_QuantizedPRelu_8_d32",
- "OP_QuantizedPRelu_8_d32_ref",
- "OP_AutoQuantize",
- "OP_AutoQuantize_ref",
- "OP_QuantizedDepthwiseConv2d_8x8to32",
- "OP_QuantizedDepthwiseConv2d_8x8to32_ref",
- "OP_DepthwiseConv2d_f",
- "OP_DepthwiseSupernode_8x8p8to8",
- "OP_DepthwiseSupernode_8x8p8to8_d32",
- "OP_QuantizedMul_8x8to8_d32",
- "OP_QuantizedMul_8x8to8_d32_ref",
- "OP_FullyConnected_u8",
- "OP_QuantizedAdd_8x8to8_d32",
- "OP_QuantizedAdd_8x8to8_d32_ref",
- "OP_QuantizedClamp_8",
- "OP_QuantizedClamp_8_ref",
- "OP_Clamp_f",
- "OP_QuantizeForTest_d32",
- "OP_Close_d32",
- "OP_QuantizedSub_8x8to8_d32",
- "OP_QuantizedSub_8x8to8_d32_ref",
- "OP_InputSupernode_8x8p8to8_outd32",
- "OP_QuantizedLRN_8_d32",
- "OP_QuantizedBiasAdd_32p32to32",
- "OP_QuantizedBiasAdd_32p32to32_ref",
- "OP_Quantize_int32",
- "OP_Quantize_int32_ref",
- "OP_Supernode_8x8p32to8",
- "OP_DepthwiseSupernode_8x8p32to8",
- "OP_Supernode_8x8p32to8_d32",
- "OP_DepthwiseSupernode_8x8p32to8_d32",
- "OP_InputSupernode_8x8p32to8_outd32",
-};
-
-const char* kPadding[] = {
- "NN_PAD_NA",
- "NN_PAD_SAME",
- "NN_PAD_VALID",
- "NN_PAD_MIRROR_REFLECT",
- "NN_PAD_MIRROR_SYMMETRIC",
- "NN_PAD_SAME_CAFFE",
-};
-
-} // anonymous namespace
-
// printers
std::string toString(uint32_t val) {
return std::to_string(val);
@@ -412,13 +217,26 @@
}
std::string toString(op_type op) {
- return static_cast<size_t>(op) < sizeof(kOps) / sizeof(char*) ?
- kOps[static_cast<size_t>(op)] : "<invalid op_type>";
+ static const char* opText[] = {
+ #define DEF_OP(NAME,...) "OP_"#NAME,
+ #include "hexagon_nn_controller/ops.def"
+ #undef DEF_OP
+ };
+ return static_cast<size_t>(op) < sizeof(opText) / sizeof(char*) ?
+ opText[static_cast<size_t>(op)] : "<invalid op_type>";
}
std::string toString(hexagon_nn_padding_type padding) {
    // Printable names for the hexagon padding enum, in declaration order.
    static const char* const paddingText[] = {
        "NN_PAD_NA",
        "NN_PAD_SAME",
        "NN_PAD_VALID",
        "NN_PAD_MIRROR_REFLECT",
        "NN_PAD_MIRROR_SYMMETRIC",
        "NN_PAD_SAME_CAFFE",
    };
    const size_t index = static_cast<size_t>(padding);
    if (index >= sizeof(paddingText) / sizeof(paddingText[0])) {
        return "<invalid hexagon_nn_padding_type>";
    }
    return paddingText[index];
}
std::string toString(const hexagon_nn_input& input) {
diff --git a/1.0/PreparedModel.cpp b/1.0/PreparedModel.cpp
index 735f564..4e091e2 100644
--- a/1.0/PreparedModel.cpp
+++ b/1.0/PreparedModel.cpp
@@ -56,9 +56,9 @@
return ErrorStatus::DEVICE_UNAVAILABLE;
}
- // This thread is intentionally detached because the sample driver service
- // is expected to live forever.
- std::thread(asyncExecute, mHexagonModel, request, callback).detach();
+ // TODO: once nnlib hanging issue is resolved, make this function
+ // asynchronous again
+ asyncExecute(mHexagonModel, request, callback);
return ErrorStatus::NONE;
}
diff --git a/1.0/hexagon_nn_controller/ops.def b/1.0/hexagon_nn_controller/ops.def
index f8b91ee..b6d727d 100644
--- a/1.0/hexagon_nn_controller/ops.def
+++ b/1.0/hexagon_nn_controller/ops.def
@@ -198,7 +198,7 @@
DEF_OP_WREF(QuantizedFC_8x8p8to8)
#endif
-DEF_OP_WREF(QuantizedAdd_8x8to8_d32)
+DEF_OP_WREF(QuantizedAdd_8p8to8_d32)
DEF_OP_WREF(QuantizedClamp_8)
DEF_OP(Clamp_f)
@@ -217,6 +217,13 @@
DEF_OP(DepthwiseSupernode_8x8p32to8_d32)
DEF_OP(InputSupernode_8x8p32to8_outd32)
+DEF_OP(PPrint_8_d32)
+DEF_OP(PPrintWithPadding_8_d32)
+DEF_OP_WREF(AutoQuantize_d32)
+
+DEF_OP_WREF(QuantizedTanh_8_d32)
+DEF_OP_WREF(QuantizedSigmoid_8_d32)
+
#ifdef __SELF_DEF_OP_WREF
#undef __SELF_DEF_OP_WREF
#undef DEF_OP_WREF